* [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API
@ 2017-11-23 12:02 Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 1/6] net/mlx5: store PMD args in private structure Shahaf Shuler
` (6 more replies)
0 siblings, 7 replies; 29+ messages in thread
From: Shahaf Shuler @ 2017-11-23 12:02 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
This series converts the mlx4 and mlx5 PMDs to the new ethdev offloads API [1].
[1] http://dpdk.org/ml/archives/dev/2017-October/077329.html
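For reference, a minimal application-side sketch of the new API this series
targets, assuming a valid port id and a single Rx/Tx queue (the function name
is illustrative, not part of the series): offloads are requested as
DEV_{RX,TX}_OFFLOAD_* bits in rte_eth_conf instead of the old rxmode
bit-fields and txq_flags.

#include <rte_ethdev.h>

static int
configure_port_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			/* Tell the PMD to use .offloads rather than the
			 * legacy hw_* bit-fields during the transition. */
			.ignore_offload_bitfield = 1,
			.offloads = DEV_RX_OFFLOAD_CHECKSUM |
				    DEV_RX_OFFLOAD_VLAN_STRIP,
		},
		.txmode = {
			.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_TCP_TSO,
		},
	};

	/* PMDs converted by this series reject unsupported bits here. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}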
Shahaf Shuler (6):
net/mlx5: store PMD args in private structure
net/mlx5: convert to new Tx offloads API
net/mlx5: convert to new Rx offloads API
net/mlx5: fix VLAN configuration after port stop
net/mlx4: convert to new Tx offloads API
net/mlx4: convert to new Rx offloads API
doc/guides/nics/mlx5.rst | 12 +--
drivers/net/mlx4/mlx4.c | 20 ++++
drivers/net/mlx4/mlx4_ethdev.c | 17 +---
drivers/net/mlx4/mlx4_flow.c | 5 +-
drivers/net/mlx4/mlx4_rxq.c | 78 ++++++++++++++-
drivers/net/mlx4/mlx4_rxtx.h | 3 +
drivers/net/mlx4/mlx4_txq.c | 66 ++++++++++++-
drivers/net/mlx5/mlx5.c | 176 ++++++++++++++++++----------------
drivers/net/mlx5/mlx5.h | 24 +++--
drivers/net/mlx5/mlx5_ethdev.c | 55 ++++++-----
drivers/net/mlx5/mlx5_rxq.c | 108 +++++++++++++++++----
drivers/net/mlx5/mlx5_rxtx.h | 6 +-
drivers/net/mlx5/mlx5_rxtx_vec.c | 30 ++++--
drivers/net/mlx5/mlx5_txq.c | 95 +++++++++++++++---
drivers/net/mlx5/mlx5_vlan.c | 8 +-
15 files changed, 517 insertions(+), 186 deletions(-)
--
2.12.0
* [dpdk-dev] [PATCH 1/6] net/mlx5: store PMD args in private structure
2017-11-23 12:02 [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API Shahaf Shuler
@ 2017-11-23 12:02 ` Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 2/6] net/mlx5: convert to new Tx offloads API Shahaf Shuler
` (5 subsequent siblings)
6 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2017-11-23 12:02 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
The PMD has several device-specific parameters set by the application.
The current implementation parses those arguments as part of the probe
stage, but the values set by the application are not stored anywhere.
This patch stores the parameters set by the application in the PMD
private structure, in order to provide an infrastructure for dynamic
Tx and Rx burst callback selection based on the offloads the
application requests.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
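As a side note, a self-contained sketch of the default-versus-effective
split this patch introduces (field and function names are simplified
stand-ins for the driver's, and the program is illustrative only): the
devargs parsed at probe time are kept verbatim, and the effective values
are recomputed from them plus device defaults.

#include <stdio.h>

#define ARG_UNSET -1

struct args { int tx_vec_en; };

/* Defaults parsed from devargs stay untouched; effective values are
 * re-derived from them whenever the configuration changes. */
static void
args_update(const struct args *args_default, struct args *args)
{
	if (args_default->tx_vec_en != ARG_UNSET)
		args->tx_vec_en = args_default->tx_vec_en; /* user choice */
	else
		args->tx_vec_en = 1; /* enabled by default */
}

int
main(void)
{
	struct args def = { .tx_vec_en = ARG_UNSET }, eff;

	args_update(&def, &eff);
	printf("tx_vec_en=%d\n", eff.tx_vec_en); /* prints 1 */
	return 0;
}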
---
drivers/net/mlx5/mlx5.c | 151 +++++++++++++++++++---------------
drivers/net/mlx5/mlx5.h | 24 ++++--
drivers/net/mlx5/mlx5_ethdev.c | 10 +--
drivers/net/mlx5/mlx5_rxq.c | 6 +-
drivers/net/mlx5/mlx5_rxtx_vec.c | 10 +--
drivers/net/mlx5/mlx5_txq.c | 27 +++---
6 files changed, 126 insertions(+), 102 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0548d17ad..be21c72e8 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -106,17 +106,6 @@
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
-struct mlx5_args {
- int cqe_comp;
- int txq_inline;
- int txqs_inline;
- int mps;
- int mpw_hdr_dseg;
- int inline_max_packet_sz;
- int tso;
- int tx_vec_en;
- int rx_vec_en;
-};
/**
* Retrieve integer value from environment variable.
*
@@ -489,35 +478,91 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
static struct rte_pci_driver mlx5_driver;
/**
- * Assign parameters from args into priv, only non default
- * values are considered.
+ * Update parameters from application configuration.
*
- * @param[out] priv
+ * @param[in/out] priv
* Pointer to private structure.
- * @param[in] args
- * Pointer to args values.
*/
static void
-mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
+mlx5_args_update(struct priv *priv)
{
- if (args->cqe_comp != MLX5_ARG_UNSET)
- priv->cqe_comp = args->cqe_comp;
- if (args->txq_inline != MLX5_ARG_UNSET)
- priv->txq_inline = args->txq_inline;
- if (args->txqs_inline != MLX5_ARG_UNSET)
- priv->txqs_inline = args->txqs_inline;
- if (args->mps != MLX5_ARG_UNSET)
- priv->mps = args->mps ? priv->mps : 0;
- if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
- priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
- if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
- priv->inline_max_packet_sz = args->inline_max_packet_sz;
- if (args->tso != MLX5_ARG_UNSET)
- priv->tso = args->tso;
- if (args->tx_vec_en != MLX5_ARG_UNSET)
- priv->tx_vec_en = args->tx_vec_en;
- if (args->rx_vec_en != MLX5_ARG_UNSET)
- priv->rx_vec_en = args->rx_vec_en;
+ struct mlx5_args *args_def = &priv->args_default;
+ struct mlx5_args *args = &priv->args;
+
+ if (args_def->cqe_comp != MLX5_ARG_UNSET) {
+ if (!priv->cqe_comp && args_def->cqe_comp) {
+ WARN("Rx CQE compression is not supported");
+ args_def->cqe_comp = 0;
+ }
+ args->cqe_comp = args_def->cqe_comp;
+ } else {
+ args->cqe_comp = priv->cqe_comp;
+ }
+ if (args_def->tso != MLX5_ARG_UNSET) {
+ if (!priv->tso && args_def->tso) {
+ WARN("TSO is not supported");
+ args_def->tso = 0;
+ }
+ args->tso = args_def->tso;
+ } else {
+ args->tso = 0;
+ }
+ if (args_def->mps != MLX5_ARG_UNSET) {
+ if (!priv->mps && args_def->mps) {
+ WARN("multi-packet send not supported");
+ args_def->mps = MLX5_MPW_DISABLED;
+ }
+ if (args->tso && args_def->mps) {
+ WARN("multi-packet send not supported in conjunction "
+ "with TSO. MPS disabled");
+ args->mps = MLX5_MPW_DISABLED;
+ } else {
+ args->mps = args_def->mps ? priv->mps :
+ MLX5_MPW_DISABLED;
+ }
+ } else {
+ if (args->tso)
+ args->mps = MLX5_MPW_DISABLED;
+ else
+ args->mps = priv->mps;
+ }
+ if (args_def->txq_inline != MLX5_ARG_UNSET) {
+ args->txq_inline = args_def->txq_inline;
+ } else {
+ if (args->mps == MLX5_MPW_ENHANCED)
+ args->txq_inline = MLX5_WQE_SIZE_MAX -
+ MLX5_WQE_SIZE;
+ else
+ args->txq_inline = 0;
+ }
+ if (args_def->txqs_inline != MLX5_ARG_UNSET) {
+ args->txqs_inline = args_def->txqs_inline;
+ } else {
+ if (args->mps == MLX5_MPW_ENHANCED)
+ args->txqs_inline = MLX5_EMPW_MIN_TXQS;
+ else
+ args->txqs_inline = 0;
+ }
+ if (args_def->mpw_hdr_dseg != MLX5_ARG_UNSET)
+ args->mpw_hdr_dseg = args_def->mpw_hdr_dseg;
+ else
+ args->mpw_hdr_dseg = 0;
+ if (args_def->inline_max_packet_sz != MLX5_ARG_UNSET) {
+ args->inline_max_packet_sz = args_def->inline_max_packet_sz;
+ } else {
+ if (args->mps == MLX5_MPW_ENHANCED)
+ args->inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+ else
+ args->inline_max_packet_sz = 0;
+ }
+ if (args_def->tx_vec_en != MLX5_ARG_UNSET)
+ args->tx_vec_en = args_def->tx_vec_en;
+ else
+ args->tx_vec_en = 1;
+ if (args_def->rx_vec_en != MLX5_ARG_UNSET)
+ args->rx_vec_en = args_def->rx_vec_en;
+ else
+ args->rx_vec_en = 1;
}
/**
@@ -774,19 +819,16 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
- priv->mps = mps; /* Enable MPW by default if supported. */
+ priv->mps = mps;
priv->cqe_comp = cqe_comp;
priv->tunnel_en = tunnel_en;
- /* Enable vector by default if supported. */
- priv->tx_vec_en = 1;
- priv->rx_vec_en = 1;
err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
goto port_error;
}
- mlx5_args_assign(priv, &args);
+ priv->args_default = args;
if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
ERROR("ibv_query_device_ex() failed");
goto port_error;
@@ -847,34 +889,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (priv->tso)
priv->max_tso_payload_sz =
device_attr_ex.tso_caps.max_tso;
- if (priv->mps && !mps) {
- ERROR("multi-packet send not supported on this device"
- " (" MLX5_TXQ_MPW_EN ")");
- err = ENOTSUP;
- goto port_error;
- } else if (priv->mps && priv->tso) {
- WARN("multi-packet send not supported in conjunction "
- "with TSO. MPS disabled");
- priv->mps = 0;
- }
- INFO("%sMPS is %s",
- priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
- priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- /* Set default values for Enhanced MPW, a.k.a MPWv2. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
- if (args.txqs_inline == MLX5_ARG_UNSET)
- priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
- if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
- priv->inline_max_packet_sz =
- MLX5_EMPW_MAX_INLINE_LEN;
- if (args.txq_inline == MLX5_ARG_UNSET)
- priv->txq_inline = MLX5_WQE_SIZE_MAX -
- MLX5_WQE_SIZE;
- }
- if (priv->cqe_comp && !cqe_comp) {
- WARN("Rx CQE compression isn't supported");
- priv->cqe_comp = 0;
- }
+ mlx5_args_update(priv);
/* Configure the first MAC address by default. */
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx5_en loaded?"
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e6a69b823..5e943000e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -90,6 +90,18 @@ struct mlx5_xstats_ctrl {
/* Flow list . */
TAILQ_HEAD(mlx5_flows, rte_flow);
+struct mlx5_args {
+ int cqe_comp;
+ int txq_inline;
+ int txqs_inline;
+ int mps;
+ int mpw_hdr_dseg;
+ int inline_max_packet_sz;
+ int tso;
+ int tx_vec_en;
+ int rx_vec_en;
+};
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device of master process. */
struct ibv_context *ctx; /* Verbs context. */
@@ -108,21 +120,15 @@ struct priv {
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int sriov:1; /* This is a VF or PF with VF devices. */
- unsigned int mps:2; /* Multi-packet send mode (0: disabled). */
- unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
- unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
+ unsigned int mps:2; /* Multi-packet send supported mode. */
+ unsigned int cqe_comp:1; /* Whether CQE compression is supported. */
unsigned int pending_alarm:1; /* An alarm is pending. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tunnel_en:1;
unsigned int isolated:1; /* Whether isolated mode is enabled. */
- unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
- unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
unsigned int counter_set_supported:1; /* Counter set is supported. */
/* Whether Tx offloads for tunneled packets are supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
- unsigned int txq_inline; /* Maximum packet size for inlining. */
- unsigned int txqs_inline; /* Queue number threshold for inlining. */
- unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
@@ -149,6 +155,8 @@ struct priv {
rte_spinlock_t lock; /* Lock for control functions. */
int primary_socket; /* Unix socket for primary process. */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
+ struct mlx5_args args_default; /* Args default as set by the app. */
+ struct mlx5_args args; /* Args value as set on runtime. */
};
/**
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index a3cef6891..5c59bc45e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -701,14 +701,14 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
DEV_RX_OFFLOAD_TIMESTAMP;
- if (!priv->mps)
+ if (!priv->args.mps)
info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
if (priv->hw_csum)
info->tx_offload_capa |=
(DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
- if (priv->tso)
+ if (priv->args.tso)
info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
if (priv->tunnel_en)
info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
@@ -1434,7 +1434,7 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
assert(dev != NULL);
dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
+ if (priv->args.mps == MLX5_MPW_ENHANCED) {
if (priv_check_vec_tx_support(priv) > 0) {
if (priv_check_raw_vec_tx_support(priv) > 0)
dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
@@ -1445,10 +1445,10 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
dev->tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
- } else if (priv->mps && priv->txq_inline) {
+ } else if (priv->args.mps && priv->args.txq_inline) {
dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if (priv->mps) {
+ } else if (priv->args.mps) {
dev->tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 85399eff5..4298c1b4d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -612,7 +612,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
- if (priv->cqe_comp && !rxq_data->hw_timestamp) {
+ if (priv->args.cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
@@ -622,7 +622,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
*/
if (rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
- } else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+ } else if (priv->args.cqe_comp && rxq_data->hw_timestamp) {
DEBUG("Rx CQE compression is disabled for HW timestamp");
}
tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
@@ -885,7 +885,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
struct rte_eth_dev *dev = priv->dev;
struct mlx5_rxq_ctrl *tmpl;
const uint16_t desc_n =
- desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ desc + priv->args.rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
tmpl = rte_calloc_socket("RXQ", 1,
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index ba6c8cefd..2556f5ebf 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -329,10 +329,10 @@ priv_check_raw_vec_tx_support(struct priv *priv)
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
- if (!priv->tx_vec_en ||
+ if (!priv->args.tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
- priv->mps != MLX5_MPW_ENHANCED ||
- priv->tso)
+ priv->args.mps != MLX5_MPW_ENHANCED ||
+ priv->args.tso)
return -ENOTSUP;
return 1;
}
@@ -352,7 +352,7 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+ if (!ctrl->priv->args.rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return 1;
}
@@ -371,7 +371,7 @@ priv_check_vec_rx_support(struct priv *priv)
{
uint16_t i;
- if (!priv->rx_vec_en)
+ if (!priv->args.rx_vec_en)
return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9c5860ff4..28fc90e2e 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -322,7 +322,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
};
cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
- if (priv->mps == MLX5_MPW_ENHANCED)
+ if (priv->args.mps == MLX5_MPW_ENHANCED)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
@@ -582,34 +582,35 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
- if (priv->mps == MLX5_MPW_ENHANCED)
- tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
+ if (priv->args.mps == MLX5_MPW_ENHANCED)
+ tmpl->txq.mpw_hdr_dseg = priv->args.mpw_hdr_dseg;
/* MRs will be registered in mp2mr[] later. */
DEBUG("priv->device_attr.max_qp_wr is %d",
priv->device_attr.orig_attr.max_qp_wr);
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.orig_attr.max_sge);
- if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ if (priv->args.txq_inline &&
+ priv->txqs_n >= (unsigned int)priv->args.txqs_inline) {
unsigned int ds_cnt;
tmpl->txq.max_inline =
- ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ ((priv->args.txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
tmpl->txq.inline_en = 1;
/* TSO and MPS can't be enabled concurrently. */
- assert(!priv->tso || !priv->mps);
- if (priv->mps == MLX5_MPW_ENHANCED) {
+ assert(!priv->args.tso || !priv->args.mps);
+ if (priv->args.mps == MLX5_MPW_ENHANCED) {
tmpl->txq.inline_max_packet_sz =
- priv->inline_max_packet_sz;
+ priv->args.inline_max_packet_sz;
/* To minimize the size of data set, avoid requesting
* too large WQ.
*/
tmpl->max_inline_data =
- ((RTE_MIN(priv->txq_inline,
- priv->inline_max_packet_sz) +
+ ((RTE_MIN(priv->args.txq_inline,
+ priv->args.inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (priv->tso) {
+ } else if (priv->args.tso) {
int inline_diff = tmpl->txq.max_inline - max_tso_inline;
/*
@@ -641,11 +642,11 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
RTE_CACHE_LINE_SIZE);
WARN("txq inline is too large (%d) setting it to "
"the maximum possible: %d\n",
- priv->txq_inline, max_inline);
+ priv->args.txq_inline, max_inline);
tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
}
}
- if (priv->tso) {
+ if (priv->args.tso) {
tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
max_tso_inline);
--
2.12.0
* [dpdk-dev] [PATCH 2/6] net/mlx5: convert to new Tx offloads API
2017-11-23 12:02 [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 1/6] net/mlx5: store PMD args in private structure Shahaf Shuler
@ 2017-11-23 12:02 ` Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 3/6] net/mlx5: convert to new Rx " Shahaf Shuler
` (4 subsequent siblings)
6 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2017-11-23 12:02 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
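A self-contained sketch, simplified from the mlx5_txq.c hunk below, of the
queue-offload validation rule this patch adds (the function name here is
illustrative):

#include <stdint.h>

static int
queue_offloads_allowed(uint64_t queue_off, uint64_t port_off,
		       uint64_t port_supp_off)
{
	/* Every requested bit must be a supported bit. */
	if ((queue_off & port_supp_off) != queue_off)
		return 0;
	/*
	 * The XOR exposes bits on which queue and port configuration
	 * disagree; since mlx5 has no pure per-queue Tx offloads, any
	 * such mismatch on a supported bit is invalid.
	 */
	if ((port_off ^ queue_off) & port_supp_off)
		return 0;
	return 1;
}

For example, a queue requesting DEV_TX_OFFLOAD_TCP_TSO while the port
configuration did not sets that bit in the XOR, so the setup is rejected.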
---
doc/guides/nics/mlx5.rst | 12 ++----
drivers/net/mlx5/mlx5.c | 49 +++++++++++------------
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_ethdev.c | 27 ++++++-------
drivers/net/mlx5/mlx5_rxtx.h | 3 +-
drivers/net/mlx5/mlx5_rxtx_vec.c | 22 ++++++++---
drivers/net/mlx5/mlx5_txq.c | 74 ++++++++++++++++++++++++++++++++---
7 files changed, 129 insertions(+), 60 deletions(-)
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index f9558da89..1942eda47 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -253,8 +253,10 @@ Run-time configuration
Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
in the same descriptor.
- This option cannot be used in conjunction with ``tso`` below. When ``tso``
- is set, ``txq_mpw_en`` is disabled.
+ This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO,
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, DEV_TX_OFFLOAD_VLAN_INSERT``.
+ When those offloads are enabled, the MPW send function is disabled.
It is currently only supported on the ConnectX-4 Lx and ConnectX-5
families of adapters. Enabled by default.
@@ -275,12 +277,6 @@ Run-time configuration
Effective only when Enhanced MPS is supported. The default value is 256.
-- ``tso`` parameter [int]
-
- A nonzero value enables hardware TSO.
- When hardware TSO is enabled, packets marked with TCP segmentation
- offload will be divided into segments by the hardware. Disabled by default.
-
- ``tx_vec_en`` parameter [int]
A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index be21c72e8..03839271c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -85,9 +85,6 @@
/* Device parameter to limit the size of inlining packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
-/* Device parameter to enable hardware TSO offload. */
-#define MLX5_TSO "tso"
-
/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"
@@ -411,8 +408,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
args->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
args->inline_max_packet_sz = tmp;
- } else if (strcmp(MLX5_TSO, key) == 0) {
- args->tso = !!tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
args->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
@@ -445,7 +440,6 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
MLX5_TXQ_MPW_EN,
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
- MLX5_TSO,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
NULL,
@@ -483,11 +477,22 @@ static struct rte_pci_driver mlx5_driver;
* @param[in/out] priv
* Pointer to private structure.
*/
-static void
+void
mlx5_args_update(struct priv *priv)
{
struct mlx5_args *args_def = &priv->args_default;
struct mlx5_args *args = &priv->args;
+ uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+ uint64_t tx_offloads = priv->dev ?
+ priv->dev->data->dev_conf.txmode.offloads :
+ 0;
+ int tso = !!(tx_offloads & supp_tx_offloads & DEV_TX_OFFLOAD_TCP_TSO);
+ int vlan_insert = !!(tx_offloads & supp_tx_offloads &
+ DEV_TX_OFFLOAD_VLAN_INSERT);
+ int tunnel = !!(tx_offloads & supp_tx_offloads &
+ (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
if (args_def->cqe_comp != MLX5_ARG_UNSET) {
if (!priv->cqe_comp && args_def->cqe_comp) {
@@ -498,30 +503,28 @@ mlx5_args_update(struct priv *priv)
} else {
args->cqe_comp = priv->cqe_comp;
}
- if (args_def->tso != MLX5_ARG_UNSET) {
- if (!priv->tso && args_def->tso) {
- WARN("TSO is not supported");
- args_def->tso = 0;
- }
- args->tso = args_def->tso;
- } else {
- args->tso = 0;
- }
if (args_def->mps != MLX5_ARG_UNSET) {
if (!priv->mps && args_def->mps) {
WARN("multi-packet send not supported");
args_def->mps = MLX5_MPW_DISABLED;
- }
- if (args->tso && args_def->mps) {
+ } else if (tso && args_def->mps) {
WARN("multi-packet send not supported in conjunction "
"with TSO. MPS disabled");
args->mps = MLX5_MPW_DISABLED;
+ } else if (vlan_insert && args_def->mps) {
+ WARN("multi-packet send not supported in conjunction "
+ "with vlan insertion. MPS disabled");
+ args->mps = MLX5_MPW_DISABLED;
+ } else if (tunnel && args_def->mps) {
+ WARN("multi-packet send not supported in conjunction "
+ "with tunnel offloads. MPS disabled");
+ args->mps = MLX5_MPW_DISABLED;
} else {
args->mps = args_def->mps ? priv->mps :
MLX5_MPW_DISABLED;
}
} else {
- if (args->tso)
+ if (tso || vlan_insert || tunnel)
args->mps = MLX5_MPW_DISABLED;
else
args->mps = priv->mps;
@@ -725,7 +728,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
.mps = MLX5_ARG_UNSET,
.mpw_hdr_dseg = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
- .tso = MLX5_ARG_UNSET,
.tx_vec_en = MLX5_ARG_UNSET,
.rx_vec_en = MLX5_ARG_UNSET,
};
@@ -882,10 +884,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv_get_num_vfs(priv, &num_vfs);
priv->sriov = (num_vfs || sriov);
- priv->tso = ((priv->tso) &&
- (device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
+ priv->tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
if (priv->tso)
priv->max_tso_payload_sz =
device_attr_ex.tso_caps.max_tso;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5e943000e..cba6d3ceb 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -97,7 +97,6 @@ struct mlx5_args {
int mps;
int mpw_hdr_dseg;
int inline_max_packet_sz;
- int tso;
int tx_vec_en;
int rx_vec_en;
};
@@ -187,6 +186,7 @@ priv_unlock(struct priv *priv)
/* mlx5.c */
int mlx5_getenv_int(const char *);
+void mlx5_args_update(struct priv *);
/* mlx5_ethdev.c */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5c59bc45e..decc6edfa 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -578,7 +578,15 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
-
+ uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+ if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+ ERROR("Some Tx offloads are not supported "
+ "requested 0x%lx supported 0x%lx\n",
+ tx_offloads, supp_tx_offloads);
+ return ENOTSUP;
+ }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -610,6 +618,8 @@ dev_configure(struct rte_eth_dev *dev)
ERROR("cannot handle this many RX queues (%u)", rxqs_n);
return EINVAL;
}
+ /* Update args according to selected offloads. */
+ mlx5_args_update(priv);
if (rxqs_n == priv->rxqs_n)
return 0;
INFO("%p: RX queues number update: %u -> %u",
@@ -700,20 +710,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
0) |
(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
DEV_RX_OFFLOAD_TIMESTAMP;
-
- if (!priv->args.mps)
- info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
- if (priv->hw_csum)
- info->tx_offload_capa |=
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
- if (priv->args.tso)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- if (priv->tunnel_en)
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d34f3cc04..1e0a9875f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -200,7 +200,7 @@ struct mlx5_txq_data {
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
- uint32_t flags; /* Flags for Tx Queue. */
+ uint64_t offloads; /* Offloads for Tx Queue. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
@@ -292,6 +292,7 @@ int mlx5_priv_txq_release(struct priv *, uint16_t);
int mlx5_priv_txq_releasable(struct priv *, uint16_t);
int mlx5_priv_txq_verify(struct priv *);
void txq_alloc_elts(struct mlx5_txq_ctrl *);
+uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
/* mlx5_rxtx.c */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 2556f5ebf..4e09a959d 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -202,15 +202,18 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t ret;
/* Transmit multi-seg packets in the head of pkts list. */
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+ if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
NB_SEGS(pkts[nb_tx]) > 1)
nb_tx += txq_scatter_v(txq,
&pkts[nb_tx],
pkts_n - nb_tx);
n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
n = txq_check_multiseg(&pkts[nb_tx], n);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ if (txq->offloads &
+ (DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM))
n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
nb_tx += ret;
@@ -308,8 +311,12 @@ priv_check_raw_vec_tx_support(struct priv *priv)
for (i = 0; i < priv->txqs_n; ++i) {
struct mlx5_txq_data *txq = (*priv->txqs)[i];
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
- !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ if (txq->offloads &
+ (DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_IPV4_CKSUM))
break;
}
if (i != priv->txqs_n)
@@ -329,10 +336,13 @@ priv_check_raw_vec_tx_support(struct priv *priv)
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
+ uint64_t offloads = priv->dev->data->dev_conf.txmode.offloads;
+ int tso = !!(offloads & DEV_TX_OFFLOAD_TCP_TSO);
+
if (!priv->args.tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
priv->args.mps != MLX5_MPW_ENHANCED ||
- priv->args.tso)
+ tso)
return -ENOTSUP;
return 1;
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 28fc90e2e..4d9c7d697 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -116,6 +116,62 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
}
/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx5_priv_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT);
+
+ if (priv->hw_csum)
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (priv->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (priv->tunnel_en) {
+ if (priv->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (priv->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ }
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
+
+ /* There are no Tx offloads which are per queue. */
+ if ((offloads & port_supp_offloads) != offloads)
+ return 0;
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return 0;
+ return 1;
+}
+
+/**
* DPDK callback to configure a TX queue.
*
* @param dev
@@ -146,6 +202,15 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return -E_RTE_SECONDARY;
priv_lock(priv);
+ if (!priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%lx don't match port "
+ "offloads 0x%lx or supported offloads 0x%lx",
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx5_priv_get_tx_port_offloads(priv));
+ goto out;
+ }
if (desc <= MLX5_TX_COMP_THRESH) {
WARN("%p: number of descriptors requested for TX queue %u"
" must be higher than MLX5_TX_COMP_THRESH, using"
@@ -570,6 +635,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
struct mlx5_txq_ctrl *tmpl;
+ int tso = !!(conf->offloads & DEV_TX_OFFLOAD_TCP_TSO);
tmpl = rte_calloc_socket("TXQ", 1,
sizeof(*tmpl) +
@@ -578,7 +644,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
if (!tmpl)
return NULL;
assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.flags = conf->txq_flags;
+ tmpl->txq.offloads = conf->offloads;
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
@@ -597,8 +663,6 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
((priv->args.txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
tmpl->txq.inline_en = 1;
- /* TSO and MPS can't be enabled concurrently. */
- assert(!priv->args.tso || !priv->args.mps);
if (priv->args.mps == MLX5_MPW_ENHANCED) {
tmpl->txq.inline_max_packet_sz =
priv->args.inline_max_packet_sz;
@@ -610,7 +674,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
priv->args.inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (priv->args.tso) {
+ } else if (tso) {
int inline_diff = tmpl->txq.max_inline - max_tso_inline;
/*
@@ -646,7 +710,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
}
}
- if (priv->args.tso) {
+ if (tso) {
tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
max_tso_inline);
--
2.12.0
* [dpdk-dev] [PATCH 3/6] net/mlx5: convert to new Rx offloads API
2017-11-23 12:02 [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 1/6] net/mlx5: store PMD args in private structure Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 2/6] net/mlx5: convert to new Tx offloads API Shahaf Shuler
@ 2017-11-23 12:02 ` Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 4/6] net/mlx5: fix VLAN configuration after port stop Shahaf Shuler
` (3 subsequent siblings)
6 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2017-11-23 12:02 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
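For context, a hedged application-side sketch of per-queue Rx offload
configuration under the new API (queue index and ring size are illustrative
assumptions): per-queue Rx offloads travel in rte_eth_rxconf, while
port-wide ones such as DEV_RX_OFFLOAD_VLAN_FILTER stay in
rte_eth_conf.rxmode.

#include <rte_ethdev.h>

static int
setup_rx_queue(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_rxconf rxconf = {
		.offloads = DEV_RX_OFFLOAD_CHECKSUM |
			    DEV_RX_OFFLOAD_SCATTER,
	};

	/* The PMD validates these bits against the port configuration
	 * and its capabilities, as the hunks below implement. */
	return rte_eth_rx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mp);
}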
---
drivers/net/mlx5/mlx5_ethdev.c | 22 +++++---
drivers/net/mlx5/mlx5_rxq.c | 102 +++++++++++++++++++++++++++++++-----
drivers/net/mlx5/mlx5_rxtx.h | 3 ++
drivers/net/mlx5/mlx5_vlan.c | 3 +-
4 files changed, 108 insertions(+), 22 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index decc6edfa..434130c28 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -580,6 +580,10 @@ dev_configure(struct rte_eth_dev *dev)
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t supp_rx_offloads =
+ (mlx5_get_rx_port_offloads() |
+ mlx5_priv_get_rx_queue_offloads(priv));
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
ERROR("Some Tx offloads are not supported "
@@ -587,6 +591,12 @@ dev_configure(struct rte_eth_dev *dev)
tx_offloads, supp_tx_offloads);
return ENOTSUP;
}
+ if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+ ERROR("Some Rx offloads are not supported "
+ "requested 0x%lx supported 0x%lx\n",
+ rx_offloads, supp_rx_offloads);
+ return ENOTSUP;
+ }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -702,14 +712,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa =
- (priv->hw_csum ?
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM) :
- 0) |
- (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
- DEV_RX_OFFLOAD_TIMESTAMP;
+ info->rx_queue_offload_capa =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
+ info->rx_queue_offload_capa);
info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 4298c1b4d..c714b0d8f 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -213,6 +213,75 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
}
/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_queue_offloads(struct priv *priv)
+{
+ uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+ if (priv->hw_fcs_strip)
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ if (priv->hw_csum)
+ offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ if (priv->hw_vlan_strip)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ return offloads;
+}
+
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_get_rx_port_offloads(void)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t queue_supp_offloads =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
+
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return 0;
+ if (((port_offloads ^ offloads) & port_supp_offloads))
+ return 0;
+ return 1;
+}
+
+/**
*
* @param dev
* Pointer to Ethernet device structure.
@@ -259,6 +328,16 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
priv_unlock(priv);
return -EOVERFLOW;
}
+ if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%lx don't match port "
+ "offloads 0x%lx or supported offloads 0x%lx",
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx5_get_rx_port_offloads() |
+ mlx5_priv_get_rx_queue_offloads(priv)));
+ goto out;
+ }
if (!mlx5_priv_rxq_releasable(priv, idx)) {
ret = EBUSY;
ERROR("%p: unable to release queue index %u",
@@ -266,7 +345,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto out;
}
mlx5_priv_rxq_release(priv, idx);
- rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp);
+ rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
@@ -880,7 +959,8 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
*/
struct mlx5_rxq_ctrl*
mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket, struct rte_mempool *mp)
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
{
struct rte_eth_dev *dev = priv->dev;
struct mlx5_rxq_ctrl *tmpl;
@@ -902,7 +982,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -944,18 +1024,14 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- if (priv->hw_csum)
- tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- if (priv->hw_csum_l2tun)
- tmpl->rxq.csum_l2tun =
- !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- tmpl->rxq.hw_timestamp =
- !!dev->data->dev_conf.rxmode.hw_timestamp;
+ tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
+ priv->hw_csum_l2tun);
+ tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
- !!dev->data->dev_conf.rxmode.hw_vlan_strip);
+ tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
tmpl->rxq.crc_present = 0;
} else if (priv->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1e0a9875f..3d5894af9 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -252,6 +252,7 @@ int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
int mlx5_priv_rxq_ibv_verify(struct priv *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
uint16_t, unsigned int,
+ const struct rte_eth_rxconf *,
struct rte_mempool *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
int mlx5_priv_rxq_release(struct priv *, uint16_t);
@@ -272,6 +273,8 @@ struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
uint64_t, uint16_t [], uint16_t);
int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
int mlx5_priv_hrxq_ibv_verify(struct priv *);
+uint64_t mlx5_get_rx_port_offloads(void);
+uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *);
/* mlx5_txq.c */
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 6fc315ef3..dc82643fc 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -191,7 +191,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
unsigned int i;
if (mask & ETH_VLAN_STRIP_MASK) {
- int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
+ int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
if (!priv->hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
--
2.12.0
* [dpdk-dev] [PATCH 4/6] net/mlx5: fix VLAN configuration after port stop
2017-11-23 12:02 [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API Shahaf Shuler
` (2 preceding siblings ...)
2017-11-23 12:02 ` [dpdk-dev] [PATCH 3/6] net/mlx5: convert to new Rx " Shahaf Shuler
@ 2017-11-23 12:02 ` Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 5/6] net/mlx4: convert to new Tx offloads API Shahaf Shuler
` (2 subsequent siblings)
6 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2017-11-23 12:02 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev, stable
The ethdev layer has an API to configure VLAN settings on the fly, i.e.
while the port is started.
Calling such an API when the port is stopped may cause a segmentation
fault, as the related Verbs contexts have not been created yet.
Fixes: 09cb5b581762 ("net/mlx5: separate DPDK from verbs Rx queue objects")
Cc: nelio.laranjeiro@6wind.com
Cc: stable@dpdk.org
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
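A minimal sketch of the call sequence that could trigger the fault this
patch fixes (the port id is an assumption for illustration):

#include <rte_ethdev.h>

static void
toggle_vlan_strip_while_stopped(uint16_t port_id)
{
	rte_eth_dev_stop(port_id);
	/* Before this fix, mlx5 dereferenced the per-queue Verbs WQ here,
	 * which does not exist while the port is stopped. */
	(void)rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_STRIP_OFFLOAD);
	(void)rte_eth_dev_start(port_id);
}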
---
drivers/net/mlx5/mlx5_vlan.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index dc82643fc..128c2b6b6 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -127,6 +127,11 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
DEBUG("set VLAN offloads 0x%x for port %d queue %d",
vlan_offloads, rxq->port_id, idx);
+ if (!rxq_ctrl->ibv) {
+ /* Update related bits in RX queue. */
+ rxq->vlan_strip = !!on;
+ return;
+ }
mod = (struct ibv_wq_attr){
.attr_mask = IBV_WQ_ATTR_FLAGS,
.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
--
2.12.0
* [dpdk-dev] [PATCH 5/6] net/mlx4: convert to new Tx offloads API
2017-11-23 12:02 [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API Shahaf Shuler
` (3 preceding siblings ...)
2017-11-23 12:02 ` [dpdk-dev] [PATCH 4/6] net/mlx5: fix VLAN configuration after port stop Shahaf Shuler
@ 2017-11-23 12:02 ` Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 6/6] net/mlx4: convert to new Rx " Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
6 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2017-11-23 12:02 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
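For reference, a hedged application-side sketch of per-queue Tx offload
configuration under the new API (ring size and queue index are illustrative
assumptions): setting ETH_TXQ_FLAGS_IGNORE tells the PMD to honour
txconf->offloads instead of the legacy txq_flags bits.

#include <rte_ethdev.h>

static int
setup_tx_queue(uint16_t port_id)
{
	struct rte_eth_txconf txconf = {
		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
		.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			    DEV_TX_OFFLOAD_UDP_CKSUM |
			    DEV_TX_OFFLOAD_TCP_CKSUM,
	};

	/* These bits map onto the csum/csum_l2tun toggles in the
	 * mlx4_txq.c hunk below. */
	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}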
---
drivers/net/mlx4/mlx4.c | 9 +++++
drivers/net/mlx4/mlx4_ethdev.c | 7 +---
drivers/net/mlx4/mlx4_rxtx.h | 1 +
drivers/net/mlx4/mlx4_txq.c | 66 +++++++++++++++++++++++++++++++++++--
4 files changed, 75 insertions(+), 8 deletions(-)
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index f9e4f9d73..38c545b1b 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -99,8 +99,17 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct rte_flow_error error;
+ uint64_t supp_tx_offloads = mlx4_priv_get_tx_port_offloads(priv);
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
int ret;
+ if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+ rte_errno = ENOTSUP;
+ ERROR("Some Tx offloads are not supported "
+ "requested 0x%lx supported 0x%lx\n",
+ tx_offloads, supp_tx_offloads);
+ return -rte_errno;
+ }
/* Prepare internal flow rules. */
ret = mlx4_flow_sync(priv, &error);
if (ret) {
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 2f69e7d4f..63e00b1da 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -767,17 +767,12 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa = 0;
- info->tx_offload_capa = 0;
+ info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
if (priv->hw_csum) {
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM);
}
- if (priv->hw_csum_l2tun)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 463df2b0b..528600a18 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -181,6 +181,7 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_txconf *conf);
void mlx4_tx_queue_release(void *dpdk_txq);
+uint64_t mlx4_priv_get_tx_port_offloads(struct priv *priv);
/**
* Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 7882a4d0b..91befb16b 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -184,6 +184,56 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
}
/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx4_priv_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (priv->hw_csum) {
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ }
+ if (priv->hw_csum_l2tun)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t port_supp_offloads = mlx4_priv_get_tx_port_offloads(priv);
+
+ /* There are no Tx offloads which are per queue. */
+ if ((offloads & port_supp_offloads) != offloads)
+ return 0;
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return 0;
+ return 1;
+}
+
+/**
* DPDK callback to configure a Tx queue.
*
* @param dev
@@ -234,6 +284,15 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ if (!priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%lx don't match port "
+ "offloads 0x%lx or supported offloads 0x%lx",
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx4_priv_get_tx_port_offloads(priv));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -278,8 +337,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
- .csum = priv->hw_csum,
- .csum_l2tun = priv->hw_csum_l2tun,
+ .csum = !!(conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM)),
+ .csum_l2tun = !!(conf->offloads &
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
/* Enable Tx loopback for VF devices. */
.lb = !!priv->vf,
.bounce_buf = bounce_buf,
--
2.12.0
* [dpdk-dev] [PATCH 6/6] net/mlx4: convert to new Rx offloads API
2017-11-23 12:02 [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API Shahaf Shuler
` (4 preceding siblings ...)
2017-11-23 12:02 ` [dpdk-dev] [PATCH 5/6] net/mlx4: convert to new Tx offloads API Shahaf Shuler
@ 2017-11-23 12:02 ` Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
6 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2017-11-23 12:02 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
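A hedged sketch of how an application can inspect the split capabilities
this patch starts reporting (the helper name is illustrative):

#include <rte_ethdev.h>

static int
rx_checksum_supported(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	/* rx_queue_offload_capa holds offloads togglable per queue; the
	 * remaining bits of rx_offload_capa (VLAN filtering for mlx4)
	 * can only be requested port-wide. */
	return (info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM) ==
	       DEV_RX_OFFLOAD_CHECKSUM;
}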
---
drivers/net/mlx4/mlx4.c | 11 ++++++
drivers/net/mlx4/mlx4_ethdev.c | 10 ++---
drivers/net/mlx4/mlx4_flow.c | 5 ++-
drivers/net/mlx4/mlx4_rxq.c | 78 ++++++++++++++++++++++++++++++++++---
drivers/net/mlx4/mlx4_rxtx.h | 2 +
5 files changed, 93 insertions(+), 13 deletions(-)
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 38c545b1b..3205b58ac 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -101,6 +101,10 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
struct rte_flow_error error;
uint64_t supp_tx_offloads = mlx4_priv_get_tx_port_offloads(priv);
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t supp_rx_offloads =
+ (mlx4_get_rx_port_offloads() |
+ mlx4_priv_get_rx_queue_offloads(priv));
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
int ret;
if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
@@ -110,6 +114,13 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
tx_offloads, supp_tx_offloads);
return -rte_errno;
}
+ if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+ rte_errno = ENOTSUP;
+ ERROR("Some Rx offloads are not supported "
+ "requested 0x%lx supported 0x%lx\n",
+ rx_offloads, supp_rx_offloads);
+ return -rte_errno;
+ }
/* Prepare internal flow rules. */
ret = mlx4_flow_sync(priv, &error);
if (ret) {
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 63e00b1da..fef89e731 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -766,13 +766,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa = 0;
info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
- if (priv->hw_csum) {
- info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- }
+ info->rx_queue_offload_capa =
+ mlx4_priv_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx4_get_rx_port_offloads() |
+ info->rx_queue_offload_capa);
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 8b87b2989..654e72df3 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -1224,7 +1224,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
* - MAC flow rules are generated from @p dev->data->mac_addrs
* (@p priv->mac array).
* - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
+ * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
* is enabled and VLAN filters are configured.
*
* @param priv
@@ -1292,7 +1292,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
};
struct ether_addr *rule_mac = ð_spec.dst;
rte_be16_t *rule_vlan =
- priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
+ (priv->dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) &&
!priv->dev->data->promiscuous ?
&vlan_spec.tci :
NULL;
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 53313c56f..f8c1105dc 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -663,6 +663,66 @@ mlx4_rxq_detach(struct rxq *rxq)
}
/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_priv_get_rx_queue_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_SCATTER;
+
+ if (priv->hw_csum)
+ offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_port_offloads(void)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t queue_supp_offloads =
+ mlx4_priv_get_rx_queue_offloads(priv);
+ uint64_t port_supp_offloads = mlx4_get_rx_port_offloads();
+
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return 0;
+ if (((port_offloads ^ offloads) & port_supp_offloads))
+ return 0;
+ return 1;
+}
+
+/**
* DPDK callback to configure a Rx queue.
*
* @param dev
@@ -707,6 +767,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%lx don't match port "
+ "offloads 0x%lx or supported offloads 0x%lx",
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx4_get_rx_port_offloads() |
+ mlx4_priv_get_rx_queue_offloads(priv)));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_rx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -746,10 +816,8 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts_n = rte_log2_u32(desc),
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
- .csum = (priv->hw_csum &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
- .csum_l2tun = (priv->hw_csum_l2tun &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
+ .csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .csum_l2tun = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
.idx = idx,
@@ -761,7 +829,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 528600a18..4897e9471 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -163,6 +163,8 @@ int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
const struct rte_eth_rxconf *conf,
struct rte_mempool *mp);
void mlx4_rx_queue_release(void *dpdk_rxq);
+uint64_t mlx4_get_rx_port_offloads(void);
+uint64_t mlx4_priv_get_rx_queue_offloads(struct priv *priv);
/* mlx4_rxtx.c */
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
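A note on the mlx4 Rx offload validation in the patch above: it reduces to
two bitmask tests. The requested per-queue offloads must be a subset of
everything the PMD reports (queue-level plus port-level), and the
port-level bits must agree with the port configuration. A minimal
standalone sketch of the same logic, where QUEUE_SUPP and PORT_SUPP are
made-up masks rather than the real DEV_RX_OFFLOAD_* values:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative masks only; the driver derives these from
     * mlx4_priv_get_rx_queue_offloads() and mlx4_get_rx_port_offloads(). */
    #define QUEUE_SUPP UINT64_C(0x0e)
    #define PORT_SUPP  UINT64_C(0x200)

    static int
    rx_queue_offloads_allowed(uint64_t port_conf, uint64_t requested)
    {
        /* Every requested bit must be supported somewhere. */
        if ((requested & (QUEUE_SUPP | PORT_SUPP)) != requested)
            return 0;
        /* Port-level bits must not differ from the port configuration. */
        if ((port_conf ^ requested) & PORT_SUPP)
            return 0;
        return 1;
    }

    int
    main(void)
    {
        /* Queue asks for a port-level bit the port lacks: rejected. */
        printf("%d\n", rx_queue_offloads_allowed(0, PORT_SUPP));
        /* Queue config consistent with port config: accepted. */
        printf("%d\n", rx_queue_offloads_allowed(PORT_SUPP,
                                                 PORT_SUPP | QUEUE_SUPP));
        return 0;
    }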
* [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev offloads API
2017-11-23 12:02 [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API Shahaf Shuler
` (5 preceding siblings ...)
2017-11-23 12:02 ` [dpdk-dev] [PATCH 6/6] net/mlx4: convert to new Rx " Shahaf Shuler
@ 2018-01-03 7:16 ` Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 1/7] net/mlx5: change pkt burst select function prototype Shahaf Shuler
` (7 more replies)
6 siblings, 8 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-03 7:16 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
This series is to convert mlx4 and mlx5 PMDs to the new offloads API [1].
On v2:
- New design to hold PMD-specific args and combine
them with the requested offloads.
- Fix missing IPV4 checksum flag on vector function selection.
- Verify Txq flags ignore bit before checking for valid offloads
configuration.
- Removed strict offloads check from mlx4.
[1] http://dpdk.org/ml/archives/dev/2017-October/077329.html
Nelio Laranjeiro (1):
net/mlx5: rename counter set in configuration
Shahaf Shuler (6):
net/mlx5: change pkt burst select function prototype
net/mlx5: add device configuration structure
net/mlx5: convert to new Tx offloads API
net/mlx5: convert to new Rx offloads API
net/mlx4: convert to new Tx offloads API
net/mlx4: convert to new Rx offloads API
doc/guides/nics/mlx5.rst | 15 +-
drivers/net/mlx4/mlx4_ethdev.c | 17 +--
drivers/net/mlx4/mlx4_flow.c | 5 +-
drivers/net/mlx4/mlx4_rxq.c | 78 ++++++++++-
drivers/net/mlx4/mlx4_rxtx.h | 3 +
drivers/net/mlx4/mlx4_txq.c | 71 +++++++++-
drivers/net/mlx5/mlx5.c | 190 +++++++++----------------
drivers/net/mlx5/mlx5.h | 57 +++++---
drivers/net/mlx5/mlx5_ethdev.c | 113 ++++++++-------
drivers/net/mlx5/mlx5_flow.c | 2 +-
drivers/net/mlx5/mlx5_rxq.c | 121 +++++++++++++---
drivers/net/mlx5/mlx5_rxtx.c | 6 +-
drivers/net/mlx5/mlx5_rxtx.h | 10 +-
drivers/net/mlx5/mlx5_rxtx_vec.c | 40 +++---
drivers/net/mlx5/mlx5_rxtx_vec.h | 12 ++
drivers/net/mlx5/mlx5_trigger.c | 4 +-
drivers/net/mlx5/mlx5_txq.c | 254 +++++++++++++++++++++++++---------
drivers/net/mlx5/mlx5_vlan.c | 7 +-
18 files changed, 662 insertions(+), 343 deletions(-)
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v2 1/7] net/mlx5: change pkt burst select function prototype
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
@ 2018-01-03 7:16 ` Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 2/7] net/mlx5: add device configuration structure Shahaf Shuler
` (6 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-03 7:16 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Change the function prototype to return the function pointer of the
selected Tx/Rx burst function instead of assigning it directly to the
device context.
This change makes it possible to use the select functions to query which
burst function will be chosen according to the device configuration.
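For instance, a queue setup stage can then query the selection without
assigning anything to the device; a short sketch, assuming the mlx5
function names from this series and a cqe_n variable from the Tx queue
creation path:

    eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, dev);

    /* Size the CQ differently when an enhanced MPW burst is selected,
     * without touching dev->tx_pkt_burst yet. */
    if (tx_pkt_burst == mlx5_tx_burst_empw)
        cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;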
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5.c | 11 ++++++++--
drivers/net/mlx5/mlx5.h | 4 ++--
drivers/net/mlx5/mlx5_ethdev.c | 41 +++++++++++++++++++++---------------
drivers/net/mlx5/mlx5_trigger.c | 4 ++--
4 files changed, 37 insertions(+), 23 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cd66fe162..0192815f2 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -712,8 +712,15 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
err = -err;
goto error;
}
- priv_dev_select_rx_function(priv, eth_dev);
- priv_dev_select_tx_function(priv, eth_dev);
+ /*
+ * Ethdev pointer is still required as input since
+ * the primary device is not accessible from the
+ * secondary process.
+ */
+ eth_dev->rx_pkt_burst =
+ priv_select_rx_function(priv, eth_dev);
+ eth_dev->tx_pkt_burst =
+ priv_select_tx_function(priv, eth_dev);
continue;
}
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e6a69b823..3e3259b55 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -206,8 +206,8 @@ void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
-void priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev);
-void priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev);
+eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *);
+eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *);
/* mlx5_mac.c */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 282ef241e..28183534a 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1325,8 +1325,8 @@ priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
if (err)
return err;
- priv_dev_select_tx_function(priv, dev);
- priv_dev_select_rx_function(priv, dev);
+ dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
+ dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
} else {
err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
if (err)
@@ -1386,32 +1386,36 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
* Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * Pointer to selected Tx burst function.
*/
-void
-priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_tx_burst_t
+priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
+ eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+
assert(priv != NULL);
- assert(dev != NULL);
- dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
if (priv->mps == MLX5_MPW_ENHANCED) {
if (priv_check_vec_tx_support(priv) > 0) {
if (priv_check_raw_vec_tx_support(priv) > 0)
- dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
- dev->tx_pkt_burst = mlx5_tx_burst_vec;
+ tx_pkt_burst = mlx5_tx_burst_vec;
DEBUG("selected Enhanced MPW TX vectorized function");
} else {
- dev->tx_pkt_burst = mlx5_tx_burst_empw;
+ tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
} else if (priv->mps && priv->txq_inline) {
- dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+ tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
} else if (priv->mps) {
- dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+ tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
+ return tx_pkt_burst;
}
/**
@@ -1421,16 +1425,19 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
* Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * Pointer to selected Rx burst function.
*/
-void
-priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_rx_burst_t
+priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
+ eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
+
assert(priv != NULL);
- assert(dev != NULL);
if (priv_check_vec_rx_support(priv) > 0) {
- dev->rx_pkt_burst = mlx5_rx_burst_vec;
+ rx_pkt_burst = mlx5_rx_burst_vec;
DEBUG("selected RX vectorized function");
- } else {
- dev->rx_pkt_burst = mlx5_rx_burst;
}
+ return rx_pkt_burst;
}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 88f60a01d..1a20967a2 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -152,7 +152,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
goto error;
}
/* Update send callback. */
- priv_dev_select_tx_function(priv, dev);
+ dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
err = priv_rxq_start(priv);
if (err) {
ERROR("%p: RXQ allocation failed: %s",
@@ -160,7 +160,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
goto error;
}
/* Update receive callback. */
- priv_dev_select_rx_function(priv, dev);
+ dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
err = priv_dev_traffic_enable(priv, dev);
if (err) {
ERROR("%p: an error occurred while configuring control flows:"
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v2 2/7] net/mlx5: add device configuration structure
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 1/7] net/mlx5: change pkt burst select function prototype Shahaf Shuler
@ 2018-01-03 7:16 ` Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 3/7] net/mlx5: rename counter set in configuration Shahaf Shuler
` (5 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-03 7:16 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Move the device configuration and feature capabilities into their own
structure.
This structure is filled by mlx5_pci_probe(); outside of this function it
should be treated as *read only*.
This configuration struct will be used during Tx/Rx queue setup to select
the queue parameters based on the user configuration and device
capabilities.
In addition, it will be used by the burst selection function to decide on
the best packet burst function to use.
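To illustrate the intended read-only usage, code outside the probe path
only dereferences the capability bits; a sketch in the spirit of the Rx
queue hunks below:

    struct mlx5_dev_config *config = &priv->config;

    /* Capabilities are read, never written, outside mlx5_pci_probe(). */
    if (config->hw_padding) {
        attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
        attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
    }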
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5.c | 178 +++++++++++----------------------
drivers/net/mlx5/mlx5.h | 53 ++++++----
drivers/net/mlx5/mlx5_ethdev.c | 26 ++---
drivers/net/mlx5/mlx5_flow.c | 2 +-
drivers/net/mlx5/mlx5_rxq.c | 21 ++--
drivers/net/mlx5/mlx5_rxtx_vec.c | 10 +-
drivers/net/mlx5/mlx5_txq.c | 182 ++++++++++++++++++++++------------
drivers/net/mlx5/mlx5_vlan.c | 4 +-
8 files changed, 245 insertions(+), 231 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0192815f2..fdd4710f1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -94,9 +94,6 @@
/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"
-/* Default PMD specific parameter value. */
-#define MLX5_ARG_UNSET (-1)
-
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -106,17 +103,6 @@
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
-struct mlx5_args {
- int cqe_comp;
- int txq_inline;
- int txqs_inline;
- int mps;
- int mpw_hdr_dseg;
- int inline_max_packet_sz;
- int tso;
- int tx_vec_en;
- int rx_vec_en;
-};
/**
* Retrieve integer value from environment variable.
*
@@ -399,7 +385,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
- struct mlx5_args *args = opaque;
+ struct mlx5_dev_config *config = opaque;
unsigned long tmp;
errno = 0;
@@ -409,23 +395,23 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
return errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
- args->cqe_comp = !!tmp;
+ config->cqe_comp = !!tmp;
} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
- args->txq_inline = tmp;
+ config->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
- args->txqs_inline = tmp;
+ config->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
- args->mps = !!tmp;
+ config->mps = !!tmp ? config->mps : 0;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
- args->mpw_hdr_dseg = !!tmp;
+ config->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
- args->inline_max_packet_sz = tmp;
+ config->inline_max_packet_sz = tmp;
} else if (strcmp(MLX5_TSO, key) == 0) {
- args->tso = !!tmp;
+ config->tso = !!tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
- args->tx_vec_en = !!tmp;
+ config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
- args->rx_vec_en = !!tmp;
+ config->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -436,8 +422,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
/**
* Parse device parameters.
*
- * @param priv
- * Pointer to private structure.
+ * @param config
+ * Pointer to device configuration structure.
* @param devargs
* Device arguments structure.
*
@@ -445,7 +431,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* 0 on success, errno value on failure.
*/
static int
-mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
+mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
@@ -473,7 +459,7 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
for (i = 0; (params[i] != NULL); ++i) {
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
- mlx5_args_check, args);
+ mlx5_args_check, config);
if (ret != 0) {
rte_kvargs_free(kvlist);
return ret;
@@ -487,38 +473,6 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
static struct rte_pci_driver mlx5_driver;
/**
- * Assign parameters from args into priv, only non default
- * values are considered.
- *
- * @param[out] priv
- * Pointer to private structure.
- * @param[in] args
- * Pointer to args values.
- */
-static void
-mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
-{
- if (args->cqe_comp != MLX5_ARG_UNSET)
- priv->cqe_comp = args->cqe_comp;
- if (args->txq_inline != MLX5_ARG_UNSET)
- priv->txq_inline = args->txq_inline;
- if (args->txqs_inline != MLX5_ARG_UNSET)
- priv->txqs_inline = args->txqs_inline;
- if (args->mps != MLX5_ARG_UNSET)
- priv->mps = args->mps ? priv->mps : 0;
- if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
- priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
- if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
- priv->inline_max_packet_sz = args->inline_max_packet_sz;
- if (args->tso != MLX5_ARG_UNSET)
- priv->tso = args->tso;
- if (args->tx_vec_en != MLX5_ARG_UNSET)
- priv->tx_vec_en = args->tx_vec_en;
- if (args->rx_vec_en != MLX5_ARG_UNSET)
- priv->rx_vec_en = args->rx_vec_en;
-}
-
-/**
* DPDK callback to register a PCI device.
*
* This function creates an Ethernet device for each port of a given
@@ -671,16 +625,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ether_addr mac;
uint16_t num_vfs = 0;
struct ibv_device_attr_ex device_attr;
- struct mlx5_args args = {
- .cqe_comp = MLX5_ARG_UNSET,
+ struct mlx5_dev_config config = {
+ .cqe_comp = cqe_comp,
+ .mps = mps,
+ .tunnel_en = tunnel_en,
+ .tso = 0,
+ .tx_vec_en = 1,
+ .rx_vec_en = 1,
+ .mpw_hdr_dseg = 0,
.txq_inline = MLX5_ARG_UNSET,
.txqs_inline = MLX5_ARG_UNSET,
- .mps = MLX5_ARG_UNSET,
- .mpw_hdr_dseg = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
- .tso = MLX5_ARG_UNSET,
- .tx_vec_en = MLX5_ARG_UNSET,
- .rx_vec_en = MLX5_ARG_UNSET,
};
mlx5_dev[idx].ports |= test;
@@ -779,106 +734,89 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
- priv->mps = mps; /* Enable MPW by default if supported. */
- priv->cqe_comp = cqe_comp;
- priv->tunnel_en = tunnel_en;
- /* Enable vector by default if supported. */
- priv->tx_vec_en = 1;
- priv->rx_vec_en = 1;
- err = mlx5_args(&args, pci_dev->device.devargs);
+ err = mlx5_args(&config, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
goto port_error;
}
- mlx5_args_assign(priv, &args);
if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
ERROR("ibv_query_device_ex() failed");
goto port_error;
}
- priv->hw_csum =
- !!(device_attr_ex.device_cap_flags_ex &
- IBV_DEVICE_RAW_IP_CSUM);
+ config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
+ IBV_DEVICE_RAW_IP_CSUM);
DEBUG("checksum offloading is %ssupported",
- (priv->hw_csum ? "" : "not "));
+ (config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
- priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
- IBV_DEVICE_VXLAN_SUPPORT);
+ config.hw_csum_l2tun =
+ !!(exp_device_attr.exp_device_cap_flags &
+ IBV_DEVICE_VXLAN_SUPPORT);
#endif
DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
- (priv->hw_csum_l2tun ? "" : "not "));
+ (config.hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- priv->counter_set_supported = !!(device_attr.max_counter_sets);
+ config.counter_set_supported =
+ !!(device_attr.max_counter_sets);
ibv_describe_counter_set(ctx, 0, &cs_desc);
DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
cs_desc.counter_type, cs_desc.num_of_cs,
cs_desc.attributes);
#endif
- priv->ind_table_max_size =
+ config.ind_table_max_size =
device_attr_ex.rss_caps.max_rwq_indirection_table_size;
/* Remove this check once DPDK supports larger/variable
* indirection tables. */
- if (priv->ind_table_max_size >
+ if (config.ind_table_max_size >
(unsigned int)ETH_RSS_RETA_SIZE_512)
- priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
DEBUG("maximum RX indirection table size is %u",
- priv->ind_table_max_size);
- priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
+ config.ind_table_max_size);
+ config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
DEBUG("VLAN stripping is %ssupported",
- (priv->hw_vlan_strip ? "" : "not "));
+ (config.hw_vlan_strip ? "" : "not "));
- priv->hw_fcs_strip =
+ config.hw_fcs_strip =
!!(device_attr_ex.orig_attr.device_cap_flags &
IBV_WQ_FLAGS_SCATTER_FCS);
DEBUG("FCS stripping configuration is %ssupported",
- (priv->hw_fcs_strip ? "" : "not "));
+ (config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
+ config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
DEBUG("hardware RX end alignment padding is %ssupported",
- (priv->hw_padding ? "" : "not "));
+ (config.hw_padding ? "" : "not "));
priv_get_num_vfs(priv, &num_vfs);
- priv->sriov = (num_vfs || sriov);
- priv->tso = ((priv->tso) &&
- (device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
- if (priv->tso)
- priv->max_tso_payload_sz =
- device_attr_ex.tso_caps.max_tso;
- if (priv->mps && !mps) {
+ config.sriov = (num_vfs || sriov);
+ if (config.tso)
+ config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (config.tso)
+ config.tso_max_payload_sz =
+ device_attr_ex.tso_caps.max_tso;
+ if (config.mps && !mps) {
ERROR("multi-packet send not supported on this device"
" (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
- } else if (priv->mps && priv->tso) {
+ } else if (config.mps && config.tso) {
WARN("multi-packet send not supported in conjunction "
"with TSO. MPS disabled");
- priv->mps = 0;
+ config.mps = 0;
}
INFO("%sMPS is %s",
- priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
- priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- /* Set default values for Enhanced MPW, a.k.a MPWv2. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
- if (args.txqs_inline == MLX5_ARG_UNSET)
- priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
- if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
- priv->inline_max_packet_sz =
- MLX5_EMPW_MAX_INLINE_LEN;
- if (args.txq_inline == MLX5_ARG_UNSET)
- priv->txq_inline = MLX5_WQE_SIZE_MAX -
- MLX5_WQE_SIZE;
- }
- if (priv->cqe_comp && !cqe_comp) {
+ config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+ config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ if (config.cqe_comp && !cqe_comp) {
WARN("Rx CQE compression isn't supported");
- priv->cqe_comp = 0;
+ config.cqe_comp = 0;
}
/* Configure the first MAC address by default. */
if (priv_get_mac(priv, &mac.addr_bytes)) {
@@ -945,6 +883,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
mlx5_link_update(priv->dev, 1);
+ /* Store device configuration on private structure. */
+ priv->config = config;
continue;
port_error:
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3e3259b55..04f0b2557 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -90,6 +90,39 @@ struct mlx5_xstats_ctrl {
/* Flow list . */
TAILQ_HEAD(mlx5_flows, rte_flow);
+/* Default PMD specific parameter value. */
+#define MLX5_ARG_UNSET (-1)
+
+/*
+ * Device configuration structure.
+ *
+ * Merged configuration from:
+ *
+ * - Device capabilities,
+ * - User device parameters that disable features.
+ */
+struct mlx5_dev_config {
+ unsigned int hw_csum:1; /* Checksum offload is supported. */
+ unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
+ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
+ unsigned int hw_padding:1; /* End alignment padding is supported. */
+ unsigned int sriov:1; /* This is a VF or PF with VF devices. */
+ unsigned int mps:2; /* Multi-packet send supported mode. */
+ unsigned int tunnel_en:1; /* Whether tunnel is supported. */
+ unsigned int counter_set_supported:1; /* Counter set is supported. */
+ unsigned int cqe_comp:1; /* CQE compression is enabled. */
+ unsigned int tso:1; /* Whether TSO is enabled. */
+ unsigned int tx_vec_en:1; /* Tx vector is enabled. */
+ unsigned int rx_vec_en:1; /* Rx vector is enabled. */
+ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+ unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
+ unsigned int ind_table_max_size; /* Maximum indirection table size. */
+ int txq_inline; /* Maximum packet size for inlining. */
+ int txqs_inline; /* Queue number threshold for inlining. */
+ int inline_max_packet_sz; /* Max packet size for inlining. */
+};
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device of master process. */
struct ibv_context *ctx; /* Verbs context. */
@@ -102,27 +135,8 @@ struct priv {
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
uint8_t port; /* Physical port number. */
- unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
- unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
- unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
- unsigned int hw_padding:1; /* End alignment padding is supported. */
- unsigned int sriov:1; /* This is a VF or PF with VF devices. */
- unsigned int mps:2; /* Multi-packet send mode (0: disabled). */
- unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
- unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
unsigned int pending_alarm:1; /* An alarm is pending. */
- unsigned int tso:1; /* Whether TSO is supported. */
- unsigned int tunnel_en:1;
unsigned int isolated:1; /* Whether isolated mode is enabled. */
- unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
- unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
- unsigned int counter_set_supported:1; /* Counter set is supported. */
- /* Whether Tx offloads for tunneled packets are supported. */
- unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
- unsigned int txq_inline; /* Maximum packet size for inlining. */
- unsigned int txqs_inline; /* Queue number threshold for inlining. */
- unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
@@ -149,6 +163,7 @@ struct priv {
rte_spinlock_t lock; /* Lock for control functions. */
int primary_socket; /* Unix socket for primary process. */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
+ struct mlx5_dev_config config; /* Device configuration. */
};
/**
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 28183534a..d2f98769e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -579,7 +579,7 @@ dev_configure(struct rte_eth_dev *dev)
(void *)dev, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
- if (rxqs_n > priv->ind_table_max_size) {
+ if (rxqs_n > priv->config.ind_table_max_size) {
ERROR("cannot handle this many RX queues (%u)", rxqs_n);
return EINVAL;
}
@@ -592,7 +592,7 @@ dev_configure(struct rte_eth_dev *dev)
* maximum indirection table size for better balancing.
* The result is always rounded to the next power of two. */
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
- priv->ind_table_max_size :
+ priv->config.ind_table_max_size :
rxqs_n));
if (priv_rss_reta_index_resize(priv, reta_idx_n))
return ENOMEM;
@@ -641,6 +641,7 @@ void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
unsigned int max;
char ifname[IF_NAMESIZE];
@@ -663,31 +664,31 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa =
- (priv->hw_csum ?
+ (config->hw_csum ?
(DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM) :
0) |
- (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
+ (priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
DEV_RX_OFFLOAD_TIMESTAMP;
- if (!priv->mps)
+ if (!config->mps)
info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
- if (priv->hw_csum)
+ if (config->hw_csum)
info->tx_offload_capa |=
(DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
- if (priv->tso)
+ if (config->tso)
info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- if (priv->tunnel_en)
+ if (config->tunnel_en)
info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
- priv->reta_idx_n : priv->ind_table_max_size;
+ priv->reta_idx_n : config->ind_table_max_size;
info->hash_key_size = priv->rss_conf.rss_key_len;
info->speed_capa = priv->link_speed_capa;
priv_unlock(priv);
@@ -1394,10 +1395,11 @@ eth_tx_burst_t
priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+ struct mlx5_dev_config *config = &priv->config;
assert(priv != NULL);
/* Select appropriate TX function. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
+ if (config->mps == MLX5_MPW_ENHANCED) {
if (priv_check_vec_tx_support(priv) > 0) {
if (priv_check_raw_vec_tx_support(priv) > 0)
tx_pkt_burst = mlx5_tx_burst_raw_vec;
@@ -1408,10 +1410,10 @@ priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
- } else if (priv->mps && priv->txq_inline) {
+ } else if (config->mps && (config->txq_inline > 0)) {
tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if (priv->mps) {
+ } else if (config->mps) {
tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 6605cfd9f..8ad07b839 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -771,7 +771,7 @@ priv_flow_convert_actions(struct priv *priv,
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
- priv->counter_set_supported) {
+ priv->config.counter_set_supported) {
parser->count = 1;
} else {
goto exit_action_not_supported;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index a4cdd374a..81363ecd7 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -570,6 +570,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
unsigned int i;
int ret = 0;
struct mlx5dv_obj obj;
+ struct mlx5_dev_config *config = &priv->config;
assert(rxq_data);
assert(!rxq_ctrl->ibv);
@@ -606,7 +607,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
- if (priv->cqe_comp && !rxq_data->hw_timestamp) {
+ if (config->cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
@@ -616,7 +617,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
*/
if (rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
- } else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+ } else if (config->cqe_comp && rxq_data->hw_timestamp) {
DEBUG("Rx CQE compression is disabled for HW timestamp");
}
tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
@@ -651,7 +652,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- if (priv->hw_padding) {
+ if (config->hw_padding) {
attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
@@ -878,9 +879,11 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
{
struct rte_eth_dev *dev = priv->dev;
struct mlx5_rxq_ctrl *tmpl;
- const uint16_t desc_n =
- desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ eth_rx_burst_t rx_pkt_burst = priv_select_rx_function(priv, priv->dev);
+ const uint16_t desc_n = desc + ((rx_pkt_burst == mlx5_rx_burst_vec) ?
+ MLX5_VPMD_DESCS_PER_LOOP : 0);
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ struct mlx5_dev_config *config = &priv->config;
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
@@ -938,20 +941,20 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- if (priv->hw_csum)
+ if (config->hw_csum)
tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- if (priv->hw_csum_l2tun)
+ if (config->hw_csum_l2tun)
tmpl->rxq.csum_l2tun =
!!dev->data->dev_conf.rxmode.hw_ip_checksum;
tmpl->rxq.hw_timestamp =
!!dev->data->dev_conf.rxmode.hw_timestamp;
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
+ tmpl->rxq.vlan_strip = (config->hw_vlan_strip &&
!!dev->data->dev_conf.rxmode.hw_vlan_strip);
/* By default, FCS (CRC) is stripped by hardware. */
if (dev->data->dev_conf.rxmode.hw_strip_crc) {
tmpl->rxq.crc_present = 0;
- } else if (priv->hw_fcs_strip) {
+ } else if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
} else {
WARN("%p: CRC stripping has been disabled but will still"
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 8d23dae7e..761ed4971 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -287,10 +287,10 @@ priv_check_raw_vec_tx_support(struct priv *priv)
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
- if (!priv->tx_vec_en ||
+ if (!priv->config.tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
- priv->mps != MLX5_MPW_ENHANCED ||
- priv->tso)
+ priv->config.mps != MLX5_MPW_ENHANCED ||
+ priv->config.tso)
return -ENOTSUP;
return 1;
}
@@ -310,7 +310,7 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+ if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return 1;
}
@@ -329,7 +329,7 @@ priv_check_vec_rx_support(struct priv *priv)
{
uint16_t i;
- if (!priv->rx_vec_en)
+ if (!priv->config.rx_vec_en)
return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 89b16fda2..3e2075c79 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -273,6 +273,25 @@ priv_tx_uar_remap(struct priv *priv, int fd)
}
/**
+ * Check if the burst function is using eMPW.
+ *
+ * @param tx_pkt_burst
+ * Tx burst function pointer.
+ *
+ * @return
+ * 1 if the burst function is using eMPW, 0 otherwise.
+ */
+static int
+is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
+{
+ if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
+ tx_pkt_burst == mlx5_tx_burst_vec ||
+ tx_pkt_burst == mlx5_tx_burst_empw)
+ return 1;
+ return 0;
+}
+
+/**
* Create the Tx queue Verbs object.
*
* @param priv
@@ -302,6 +321,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
struct mlx5dv_cq cq_info;
struct mlx5dv_obj obj;
const int desc = 1 << txq_data->elts_n;
+ eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
int ret = 0;
assert(txq_data);
@@ -316,7 +336,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
};
cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
- if (priv->mps == MLX5_MPW_ENHANCED)
+ if (is_empw_burst_func(tx_pkt_burst))
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
@@ -541,83 +561,74 @@ mlx5_priv_txq_ibv_verify(struct priv *priv)
}
/**
- * Create a DPDK Tx queue.
+ * Set Tx queue parameters from device configuration.
*
- * @param priv
- * Pointer to private structure.
- * @param idx
- * TX queue index.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- *
- * @return
- * A DPDK queue object on success.
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket,
- const struct rte_eth_txconf *conf)
+static void
+txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
+ struct priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
const unsigned int max_tso_inline =
((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- struct mlx5_txq_ctrl *tmpl;
-
- tmpl = rte_calloc_socket("TXQ", 1,
- sizeof(*tmpl) +
- desc * sizeof(struct rte_mbuf *),
- 0, socket);
- if (!tmpl)
- return NULL;
- assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.flags = conf->txq_flags;
- tmpl->priv = priv;
- tmpl->socket = socket;
- tmpl->txq.elts_n = log2above(desc);
- if (priv->mps == MLX5_MPW_ENHANCED)
- tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
- /* MRs will be registered in mp2mr[] later. */
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.orig_attr.max_sge);
- if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ unsigned int txq_inline;
+ unsigned int txqs_inline;
+ unsigned int inline_max_packet_sz;
+ eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+
+ txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txq_inline;
+ txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txqs_inline;
+ inline_max_packet_sz =
+ (config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
+ 0 : config->inline_max_packet_sz;
+ if (is_empw_func) {
+ if (config->txq_inline == MLX5_ARG_UNSET)
+ txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
+ if (config->txqs_inline == MLX5_ARG_UNSET)
+ txqs_inline = MLX5_EMPW_MIN_TXQS;
+ if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
+ inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+ txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
+ txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
+ }
+ if (txq_inline && priv->txqs_n >= txqs_inline) {
unsigned int ds_cnt;
- tmpl->txq.max_inline =
- ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ txq_ctrl->txq.max_inline =
+ ((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
/* TSO and MPS can't be enabled concurrently. */
- assert(!priv->tso || !priv->mps);
- if (priv->mps == MLX5_MPW_ENHANCED) {
- tmpl->txq.inline_max_packet_sz =
- priv->inline_max_packet_sz;
+ assert(!config->tso || !config->mps);
+ if (is_empw_func) {
/* To minimize the size of data set, avoid requesting
* too large WQ.
*/
- tmpl->max_inline_data =
- ((RTE_MIN(priv->txq_inline,
- priv->inline_max_packet_sz) +
+ txq_ctrl->max_inline_data =
+ ((RTE_MIN(txq_inline,
+ inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (priv->tso) {
- int inline_diff = tmpl->txq.max_inline - max_tso_inline;
+ } else if (config->tso) {
+ int inline_diff = txq_ctrl->txq.max_inline -
+ max_tso_inline;
/*
* Adjust inline value as Verbs aggregates
* tso_inline and txq_inline fields.
*/
- tmpl->max_inline_data = inline_diff > 0 ?
+ txq_ctrl->max_inline_data = inline_diff > 0 ?
inline_diff *
RTE_CACHE_LINE_SIZE :
0;
} else {
- tmpl->max_inline_data =
- tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->max_inline_data =
+ txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
}
/*
* Check if the inline size is too large in a way which
@@ -627,7 +638,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
* WQE ETH (1 DS)
* Inline part (N DS)
*/
- ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+ ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
if (ds_cnt > MLX5_DSEG_MAX) {
unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
MLX5_WQE_DWORD_SIZE;
@@ -636,18 +647,61 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
RTE_CACHE_LINE_SIZE);
WARN("txq inline is too large (%d) setting it to "
"the maximum possible: %d\n",
- priv->txq_inline, max_inline);
- tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
+ txq_inline, max_inline);
+ txq_ctrl->txq.max_inline = max_inline /
+ RTE_CACHE_LINE_SIZE;
}
}
- if (priv->tso) {
- tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
- tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
- max_tso_inline);
- tmpl->txq.tso_en = 1;
+ if (config->tso) {
+ txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
+ max_tso_inline);
+ txq_ctrl->txq.tso_en = 1;
}
- if (priv->tunnel_en)
- tmpl->txq.tunnel_en = 1;
+ txq_ctrl->txq.tunnel_en = config->tunnel_en;
+}
+
+/**
+ * Create a DPDK Tx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ *
+ * @return
+ * A DPDK queue object on success.
+ */
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_txconf *conf)
+{
+ struct mlx5_txq_ctrl *tmpl;
+
+ tmpl = rte_calloc_socket("TXQ", 1,
+ sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl)
+ return NULL;
+ assert(desc > MLX5_TX_COMP_THRESH);
+ tmpl->txq.flags = conf->txq_flags;
+ tmpl->priv = priv;
+ tmpl->socket = socket;
+ tmpl->txq.elts_n = log2above(desc);
+ txq_set_params(tmpl);
+ /* MRs will be registered in mp2mr[] later. */
+ DEBUG("priv->device_attr.max_qp_wr is %d",
+ priv->device_attr.orig_attr.max_qp_wr);
+ DEBUG("priv->device_attr.max_sge is %d",
+ priv->device_attr.orig_attr.max_sge);
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 198a69e3c..2ab865264 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -165,7 +165,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
struct priv *priv = dev->data->dev_private;
/* Validate hw support */
- if (!priv->hw_vlan_strip) {
+ if (!priv->config.hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
return;
}
@@ -198,7 +198,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
- if (!priv->hw_vlan_strip) {
+ if (!priv->config.hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
return 0;
}
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v2 3/7] net/mlx5: rename counter set in configuration
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 1/7] net/mlx5: change pkt burst select function prototype Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 2/7] net/mlx5: add device configuration structure Shahaf Shuler
@ 2018-01-03 7:16 ` Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 4/7] net/mlx5: convert to new Tx offloads API Shahaf Shuler
` (4 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-03 7:16 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Counter_set is a counter used for flows when its support is available.
Rename it to flow counter.
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5.c | 3 +--
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_flow.c | 2 +-
3 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fdd4710f1..ca44a0a59 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -759,8 +759,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
(config.hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- config.counter_set_supported =
- !!(device_attr.max_counter_sets);
+ config.flow_counter_en = !!(device_attr.max_counter_sets);
ibv_describe_counter_set(ctx, 0, &cs_desc);
DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
cs_desc.counter_type, cs_desc.num_of_cs,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 04f0b2557..171b3a933 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -110,7 +110,7 @@ struct mlx5_dev_config {
unsigned int sriov:1; /* This is a VF or PF with VF devices. */
unsigned int mps:2; /* Multi-packet send supported mode. */
unsigned int tunnel_en:1; /* Whether tunnel is supported. */
- unsigned int counter_set_supported:1; /* Counter set is supported. */
+ unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int tso:1; /* Whether TSO is enabled. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ad07b839..334a4f4ba 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -771,7 +771,7 @@ priv_flow_convert_actions(struct priv *priv,
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
- priv->config.counter_set_supported) {
+ priv->config.flow_counter_en) {
parser->count = 1;
} else {
goto exit_action_not_supported;
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v2 4/7] net/mlx5: convert to new Tx offloads API
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (2 preceding siblings ...)
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 3/7] net/mlx5: rename counter set in configuration Shahaf Shuler
@ 2018-01-03 7:16 ` Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 5/7] net/mlx5: convert to new Rx " Shahaf Shuler
` (3 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-03 7:16 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
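Under the new API an application requests Tx offloads per port through
rte_eth_conf and signals per-queue offload awareness with
ETH_TXQ_FLAGS_IGNORE. A usage sketch (port_id and nb_desc are assumed to
be defined elsewhere; error handling omitted):

    struct rte_eth_conf dev_conf = {
        .txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
                           DEV_TX_OFFLOAD_MULTI_SEGS,
    };
    struct rte_eth_txconf txq_conf = {
        .txq_flags = ETH_TXQ_FLAGS_IGNORE, /* honor offloads, not flags */
        .offloads = dev_conf.txmode.offloads,
    };

    rte_eth_dev_configure(port_id, 1, 1, &dev_conf);
    rte_eth_tx_queue_setup(port_id, 0, nb_desc, rte_socket_id(),
                           &txq_conf);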
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
doc/guides/nics/mlx5.rst | 15 +++----
drivers/net/mlx5/mlx5.c | 18 ++------
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_ethdev.c | 37 ++++++++--------
drivers/net/mlx5/mlx5_rxtx.c | 6 ++-
drivers/net/mlx5/mlx5_rxtx.h | 7 +--
drivers/net/mlx5/mlx5_rxtx_vec.c | 32 +++++++-------
drivers/net/mlx5/mlx5_rxtx_vec.h | 12 ++++++
drivers/net/mlx5/mlx5_txq.c | 80 ++++++++++++++++++++++++++++++++---
9 files changed, 142 insertions(+), 67 deletions(-)
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 154db64d7..bdc2216c0 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -262,8 +262,9 @@ Run-time configuration
Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
in the same descriptor.
- This option cannot be used in conjunction with ``tso`` below. When ``tso``
- is set, ``txq_mpw_en`` is disabled.
+ This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``.
+ When those offloads are requested, the MPS send function will not be used.
It is currently only supported on the ConnectX-4 Lx and ConnectX-5
families of adapters. Enabled by default.
@@ -284,17 +285,15 @@ Run-time configuration
Effective only when Enhanced MPS is supported. The default value is 256.
-- ``tso`` parameter [int]
-
- A nonzero value enables hardware TSO.
- When hardware TSO is enabled, packets marked with TCP segmentation
- offload will be divided into segments by the hardware. Disabled by default.
-
- ``tx_vec_en`` parameter [int]
A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of
global Tx queues on the port is lesser than MLX5_VPMD_MIN_TXQS.
+ This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``.
+ When those offloads are requested, the MPS send function will not be used.
+
Enabled by default on ConnectX-5.
- ``rx_vec_en`` parameter [int]
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ca44a0a59..1c95f3520 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -85,9 +85,6 @@
/* Device parameter to limit the size of inlining packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
-/* Device parameter to enable hardware TSO offload. */
-#define MLX5_TSO "tso"
-
/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"
@@ -406,8 +403,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
config->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
config->inline_max_packet_sz = tmp;
- } else if (strcmp(MLX5_TSO, key) == 0) {
- config->tso = !!tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
@@ -440,7 +435,6 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
MLX5_TXQ_MPW_EN,
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
- MLX5_TSO,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
NULL,
@@ -629,7 +623,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
.cqe_comp = cqe_comp,
.mps = mps,
.tunnel_en = tunnel_en,
- .tso = 0,
.tx_vec_en = 1,
.rx_vec_en = 1,
.mpw_hdr_dseg = 0,
@@ -793,10 +786,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv_get_num_vfs(priv, &num_vfs);
config.sriov = (num_vfs || sriov);
- if (config.tso)
- config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
+ config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
if (config.tso)
config.tso_max_payload_sz =
device_attr_ex.tso_caps.max_tso;
@@ -805,10 +797,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
" (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
- } else if (config.mps && config.tso) {
- WARN("multi-packet send not supported in conjunction "
- "with TSO. MPS disabled");
- config.mps = 0;
}
INFO("%sMPS is %s",
config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 171b3a933..8ee522069 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -112,7 +112,7 @@ struct mlx5_dev_config {
unsigned int tunnel_en:1; /* Whether tunnel is supported. */
unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
- unsigned int tso:1; /* Whether TSO is enabled. */
+ unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
unsigned int rx_vec_en:1; /* Rx vector is enabled. */
unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index d2f98769e..8be4f43f7 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -551,7 +551,15 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-
+ uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+ if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+ ERROR("Some Tx offloads are not supported "
+ "requested 0x%lx supported 0x%lx\n",
+ tx_offloads, supp_tx_offloads);
+ return ENOTSUP;
+ }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -672,19 +680,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
(priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
DEV_RX_OFFLOAD_TIMESTAMP;
- if (!config->mps)
- info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
- if (config->hw_csum)
- info->tx_offload_capa |=
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
- if (config->tso)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- if (config->tunnel_en)
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
@@ -1392,16 +1388,23 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
* Pointer to selected Tx burst function.
*/
eth_tx_burst_t
-priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
+priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
{
eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
struct mlx5_dev_config *config = &priv->config;
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO));
+ int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
assert(priv != NULL);
/* Select appropriate TX function. */
+ if (vlan_insert || tso)
+ return tx_pkt_burst;
if (config->mps == MLX5_MPW_ENHANCED) {
- if (priv_check_vec_tx_support(priv) > 0) {
- if (priv_check_raw_vec_tx_support(priv) > 0)
+ if (priv_check_vec_tx_support(priv, dev) > 0) {
+ if (priv_check_raw_vec_tx_support(priv, dev) > 0)
tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
tx_pkt_burst = mlx5_tx_burst_vec;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 67e3db168..3b8f71c28 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1994,16 +1994,18 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
(void)priv;
+ (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
(void)priv;
+ (void)dev;
return -ENOTSUP;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index e70d52361..2728e8d5e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -201,7 +201,7 @@ struct mlx5_txq_data {
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
- uint32_t flags; /* Flags for Tx Queue. */
+ uint64_t offloads; /* Offloads for Tx Queue. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
@@ -293,6 +293,7 @@ int mlx5_priv_txq_release(struct priv *, uint16_t);
int mlx5_priv_txq_releasable(struct priv *, uint16_t);
int mlx5_priv_txq_verify(struct priv *);
void txq_alloc_elts(struct mlx5_txq_ctrl *);
+uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
/* mlx5_rxtx.c */
@@ -310,8 +311,8 @@ int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
/* Vectorized version of mlx5_rxtx.c */
-int priv_check_raw_vec_tx_support(struct priv *);
-int priv_check_vec_tx_support(struct priv *);
+int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *);
+int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *);
int rxq_check_vec_support(struct mlx5_rxq_data *);
int priv_check_vec_rx_support(struct priv *);
uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 761ed4971..f0530efbe 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -160,15 +160,15 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t ret;
/* Transmit multi-seg packets in the head of pkts list. */
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+ if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
NB_SEGS(pkts[nb_tx]) > 1)
nb_tx += txq_scatter_v(txq,
&pkts[nb_tx],
pkts_n - nb_tx);
n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
n = txq_count_contig_single_seg(&pkts[nb_tx], n);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
nb_tx += ret;
@@ -253,24 +253,20 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
*
* @param priv
* Pointer to private structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
+ struct rte_eth_dev *dev)
{
- uint16_t i;
-
- /* All the configured queues should support. */
- for (i = 0; i < priv->txqs_n; ++i) {
- struct mlx5_txq_data *txq = (*priv->txqs)[i];
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
- !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
- break;
- }
- if (i != priv->txqs_n)
+ /* Doesn't support any offload. */
+ if (offloads)
return -ENOTSUP;
return 1;
}
@@ -280,17 +276,21 @@ priv_check_raw_vec_tx_support(struct priv *priv)
*
* @param priv
* Pointer to private structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
if (!priv->config.tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
priv->config.mps != MLX5_MPW_ENHANCED ||
- priv->config.tso)
+ offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
return -ENOTSUP;
return 1;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 1f08ed0b2..7d7f016f1 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -40,6 +40,18 @@
#include "mlx5_autoconf.h"
#include "mlx5_prm.h"
+/* HW checksum offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
+ (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+
+/* HW offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_OFFLOAD_CAP \
+ (MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
/*
* Compile time sanity check for vectorized functions.
*/
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 3e2075c79..b81c85fed 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -116,6 +116,63 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
}
/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx5_priv_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT);
+ struct mlx5_dev_config *config = &priv->config;
+
+ if (config->hw_csum)
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (config->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (config->tunnel_en) {
+ if (config->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (config->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ }
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
+
+ /* There are no Tx offloads which are per queue. */
+ if ((offloads & port_supp_offloads) != offloads)
+ return 0;
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return 0;
+ return 1;
+}
+
+/**
* DPDK callback to configure a TX queue.
*
* @param dev
@@ -143,6 +200,20 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
int ret = 0;
priv_lock(priv);
+ /*
+ * Don't verify port offloads for application which
+ * use the old API.
+ */
+ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%lx don't match port "
+ "offloads 0x%lx or supported offloads 0x%lx",
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx5_priv_get_tx_port_offloads(priv));
+ goto out;
+ }
if (desc <= MLX5_TX_COMP_THRESH) {
WARN("%p: number of descriptors requested for TX queue %u"
" must be higher than MLX5_TX_COMP_THRESH, using"
@@ -579,6 +650,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
unsigned int inline_max_packet_sz;
eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+ int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
0 : config->txq_inline;
@@ -603,8 +675,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
txq_ctrl->txq.max_inline =
((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- /* TSO and MPS can't be enabled concurrently. */
- assert(!config->tso || !config->mps);
if (is_empw_func) {
/* To minimize the size of data set, avoid requesting
* too large WQ.
@@ -614,7 +684,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (config->tso) {
+ } else if (tso) {
int inline_diff = txq_ctrl->txq.max_inline -
max_tso_inline;
@@ -652,7 +722,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
RTE_CACHE_LINE_SIZE;
}
}
- if (config->tso) {
+ if (tso) {
txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
max_tso_inline);
@@ -692,7 +762,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
if (!tmpl)
return NULL;
assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.flags = conf->txq_flags;
+ tmpl->txq.offloads = conf->offloads;
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v2 5/7] net/mlx5: convert to new Rx offloads API
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (3 preceding siblings ...)
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 4/7] net/mlx5: convert to new Tx offloads API Shahaf Shuler
@ 2018-01-03 7:16 ` Shahaf Shuler
2018-01-04 10:12 ` Nelio Laranjeiro
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 6/7] net/mlx4: convert to new Tx " Shahaf Shuler
` (2 subsequent siblings)
7 siblings, 1 reply; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-03 7:16 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5_ethdev.c | 23 +++++---
drivers/net/mlx5/mlx5_rxq.c | 106 +++++++++++++++++++++++++++++++-----
drivers/net/mlx5/mlx5_rxtx.h | 3 +
drivers/net/mlx5/mlx5_vlan.c | 3 +-
4 files changed, 111 insertions(+), 24 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 8be4f43f7..adaa34fff 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -553,6 +553,10 @@ dev_configure(struct rte_eth_dev *dev)
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t supp_rx_offloads =
+ (mlx5_priv_get_rx_port_offloads(priv) |
+ mlx5_priv_get_rx_queue_offloads(priv));
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
ERROR("Some Tx offloads are not supported "
@@ -560,6 +564,12 @@ dev_configure(struct rte_eth_dev *dev)
tx_offloads, supp_tx_offloads);
return ENOTSUP;
}
+ if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+ ERROR("Some Rx offloads are not supported "
+ "requested 0x%lx supported 0x%lx\n",
+ rx_offloads, supp_rx_offloads);
+ return ENOTSUP;
+ }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -671,15 +681,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa =
- (config->hw_csum ?
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM) :
- 0) |
- (priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
- DEV_RX_OFFLOAD_TIMESTAMP;
-
+ info->rx_queue_offload_capa =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 81363ecd7..232e660ce 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -213,6 +213,78 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
}
/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_queue_offloads(struct priv *priv)
+{
+ struct mlx5_dev_config *config = &priv->config;
+ uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+ if (config->hw_fcs_strip)
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ if (config->hw_csum)
+ offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ if (config->hw_vlan_strip)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ return offloads;
+}
+
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t queue_supp_offloads =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);
+
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return 0;
+ if (((port_offloads ^ offloads) & port_supp_offloads))
+ return 0;
+ return 1;
+}
+
+/**
*
* @param dev
* Pointer to Ethernet device structure.
@@ -241,7 +313,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int ret = 0;
- (void)conf;
priv_lock(priv);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
@@ -257,6 +328,16 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
priv_unlock(priv);
return -EOVERFLOW;
}
+ if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%lx don't match port "
+ "offloads 0x%lx or supported offloads 0x%lx",
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx5_priv_get_rx_port_offloads(priv) |
+ mlx5_priv_get_rx_queue_offloads(priv)));
+ goto out;
+ }
if (!mlx5_priv_rxq_releasable(priv, idx)) {
ret = EBUSY;
ERROR("%p: unable to release queue index %u",
@@ -264,7 +345,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto out;
}
mlx5_priv_rxq_release(priv, idx);
- rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp);
+ rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
@@ -875,7 +956,8 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
*/
struct mlx5_rxq_ctrl*
mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket, struct rte_mempool *mp)
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
{
struct rte_eth_dev *dev = priv->dev;
struct mlx5_rxq_ctrl *tmpl;
@@ -899,7 +981,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -941,18 +1023,14 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- if (config->hw_csum)
- tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- if (config->hw_csum_l2tun)
- tmpl->rxq.csum_l2tun =
- !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- tmpl->rxq.hw_timestamp =
- !!dev->data->dev_conf.rxmode.hw_timestamp;
+ tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
+ priv->config.hw_csum_l2tun);
+ tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = (config->hw_vlan_strip &&
- !!dev->data->dev_conf.rxmode.hw_vlan_strip);
+ tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
tmpl->rxq.crc_present = 0;
} else if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 2728e8d5e..4ade8bee1 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -253,6 +253,7 @@ int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
int mlx5_priv_rxq_ibv_verify(struct priv *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
uint16_t, unsigned int,
+ const struct rte_eth_rxconf *,
struct rte_mempool *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
int mlx5_priv_rxq_release(struct priv *, uint16_t);
@@ -273,6 +274,8 @@ struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
uint64_t, uint16_t [], uint16_t);
int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
int mlx5_priv_hrxq_ibv_verify(struct priv *);
+uint64_t mlx5_priv_get_rx_port_offloads(struct priv *);
+uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *);
/* mlx5_txq.c */
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 2ab865264..9443e4f03 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -196,7 +196,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
unsigned int i;
if (mask & ETH_VLAN_STRIP_MASK) {
- int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
+ int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
if (!priv->config.hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v2 6/7] net/mlx4: convert to new Tx offloads API
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (4 preceding siblings ...)
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 5/7] net/mlx5: convert to new Rx " Shahaf Shuler
@ 2018-01-03 7:16 ` Shahaf Shuler
2018-01-03 17:29 ` Adrien Mazarguil
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 7/7] net/mlx4: convert to new Rx " Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
7 siblings, 1 reply; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-03 7:16 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
drivers/net/mlx4/mlx4_ethdev.c | 7 +---
drivers/net/mlx4/mlx4_rxtx.h | 1 +
drivers/net/mlx4/mlx4_txq.c | 71 +++++++++++++++++++++++++++++++++++--
3 files changed, 70 insertions(+), 9 deletions(-)
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 2f69e7d4f..63e00b1da 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -767,17 +767,12 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa = 0;
- info->tx_offload_capa = 0;
+ info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
if (priv->hw_csum) {
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM);
}
- if (priv->hw_csum_l2tun)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index b93e2bcda..91971c4fb 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -184,6 +184,7 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_txconf *conf);
void mlx4_tx_queue_release(void *dpdk_txq);
+uint64_t mlx4_priv_get_tx_port_offloads(struct priv *priv);
/**
* Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index d651e4980..f74e4a735 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -182,6 +182,53 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
}
/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx4_priv_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (priv->hw_csum) {
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ }
+ if (priv->hw_csum_l2tun)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t port_supp_offloads = mlx4_priv_get_tx_port_offloads(priv);
+
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return 0;
+ return 1;
+}
+
+/**
* DPDK callback to configure a Tx queue.
*
* @param dev
@@ -229,9 +276,22 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
};
int ret;
- (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ /*
+ * Don't verify port offloads for application which
+ * use the old API.
+ */
+ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%lx don't match port "
+ "offloads 0x%lx or supported offloads 0x%lx",
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx4_priv_get_tx_port_offloads(priv));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -281,8 +341,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
- .csum = priv->hw_csum,
- .csum_l2tun = priv->hw_csum_l2tun,
+ .csum = priv->hw_csum &&
+ (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM)),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (conf->offloads &
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
/* Enable Tx loopback for VF devices. */
.lb = !!priv->vf,
.bounce_buf = bounce_buf,
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v2 7/7] net/mlx4: convert to new Rx offloads API
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (5 preceding siblings ...)
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 6/7] net/mlx4: convert to new Tx " Shahaf Shuler
@ 2018-01-03 7:16 ` Shahaf Shuler
2018-01-03 17:29 ` Adrien Mazarguil
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
7 siblings, 1 reply; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-03 7:16 UTC (permalink / raw)
To: nelio.laranjeiro, yskoh, adrien.mazarguil; +Cc: dev
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
drivers/net/mlx4/mlx4_ethdev.c | 10 ++---
drivers/net/mlx4/mlx4_flow.c | 5 ++-
drivers/net/mlx4/mlx4_rxq.c | 78 ++++++++++++++++++++++++++++++++++---
drivers/net/mlx4/mlx4_rxtx.h | 2 +
4 files changed, 82 insertions(+), 13 deletions(-)
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 63e00b1da..39a23ee7b 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -766,13 +766,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa = 0;
info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
- if (priv->hw_csum) {
- info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- }
+ info->rx_queue_offload_capa =
+ mlx4_priv_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx4_priv_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 69025da42..96a6a6fa7 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -1232,7 +1232,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
* - MAC flow rules are generated from @p dev->data->mac_addrs
* (@p priv->mac array).
* - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
+ * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
* is enabled and VLAN filters are configured.
*
* @param priv
@@ -1300,7 +1300,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
};
struct ether_addr *rule_mac = &eth_spec.dst;
rte_be16_t *rule_vlan =
- priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
+ (priv->dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) &&
!priv->dev->data->promiscuous ?
&vlan_spec.tci :
NULL;
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 53313c56f..0cad28269 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -663,6 +663,64 @@ mlx4_rxq_detach(struct rxq *rxq)
}
/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_priv_get_rx_queue_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_SCATTER;
+
+ if (priv->hw_csum)
+ offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private strucute.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t port_supp_offloads = mlx4_priv_get_rx_port_offloads(priv);
+
+ if (((port_offloads ^ offloads) & port_supp_offloads))
+ return 0;
+ return 1;
+}
+
+/**
* DPDK callback to configure a Rx queue.
*
* @param dev
@@ -707,6 +765,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%lx don't match port "
+ "offloads 0x%lx or supported offloads 0x%lx",
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx4_priv_get_rx_port_offloads(priv) |
+ mlx4_priv_get_rx_queue_offloads(priv)));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_rx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -746,10 +814,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts_n = rte_log2_u32(desc),
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
- .csum = (priv->hw_csum &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
- .csum_l2tun = (priv->hw_csum_l2tun &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
+ .csum = priv->hw_csum &&
+ (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
.idx = idx,
@@ -761,7 +829,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 91971c4fb..bcb76ee27 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -166,6 +166,8 @@ int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
const struct rte_eth_rxconf *conf,
struct rte_mempool *mp);
void mlx4_rx_queue_release(void *dpdk_rxq);
+uint64_t mlx4_priv_get_rx_port_offloads(struct priv *priv);
+uint64_t mlx4_priv_get_rx_queue_offloads(struct priv *priv);
/* mlx4_rxtx.c */
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [dpdk-dev] [PATCH v2 6/7] net/mlx4: convert to new Tx offloads API
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 6/7] net/mlx4: convert to new Tx " Shahaf Shuler
@ 2018-01-03 17:29 ` Adrien Mazarguil
2018-01-04 11:55 ` Shahaf Shuler
0 siblings, 1 reply; 29+ messages in thread
From: Adrien Mazarguil @ 2018-01-03 17:29 UTC (permalink / raw)
To: Shahaf Shuler; +Cc: nelio.laranjeiro, yskoh, dev
Hi Shahaf,
Some relatively minor nits mostly unrelated to functionality, please see
below.
On Wed, Jan 03, 2018 at 09:16:16AM +0200, Shahaf Shuler wrote:
> Ethdev Tx offloads API has changed since:
>
> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
>
> This commit adds support for the new Tx offloads API.
>
> Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> ---
> drivers/net/mlx4/mlx4_ethdev.c | 7 +---
> drivers/net/mlx4/mlx4_rxtx.h | 1 +
> drivers/net/mlx4/mlx4_txq.c | 71 +++++++++++++++++++++++++++++++++++--
> 3 files changed, 70 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
> index 2f69e7d4f..63e00b1da 100644
> --- a/drivers/net/mlx4/mlx4_ethdev.c
> +++ b/drivers/net/mlx4/mlx4_ethdev.c
> @@ -767,17 +767,12 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
> info->max_tx_queues = max;
> info->max_mac_addrs = RTE_DIM(priv->mac);
> info->rx_offload_capa = 0;
> - info->tx_offload_capa = 0;
> + info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
> if (priv->hw_csum) {
> - info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
> - DEV_TX_OFFLOAD_UDP_CKSUM |
> - DEV_TX_OFFLOAD_TCP_CKSUM);
> info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM);
> }
> - if (priv->hw_csum_l2tun)
> - info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
> if (mlx4_get_ifname(priv, &ifname) == 0)
> info->if_index = if_nametoindex(ifname);
> info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
> diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
> index b93e2bcda..91971c4fb 100644
> --- a/drivers/net/mlx4/mlx4_rxtx.h
> +++ b/drivers/net/mlx4/mlx4_rxtx.h
> @@ -184,6 +184,7 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
> uint16_t desc, unsigned int socket,
> const struct rte_eth_txconf *conf);
> void mlx4_tx_queue_release(void *dpdk_txq);
> +uint64_t mlx4_priv_get_tx_port_offloads(struct priv *priv);
There is no need for "priv_" prefixes (or "priv" anywhere in function names)
in the mlx4 PMD anymore since DPDK 17.11. Visible symbols only need to be
prefixed with "mlx4_" to tell them apart from mlx5's.
Also the declaration in mlx4_rxtx.h should appear in the same order as in
mlx4_txq.c, that is, before mlx4_tx_queue_setup().
> /**
> * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
> diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
> index d651e4980..f74e4a735 100644
> --- a/drivers/net/mlx4/mlx4_txq.c
> +++ b/drivers/net/mlx4/mlx4_txq.c
> @@ -182,6 +182,53 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
> }
>
> /**
> + * Returns the per-port supported offloads.
> + *
> + * @param priv
> + * Pointer to private structure.
> + *
> + * @return
> + * Supported Tx offloads.
> + */
> +uint64_t
> +mlx4_priv_get_tx_port_offloads(struct priv *priv)
> +{
Please remove "priv_" as previously described.
> + uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
> +
> + if (priv->hw_csum) {
> + offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
> + DEV_TX_OFFLOAD_UDP_CKSUM |
> + DEV_TX_OFFLOAD_TCP_CKSUM);
> + }
> + if (priv->hw_csum_l2tun)
> + offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
> +
Unnecessary empty line.
> + return offloads;
> +}
> +
> +/**
> + * Checks if the per-queue offload configuration is valid.
> + *
> + * @param priv
> + * Pointer to private structure.
> + * @param offloads
> + * Per-queue offloads configuration.
> + *
> + * @return
> + * 1 if the configuration is valid, 0 otherwise.
Better described as "Nonzero when configuration is valid."
> + */
> +static int
> +priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
s/priv_/mlx4_/ (no prefix also allowed since it's static)
Not to be super picky, but "is" followed by "offloads allowed" sounds odd;
how about:
[mlx4_]check_tx_queue_offloads()
> +{
> + uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
> + uint64_t port_supp_offloads = mlx4_priv_get_tx_port_offloads(priv);
Instead of a redundant "port_", how about clarifying it all as follows:
offloads -> requested
port_offloads -> mandatory
port_supp_offloads -> supported
> +
> + if ((port_offloads ^ offloads) & port_supp_offloads)
> + return 0;
> + return 1;
And simplify this as:
return !((mandatory ^ requested) & supported);
Maybe I missed something, but there seems to be an inconsistency,
e.g. requesting an unsupported offload does not necessarily fail:
mandatory = 0x00
requested = 0x40
supported = 0x10
=> valid but shouldn't be
And requesting a supported offload when there are no mandatory ones should
not be a problem:
mandatory = 0x00
requested = 0x10
supported = 0x10
=> invalid but it should be
A naive translation of the above requirements results in the following
expression:
return (requested | supported) == supported &&
(requested & mandatory) == mandatory;
What's your opinion?
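Wrapped in a self-contained helper, the same check would read as the
following minimal sketch (the helper and parameter names are only
illustrative, they are not part of the PMD):

static int
check_queue_offloads(uint64_t mandatory, uint64_t requested,
		     uint64_t supported)
{
	/* Reject requested offloads outside the supported set, and
	 * require every mandatory (port-level) offload to be present. */
	return (requested | supported) == supported &&
	       (requested & mandatory) == mandatory;
}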
> +}
> +
> +/**
> * DPDK callback to configure a Tx queue.
> *
> * @param dev
> @@ -229,9 +276,22 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
> };
> int ret;
>
> - (void)conf; /* Thresholds configuration (ignored). */
> DEBUG("%p: configuring queue %u for %u descriptors",
> (void *)dev, idx, desc);
> + /*
> + * Don't verify port offloads for application which
> + * use the old API.
> + */
> + if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
Enclosing "!!(...)" seems unnecessary; only the fact that the result is
nonzero matters.
> + !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
> + rte_errno = ENOTSUP;
> + ERROR("%p: Tx queue offloads 0x%lx don't match port "
> + "offloads 0x%lx or supported offloads 0x%lx",
"%lx" may cause a compilation error depending on the platform, you need to
use "%" PRIx64 after including inttypes.h.
> + (void *)dev, conf->offloads,
> + dev->data->dev_conf.txmode.offloads,
> + mlx4_priv_get_tx_port_offloads(priv));
> + return -rte_errno;
> + }
> if (idx >= dev->data->nb_tx_queues) {
> rte_errno = EOVERFLOW;
> ERROR("%p: queue index out of range (%u >= %u)",
> @@ -281,8 +341,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
> RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
> .elts_comp_cd_init =
> RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
> - .csum = priv->hw_csum,
> - .csum_l2tun = priv->hw_csum_l2tun,
> + .csum = priv->hw_csum &&
> + (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
> + DEV_TX_OFFLOAD_UDP_CKSUM |
> + DEV_TX_OFFLOAD_TCP_CKSUM)),
> + .csum_l2tun = priv->hw_csum_l2tun &&
> + (conf->offloads &
> + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
> /* Enable Tx loopback for VF devices. */
> .lb = !!priv->vf,
> .bounce_buf = bounce_buf,
> --
> 2.12.0
>
--
Adrien Mazarguil
6WIND
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [dpdk-dev] [PATCH v2 7/7] net/mlx4: convert to new Rx offloads API
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 7/7] net/mlx4: convert to new Rx " Shahaf Shuler
@ 2018-01-03 17:29 ` Adrien Mazarguil
0 siblings, 0 replies; 29+ messages in thread
From: Adrien Mazarguil @ 2018-01-03 17:29 UTC (permalink / raw)
To: Shahaf Shuler; +Cc: nelio.laranjeiro, yskoh, dev
In short, same comments as the TX patch, more below.
On Wed, Jan 03, 2018 at 09:16:17AM +0200, Shahaf Shuler wrote:
> Ethdev Rx offloads API has changed since:
>
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
>
> This commit adds support for the new Rx offloads API.
>
> Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> ---
> drivers/net/mlx4/mlx4_ethdev.c | 10 ++---
> drivers/net/mlx4/mlx4_flow.c | 5 ++-
> drivers/net/mlx4/mlx4_rxq.c | 78 ++++++++++++++++++++++++++++++++++---
> drivers/net/mlx4/mlx4_rxtx.h | 2 +
> 4 files changed, 82 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
> index 63e00b1da..39a23ee7b 100644
> --- a/drivers/net/mlx4/mlx4_ethdev.c
> +++ b/drivers/net/mlx4/mlx4_ethdev.c
> @@ -766,13 +766,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
> info->max_rx_queues = max;
> info->max_tx_queues = max;
> info->max_mac_addrs = RTE_DIM(priv->mac);
> - info->rx_offload_capa = 0;
> info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
> - if (priv->hw_csum) {
> - info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
> - DEV_RX_OFFLOAD_UDP_CKSUM |
> - DEV_RX_OFFLOAD_TCP_CKSUM);
> - }
> + info->rx_queue_offload_capa =
> + mlx4_priv_get_rx_queue_offloads(priv);
> + info->rx_offload_capa = (mlx4_priv_get_rx_port_offloads(priv) |
> + info->rx_queue_offload_capa);
> if (mlx4_get_ifname(priv, &ifname) == 0)
> info->if_index = if_nametoindex(ifname);
> info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
> diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
> index 69025da42..96a6a6fa7 100644
> --- a/drivers/net/mlx4/mlx4_flow.c
> +++ b/drivers/net/mlx4/mlx4_flow.c
> @@ -1232,7 +1232,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
> * - MAC flow rules are generated from @p dev->data->mac_addrs
> * (@p priv->mac array).
> * - An additional flow rule for Ethernet broadcasts is also generated.
> - * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
> + * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
> * is enabled and VLAN filters are configured.
> *
> * @param priv
> @@ -1300,7 +1300,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
> };
> struct ether_addr *rule_mac = &eth_spec.dst;
> rte_be16_t *rule_vlan =
> - priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
> + (priv->dev->data->dev_conf.rxmode.offloads &
> + DEV_RX_OFFLOAD_VLAN_FILTER) &&
> !priv->dev->data->promiscuous ?
> &vlan_spec.tci :
> NULL;
> diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
> index 53313c56f..0cad28269 100644
> --- a/drivers/net/mlx4/mlx4_rxq.c
> +++ b/drivers/net/mlx4/mlx4_rxq.c
> @@ -663,6 +663,64 @@ mlx4_rxq_detach(struct rxq *rxq)
> }
>
> /**
> + * Returns the per-queue supported offloads.
> + *
> + * @param priv
> + * Pointer to private structure.
> + *
> + * @return
> + * Supported Rx offloads.
> + */
> +uint64_t
> +mlx4_priv_get_rx_queue_offloads(struct priv *priv)
You should drop "priv", e.g.:
mlx4_get_rx_queue_offloads()
> +{
> + uint64_t offloads = DEV_RX_OFFLOAD_SCATTER;
> +
> + if (priv->hw_csum)
> + offloads |= DEV_RX_OFFLOAD_CHECKSUM;
> + return offloads;
> +}
> +
> +/**
> + * Returns the per-port supported offloads.
> + *
> + * @param priv
> + * Pointer to private strucute.
strucute -> structure
> + *
> + * @return
> + * Supported Rx offloads.
> + */
> +uint64_t
> +mlx4_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
Same comment about "priv".
Using (void) instead of __rte_unused for consistency with the rest of the
PMD code is fine by the way. A subsequent patch can convert them all at once
to __rte_unused if needed.
> +{
> + uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
> +
> + return offloads;
> +}
> +
> +/**
> + * Checks if the per-queue offload configuration is valid.
> + *
> + * @param priv
> + * Pointer to private structure.
> + * @param offloads
> + * Per-queue offloads configuration.
> + *
> + * @return
> + * 1 if the configuration is valid, 0 otherwise.
Better described as "Nonzero when configuration is valid."
> + */
> +static int
> +priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
Ditto, with the same suggestion as for TX:
[mlx4_]check_rx_queue_offloads()
> +{
> + uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
> + uint64_t port_supp_offloads = mlx4_priv_get_rx_port_offloads(priv);
> +
> + if (((port_offloads ^ offloads) & port_supp_offloads))
> + return 0;
> + return 1;
> +}
Same comment and questions regarding this condition as for the TX patch
(mandatory/requested/supported).
> +
> +/**
> * DPDK callback to configure a Rx queue.
> *
> * @param dev
> @@ -707,6 +765,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
> (void)conf; /* Thresholds configuration (ignored). */
> DEBUG("%p: configuring queue %u for %u descriptors",
> (void *)dev, idx, desc);
> + if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
> + rte_errno = ENOTSUP;
> + ERROR("%p: Rx queue offloads 0x%lx don't match port "
> + "offloads 0x%lx or supported offloads 0x%lx",
> + (void *)dev, conf->offloads,
> + dev->data->dev_conf.rxmode.offloads,
> + (mlx4_priv_get_rx_port_offloads(priv) |
> + mlx4_priv_get_rx_queue_offloads(priv)));
Should use "%" PRIx64 instead of "%lx".
> + return -rte_errno;
> + }
> if (idx >= dev->data->nb_rx_queues) {
> rte_errno = EOVERFLOW;
> ERROR("%p: queue index out of range (%u >= %u)",
> @@ -746,10 +814,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
> .elts_n = rte_log2_u32(desc),
> .elts = elts,
> /* Toggle Rx checksum offload if hardware supports it. */
> - .csum = (priv->hw_csum &&
> - dev->data->dev_conf.rxmode.hw_ip_checksum),
> - .csum_l2tun = (priv->hw_csum_l2tun &&
> - dev->data->dev_conf.rxmode.hw_ip_checksum),
> + .csum = priv->hw_csum &&
> + (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
> + .csum_l2tun = priv->hw_csum_l2tun &&
> + (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
> .l2tun_offload = priv->hw_csum_l2tun,
> .stats = {
> .idx = idx,
> @@ -761,7 +829,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
> if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
> (mb_len - RTE_PKTMBUF_HEADROOM)) {
> ;
> - } else if (dev->data->dev_conf.rxmode.enable_scatter) {
> + } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
> uint32_t size =
> RTE_PKTMBUF_HEADROOM +
> dev->data->dev_conf.rxmode.max_rx_pkt_len;
> diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
> index 91971c4fb..bcb76ee27 100644
> --- a/drivers/net/mlx4/mlx4_rxtx.h
> +++ b/drivers/net/mlx4/mlx4_rxtx.h
> @@ -166,6 +166,8 @@ int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
> const struct rte_eth_rxconf *conf,
> struct rte_mempool *mp);
> void mlx4_rx_queue_release(void *dpdk_rxq);
> +uint64_t mlx4_priv_get_rx_port_offloads(struct priv *priv);
> +uint64_t mlx4_priv_get_rx_queue_offloads(struct priv *priv);
Declarations should come in the same order as in mlx4_rxq.c.
>
> /* mlx4_rxtx.c */
>
> --
> 2.12.0
>
--
Adrien Mazarguil
6WIND
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [dpdk-dev] [PATCH v2 5/7] net/mlx5: convert to new Rx offloads API
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 5/7] net/mlx5: convert to new Rx " Shahaf Shuler
@ 2018-01-04 10:12 ` Nelio Laranjeiro
0 siblings, 0 replies; 29+ messages in thread
From: Nelio Laranjeiro @ 2018-01-04 10:12 UTC (permalink / raw)
To: Shahaf Shuler; +Cc: yskoh, adrien.mazarguil, dev
Hi Shahaf,
Please see below,
On Wed, Jan 03, 2018 at 09:16:15AM +0200, Shahaf Shuler wrote:
> Ethdev Rx offloads API has changed since:
>
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
>
> This commit adds support for the new Rx offloads API.
>
> Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> ---
I have to remove my Acked-by on this patch; there is an issue.
<snip/>
> +/**
> + * Checks if the per-queue offload configuration is valid.
> + *
> + * @param priv
> + * Pointer to private structure.
> + * @param offloads
> + * Per-queue offloads configuration.
> + *
> + * @return
> + * 1 if the configuration is valid, 0 otherwise.
> + */
> +static int
> +priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
> +{
> + uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
> + uint64_t queue_supp_offloads =
> + mlx5_priv_get_rx_queue_offloads(priv);
> + uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);
> +
> + if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
> + offloads)
> + return 0;
> + if (((port_offloads ^ offloads) & port_supp_offloads))
port_offloads is wrongly named, it should be device_offloads, which
would make things clearer.
The mask also seems wrong, as port_supp_offloads is a subset of
device_offloads.
To my understanding, you want this statement to be true when the requested
offloads are not supported; in such a situation it should be a negative
mask, i.e. ~(port_supp_offloads | queue_supp_offloads).
> + return 0;
> + return 1;
> +}
The same issue is present in Tx side.
Regards,
--
Nélio Laranjeiro
6WIND
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [dpdk-dev] [PATCH v2 6/7] net/mlx4: convert to new Tx offloads API
2018-01-03 17:29 ` Adrien Mazarguil
@ 2018-01-04 11:55 ` Shahaf Shuler
2018-01-09 10:35 ` Nelio Laranjeiro
0 siblings, 1 reply; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-04 11:55 UTC (permalink / raw)
To: Adrien Mazarguil; +Cc: Nélio Laranjeiro, Yongseok Koh, dev
Hi Adrien and Nelio,
See the comment below regarding your feedback on the offload check.
The rest of the comments were accepted.
Wednesday, January 3, 2018 7:29 PM, Adrien Mazarguil :
[...]
>
> > +{
> > +	uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
> > +	uint64_t port_supp_offloads = mlx4_priv_get_tx_port_offloads(priv);
>
> Instead of a redundant "port_", how about clarifying it all as follows:
>
> offloads -> requested
> port_offloads -> mandatory
> port_supp_offloads -> supported
>
> > +
> > + if ((port_offloads ^ offloads) & port_supp_offloads)
> > + return 0;
> > + return 1;
>
> And simplify this as:
>
> return !((mandatory ^ requested) & supported);
>
> Maybe I missed something, but there seems to be an inconsistency, e.g.
You are correct that the purpose of this function is to check whether the offload configuration is correct.
However, the current mlx4 code does not validate that the configured queue offloads are supported.
It only validates that the port offload configuration matches the queue offload configuration.
The reason it lacks the supported-offloads check was discussed in an internal mail (you were both CC'd, I believe). It is mainly because the CRC and VLAN strip offloads are not supported by the PMD, yet they are set by almost every example/application in DPDK.
For the complete check, look at the mlx5 patches in this series.
> requesting an unsupported offload does not necessarily fail:
>
> mandatory = 0x00
> requested = 0x40
> supported = 0x10
>
> => valid but shouldn't be
It should, if the offload is a per-queue offload.
>
> And requesting a supported offload when there are no mandatory ones
> should not be a problem:
>
> mandatory = 0x00
> requested = 0x10
> supported = 0x10
>
> => invalid but it should be
It is indeed invalid. If the application declares that some port offload is not set at dev_configure, it cannot enable it from the queue setup.
Port offloads can be set only at device configuration, and when set, every queue should have them set as well.
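As a worked example of the check in the patch, with made-up bit values:

	port_supp_offloads = 0x10 (a per-port offload supported by the PMD)
	port_offloads      = 0x10 (enabled at rte_eth_dev_configure())
	queue offloads     = 0x00 -> (0x10 ^ 0x00) & 0x10 = 0x10, rejected
	queue offloads     = 0x10 -> (0x10 ^ 0x10) & 0x10 = 0x00, accepted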
>
> A naive translation of the above requirements results in the following
> expression:
>
> return (requested | supported) == supported &&
> (requested & mandatory) == mandatory;
>
> What's your opinion?
>
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [dpdk-dev] [PATCH v2 6/7] net/mlx4: convert to new Tx offloads API
2018-01-04 11:55 ` Shahaf Shuler
@ 2018-01-09 10:35 ` Nelio Laranjeiro
0 siblings, 0 replies; 29+ messages in thread
From: Nelio Laranjeiro @ 2018-01-09 10:35 UTC (permalink / raw)
To: Shahaf Shuler; +Cc: Adrien Mazarguil, Yongseok Koh, dev
Hi Shahaf,
On Thu, Jan 04, 2018 at 11:55:17AM +0000, Shahaf Shuler wrote:
> Hi Adrien and Nelio,
>
> See the comment below regarding your feedback on the offload check.
> The rest of the comments were accepted.
>
> Wednesday, January 3, 2018 7:29 PM, Adrien Mazarguil :
>
> [...]
>
> >
> > > +{
> > > +	uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
> > > +	uint64_t port_supp_offloads = mlx4_priv_get_tx_port_offloads(priv);
> >
> > Instead of a redundant "port_", how about clarifying it all as follows:
> >
> > offloads -> requested
> > port_offloads -> mandatory
> > port_supp_offloads -> supported
> >
> > > +
> > > + if ((port_offloads ^ offloads) & port_supp_offloads)
> > > + return 0;
> > > + return 1;
> >
> > And simplify this as:
> >
> > return !((mandatory ^ requested) & supported);
> >
> > Maybe I missed something, but there seems to be an inconsistency, e.g.
>
> You are correct that the purpose of this function is to check whether the offload configuration is correct.
> However, the current mlx4 code does not validate that the configured queue offloads are supported.
> It only validates that the port offload configuration matches the queue offload configuration.
>
> The reason it lacks the supported-offloads check was discussed in an internal mail (you were both CC'd, I believe). It is mainly because the CRC and VLAN strip offloads are not supported by the PMD, yet they are set by almost every example/application in DPDK.
> For the complete check, look at the mlx5 patches in this series.
>
> > requesting an unsupported offload does not necessarily fail:
> >
> > mandatory = 0x00
> > requested = 0x40
> > supported = 0x10
> >
> > => valid but shouldn't be
>
> It should, if the offload is a per-queue offload.
>
>
> >
> > And requesting a supported offload when there are no mandatory ones
> > should not be a problem:
> >
> > mandatory = 0x00
> > requested = 0x10
> > supported = 0x10
> >
> > => invalid but it should be
>
> It is indeed invalid. If the application declares that some port offload is not set at dev_configure, it cannot enable it from the queue setup.
> Port offloads can be set only at device configuration, and when set, every queue should have them set as well.
>
> >
> > A naive translation of the above requirements results in the following
> > expression:
> >
> > return (requested | supported) == supported &&
> > (requested & mandatory) == mandatory;
> >
> > What's your opinion?
> >
From an application point of view, it seems strange to have to provide an
already configured offload when configuring the queues, i.e.
rte_eth_dev_configure() is called before rte_eth_{tx,rx}_queue_setup().
I think this "mandatory" information should be removed from the API
documentation, letting the application request an empty offload set
when configuring the queue.
As this modification does not break the API/ABI and only eventually needs
a modification in the drivers, it can be done in the future.
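For reference, the call order the new API implies on the application side,
as a minimal sketch (error handling elided; port_id, the descriptor count
and the offload values are placeholders, not a recommendation):

	struct rte_eth_conf dev_conf = { 0 };
	struct rte_eth_txconf txconf = { 0 };

	/* Port-level offloads are fixed here, before any queue exists. */
	dev_conf.txmode.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
	rte_eth_dev_configure(port_id, 1, 1, &dev_conf);

	/* Opt in to the new API and echo the port offloads per queue. */
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = dev_conf.txmode.offloads |
			  DEV_TX_OFFLOAD_IPV4_CKSUM;
	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);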
For mlx5 part:
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
--
Nélio Laranjeiro
6WIND
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev offloads API
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (6 preceding siblings ...)
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 7/7] net/mlx4: convert to new Rx " Shahaf Shuler
@ 2018-01-10 9:16 ` Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 1/7] net/mlx5: change pkt burst select function prototype Shahaf Shuler
` (7 more replies)
7 siblings, 8 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 9:16 UTC (permalink / raw)
To: nelio.laranjeiro, adrien.mazarguil, yskoh; +Cc: dev
This series is to convert mlx4 and mlx5 PMDs to the new offloads API [1].
On v3:
- address almost all of Adrien's comments on mlx4.
On v2:
- New design to hold PMD specific args and combine
them with offloads requested.
- Fix missing IPV4 checksum flag on vector function selection.
- Verify Txq flags ignore bit before checking for valid offloads
configuration.
- Removed strict offloads check from mlx4.
[1] http://dpdk.org/ml/archives/dev/2017-October/077329.html
Nelio Laranjeiro (1):
net/mlx5: rename counter set in configuration
Shahaf Shuler (6):
net/mlx5: change pkt burst select function prototype
net/mlx5: add device configuration structure
net/mlx5: convert to new Tx offloads API
net/mlx5: convert to new Rx offloads API
net/mlx4: convert to new Tx offloads API
net/mlx4: convert to new Rx offloads API
doc/guides/nics/mlx5.rst | 15 +-
drivers/net/mlx4/mlx4_ethdev.c | 16 +--
drivers/net/mlx4/mlx4_flow.c | 5 +-
drivers/net/mlx4/mlx4_rxq.c | 77 ++++++++++-
drivers/net/mlx4/mlx4_rxtx.h | 3 +
drivers/net/mlx4/mlx4_txq.c | 69 ++++++++-
drivers/net/mlx5/mlx5.c | 190 +++++++++----------------
drivers/net/mlx5/mlx5.h | 57 +++++---
drivers/net/mlx5/mlx5_ethdev.c | 113 ++++++++-------
drivers/net/mlx5/mlx5_flow.c | 2 +-
drivers/net/mlx5/mlx5_rxq.c | 124 ++++++++++++++---
drivers/net/mlx5/mlx5_rxtx.c | 6 +-
drivers/net/mlx5/mlx5_rxtx.h | 10 +-
drivers/net/mlx5/mlx5_rxtx_vec.c | 40 +++---
drivers/net/mlx5/mlx5_rxtx_vec.h | 12 ++
drivers/net/mlx5/mlx5_trigger.c | 4 +-
drivers/net/mlx5/mlx5_txq.c | 254 +++++++++++++++++++++++++---------
drivers/net/mlx5/mlx5_vlan.c | 7 +-
18 files changed, 661 insertions(+), 343 deletions(-)
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v3 1/7] net/mlx5: change pkt burst select function prototype
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
@ 2018-01-10 9:16 ` Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 2/7] net/mlx5: add device configuration structure Shahaf Shuler
` (6 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 9:16 UTC (permalink / raw)
To: nelio.laranjeiro, adrien.mazarguil, yskoh; +Cc: dev
Change the function prototype to return the function pointer of the
selected Tx/Rx burst function instead of assigning it directly to the
device context.
This change makes it possible to use those select functions to query which
burst function will be selected according to the device configuration.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5.c | 11 ++++++++--
drivers/net/mlx5/mlx5.h | 4 ++--
drivers/net/mlx5/mlx5_ethdev.c | 41 +++++++++++++++++++++---------------
drivers/net/mlx5/mlx5_trigger.c | 4 ++--
4 files changed, 37 insertions(+), 23 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cd66fe162..0192815f2 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -712,8 +712,15 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
err = -err;
goto error;
}
- priv_dev_select_rx_function(priv, eth_dev);
- priv_dev_select_tx_function(priv, eth_dev);
+ /*
+ * Ethdev pointer is still required as input since
+ * the primary device is not accessible from the
+ * secondary process.
+ */
+ eth_dev->rx_pkt_burst =
+ priv_select_rx_function(priv, eth_dev);
+ eth_dev->tx_pkt_burst =
+ priv_select_tx_function(priv, eth_dev);
continue;
}
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e6a69b823..3e3259b55 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -206,8 +206,8 @@ void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
-void priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev);
-void priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev);
+eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *);
+eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *);
/* mlx5_mac.c */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 282ef241e..28183534a 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1325,8 +1325,8 @@ priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
if (err)
return err;
- priv_dev_select_tx_function(priv, dev);
- priv_dev_select_rx_function(priv, dev);
+ dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
+ dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
} else {
err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
if (err)
@@ -1386,32 +1386,36 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
* Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * Pointer to selected Tx burst function.
*/
-void
-priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_tx_burst_t
+priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
+ eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+
assert(priv != NULL);
- assert(dev != NULL);
- dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
if (priv->mps == MLX5_MPW_ENHANCED) {
if (priv_check_vec_tx_support(priv) > 0) {
if (priv_check_raw_vec_tx_support(priv) > 0)
- dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
- dev->tx_pkt_burst = mlx5_tx_burst_vec;
+ tx_pkt_burst = mlx5_tx_burst_vec;
DEBUG("selected Enhanced MPW TX vectorized function");
} else {
- dev->tx_pkt_burst = mlx5_tx_burst_empw;
+ tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
} else if (priv->mps && priv->txq_inline) {
- dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+ tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
} else if (priv->mps) {
- dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+ tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
+ return tx_pkt_burst;
}
/**
@@ -1421,16 +1425,19 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
* Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * Pointer to selected Rx burst function.
*/
-void
-priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_rx_burst_t
+priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
+ eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
+
assert(priv != NULL);
- assert(dev != NULL);
if (priv_check_vec_rx_support(priv) > 0) {
- dev->rx_pkt_burst = mlx5_rx_burst_vec;
+ rx_pkt_burst = mlx5_rx_burst_vec;
DEBUG("selected RX vectorized function");
- } else {
- dev->rx_pkt_burst = mlx5_rx_burst;
}
+ return rx_pkt_burst;
}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 88f60a01d..1a20967a2 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -152,7 +152,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
goto error;
}
/* Update send callback. */
- priv_dev_select_tx_function(priv, dev);
+ dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
err = priv_rxq_start(priv);
if (err) {
ERROR("%p: RXQ allocation failed: %s",
@@ -160,7 +160,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
goto error;
}
/* Update receive callback. */
- priv_dev_select_rx_function(priv, dev);
+ dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
err = priv_dev_traffic_enable(priv, dev);
if (err) {
ERROR("%p: an error occurred while configuring control flows:"
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
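The prototype change above is worth a note: because the selection is now
returned rather than assigned, other code can ask which burst function
would be chosen without mutating the ethdev. A minimal sketch under that
assumption (txq_uses_empw() is hypothetical and not part of the patch;
the other names follow the hunks above, and patch 2/7 adds an equivalent
is_empw_burst_func() helper):

/* Hypothetical helper sketch: query the Tx burst selection without
 * touching dev->tx_pkt_burst, e.g. to learn whether one of the eMPW
 * variants would be used.
 */
static int
txq_uses_empw(struct priv *priv, struct rte_eth_dev *dev)
{
	eth_tx_burst_t f = priv_select_tx_function(priv, dev);

	return f == mlx5_tx_burst_raw_vec ||
	       f == mlx5_tx_burst_vec ||
	       f == mlx5_tx_burst_empw;
}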
* [dpdk-dev] [PATCH v3 2/7] net/mlx5: add device configuration structure
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 1/7] net/mlx5: change pkt burst select function prototype Shahaf Shuler
@ 2018-01-10 9:16 ` Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 3/7] net/mlx5: rename counter set in configuration Shahaf Shuler
` (5 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 9:16 UTC (permalink / raw)
To: nelio.laranjeiro, adrien.mazarguil, yskoh; +Cc: dev
Move the device configuration and feature capabilities into their own
structure. This structure is filled by mlx5_pci_probe(); outside of this
function it should be treated as *read only*.
This configuration struct will be used during Tx/Rx queue setup to
select the queue parameters based on the user configuration and
device capabilities.
In addition, it will be used by the burst selection functions to decide
on the best packet burst function to use.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5.c | 178 +++++++++++----------------------
drivers/net/mlx5/mlx5.h | 53 ++++++----
drivers/net/mlx5/mlx5_ethdev.c | 26 ++---
drivers/net/mlx5/mlx5_flow.c | 2 +-
drivers/net/mlx5/mlx5_rxq.c | 24 +++--
drivers/net/mlx5/mlx5_rxtx_vec.c | 10 +-
drivers/net/mlx5/mlx5_txq.c | 182 ++++++++++++++++++++++------------
drivers/net/mlx5/mlx5_vlan.c | 4 +-
8 files changed, 248 insertions(+), 231 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0192815f2..fdd4710f1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -94,9 +94,6 @@
/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"
-/* Default PMD specific parameter value. */
-#define MLX5_ARG_UNSET (-1)
-
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -106,17 +103,6 @@
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
-struct mlx5_args {
- int cqe_comp;
- int txq_inline;
- int txqs_inline;
- int mps;
- int mpw_hdr_dseg;
- int inline_max_packet_sz;
- int tso;
- int tx_vec_en;
- int rx_vec_en;
-};
/**
* Retrieve integer value from environment variable.
*
@@ -399,7 +385,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
- struct mlx5_args *args = opaque;
+ struct mlx5_dev_config *config = opaque;
unsigned long tmp;
errno = 0;
@@ -409,23 +395,23 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
return errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
- args->cqe_comp = !!tmp;
+ config->cqe_comp = !!tmp;
} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
- args->txq_inline = tmp;
+ config->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
- args->txqs_inline = tmp;
+ config->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
- args->mps = !!tmp;
+ config->mps = !!tmp ? config->mps : 0;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
- args->mpw_hdr_dseg = !!tmp;
+ config->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
- args->inline_max_packet_sz = tmp;
+ config->inline_max_packet_sz = tmp;
} else if (strcmp(MLX5_TSO, key) == 0) {
- args->tso = !!tmp;
+ config->tso = !!tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
- args->tx_vec_en = !!tmp;
+ config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
- args->rx_vec_en = !!tmp;
+ config->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -436,8 +422,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
/**
* Parse device parameters.
*
- * @param priv
- * Pointer to private structure.
+ * @param config
+ * Pointer to device configuration structure.
* @param devargs
* Device arguments structure.
*
@@ -445,7 +431,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* 0 on success, errno value on failure.
*/
static int
-mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
+mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
@@ -473,7 +459,7 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
for (i = 0; (params[i] != NULL); ++i) {
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
- mlx5_args_check, args);
+ mlx5_args_check, config);
if (ret != 0) {
rte_kvargs_free(kvlist);
return ret;
@@ -487,38 +473,6 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
static struct rte_pci_driver mlx5_driver;
/**
- * Assign parameters from args into priv, only non default
- * values are considered.
- *
- * @param[out] priv
- * Pointer to private structure.
- * @param[in] args
- * Pointer to args values.
- */
-static void
-mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
-{
- if (args->cqe_comp != MLX5_ARG_UNSET)
- priv->cqe_comp = args->cqe_comp;
- if (args->txq_inline != MLX5_ARG_UNSET)
- priv->txq_inline = args->txq_inline;
- if (args->txqs_inline != MLX5_ARG_UNSET)
- priv->txqs_inline = args->txqs_inline;
- if (args->mps != MLX5_ARG_UNSET)
- priv->mps = args->mps ? priv->mps : 0;
- if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
- priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
- if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
- priv->inline_max_packet_sz = args->inline_max_packet_sz;
- if (args->tso != MLX5_ARG_UNSET)
- priv->tso = args->tso;
- if (args->tx_vec_en != MLX5_ARG_UNSET)
- priv->tx_vec_en = args->tx_vec_en;
- if (args->rx_vec_en != MLX5_ARG_UNSET)
- priv->rx_vec_en = args->rx_vec_en;
-}
-
-/**
* DPDK callback to register a PCI device.
*
* This function creates an Ethernet device for each port of a given
@@ -671,16 +625,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ether_addr mac;
uint16_t num_vfs = 0;
struct ibv_device_attr_ex device_attr;
- struct mlx5_args args = {
- .cqe_comp = MLX5_ARG_UNSET,
+ struct mlx5_dev_config config = {
+ .cqe_comp = cqe_comp,
+ .mps = mps,
+ .tunnel_en = tunnel_en,
+ .tso = 0,
+ .tx_vec_en = 1,
+ .rx_vec_en = 1,
+ .mpw_hdr_dseg = 0,
.txq_inline = MLX5_ARG_UNSET,
.txqs_inline = MLX5_ARG_UNSET,
- .mps = MLX5_ARG_UNSET,
- .mpw_hdr_dseg = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
- .tso = MLX5_ARG_UNSET,
- .tx_vec_en = MLX5_ARG_UNSET,
- .rx_vec_en = MLX5_ARG_UNSET,
};
mlx5_dev[idx].ports |= test;
@@ -779,106 +734,89 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
- priv->mps = mps; /* Enable MPW by default if supported. */
- priv->cqe_comp = cqe_comp;
- priv->tunnel_en = tunnel_en;
- /* Enable vector by default if supported. */
- priv->tx_vec_en = 1;
- priv->rx_vec_en = 1;
- err = mlx5_args(&args, pci_dev->device.devargs);
+ err = mlx5_args(&config, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
goto port_error;
}
- mlx5_args_assign(priv, &args);
if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
ERROR("ibv_query_device_ex() failed");
goto port_error;
}
- priv->hw_csum =
- !!(device_attr_ex.device_cap_flags_ex &
- IBV_DEVICE_RAW_IP_CSUM);
+ config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
+ IBV_DEVICE_RAW_IP_CSUM);
DEBUG("checksum offloading is %ssupported",
- (priv->hw_csum ? "" : "not "));
+ (config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
- priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
- IBV_DEVICE_VXLAN_SUPPORT);
+ config.hw_csum_l2tun =
+ !!(exp_device_attr.exp_device_cap_flags &
+ IBV_DEVICE_VXLAN_SUPPORT);
#endif
DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
- (priv->hw_csum_l2tun ? "" : "not "));
+ (config.hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- priv->counter_set_supported = !!(device_attr.max_counter_sets);
+ config.counter_set_supported =
+ !!(device_attr.max_counter_sets);
ibv_describe_counter_set(ctx, 0, &cs_desc);
DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
cs_desc.counter_type, cs_desc.num_of_cs,
cs_desc.attributes);
#endif
- priv->ind_table_max_size =
+ config.ind_table_max_size =
device_attr_ex.rss_caps.max_rwq_indirection_table_size;
/* Remove this check once DPDK supports larger/variable
* indirection tables. */
- if (priv->ind_table_max_size >
+ if (config.ind_table_max_size >
(unsigned int)ETH_RSS_RETA_SIZE_512)
- priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
DEBUG("maximum RX indirection table size is %u",
- priv->ind_table_max_size);
- priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
+ config.ind_table_max_size);
+ config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
DEBUG("VLAN stripping is %ssupported",
- (priv->hw_vlan_strip ? "" : "not "));
+ (config.hw_vlan_strip ? "" : "not "));
- priv->hw_fcs_strip =
+ config.hw_fcs_strip =
!!(device_attr_ex.orig_attr.device_cap_flags &
IBV_WQ_FLAGS_SCATTER_FCS);
DEBUG("FCS stripping configuration is %ssupported",
- (priv->hw_fcs_strip ? "" : "not "));
+ (config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
+ config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
DEBUG("hardware RX end alignment padding is %ssupported",
- (priv->hw_padding ? "" : "not "));
+ (config.hw_padding ? "" : "not "));
priv_get_num_vfs(priv, &num_vfs);
- priv->sriov = (num_vfs || sriov);
- priv->tso = ((priv->tso) &&
- (device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
- if (priv->tso)
- priv->max_tso_payload_sz =
- device_attr_ex.tso_caps.max_tso;
- if (priv->mps && !mps) {
+ config.sriov = (num_vfs || sriov);
+ if (config.tso)
+ config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (config.tso)
+ config.tso_max_payload_sz =
+ device_attr_ex.tso_caps.max_tso;
+ if (config.mps && !mps) {
ERROR("multi-packet send not supported on this device"
" (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
- } else if (priv->mps && priv->tso) {
+ } else if (config.mps && config.tso) {
WARN("multi-packet send not supported in conjunction "
"with TSO. MPS disabled");
- priv->mps = 0;
+ config.mps = 0;
}
INFO("%sMPS is %s",
- priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
- priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- /* Set default values for Enhanced MPW, a.k.a MPWv2. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
- if (args.txqs_inline == MLX5_ARG_UNSET)
- priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
- if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
- priv->inline_max_packet_sz =
- MLX5_EMPW_MAX_INLINE_LEN;
- if (args.txq_inline == MLX5_ARG_UNSET)
- priv->txq_inline = MLX5_WQE_SIZE_MAX -
- MLX5_WQE_SIZE;
- }
- if (priv->cqe_comp && !cqe_comp) {
+ config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+ config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ if (config.cqe_comp && !cqe_comp) {
WARN("Rx CQE compression isn't supported");
- priv->cqe_comp = 0;
+ config.cqe_comp = 0;
}
/* Configure the first MAC address by default. */
if (priv_get_mac(priv, &mac.addr_bytes)) {
@@ -945,6 +883,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
mlx5_link_update(priv->dev, 1);
+ /* Store device configuration on private structure. */
+ priv->config = config;
continue;
port_error:
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3e3259b55..04f0b2557 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -90,6 +90,39 @@ struct mlx5_xstats_ctrl {
/* Flow list . */
TAILQ_HEAD(mlx5_flows, rte_flow);
+/* Default PMD specific parameter value. */
+#define MLX5_ARG_UNSET (-1)
+
+/*
+ * Device configuration structure.
+ *
+ * Merged configuration from:
+ *
+ * - Device capabilities,
+ * - User device parameters disabled features.
+ */
+struct mlx5_dev_config {
+ unsigned int hw_csum:1; /* Checksum offload is supported. */
+ unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
+ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
+ unsigned int hw_padding:1; /* End alignment padding is supported. */
+ unsigned int sriov:1; /* This is a VF or PF with VF devices. */
+ unsigned int mps:2; /* Multi-packet send supported mode. */
+ unsigned int tunnel_en:1; /* Whether tunnel is supported. */
+ unsigned int counter_set_supported:1; /* Counter set is supported. */
+ unsigned int cqe_comp:1; /* CQE compression is enabled. */
+ unsigned int tso:1; /* Whether TSO is enabled. */
+ unsigned int tx_vec_en:1; /* Tx vector is enabled. */
+ unsigned int rx_vec_en:1; /* Rx vector is enabled. */
+ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+ unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
+ unsigned int ind_table_max_size; /* Maximum indirection table size. */
+ int txq_inline; /* Maximum packet size for inlining. */
+ int txqs_inline; /* Queue number threshold for inlining. */
+ int inline_max_packet_sz; /* Max packet size for inlining. */
+};
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device of master process. */
struct ibv_context *ctx; /* Verbs context. */
@@ -102,27 +135,8 @@ struct priv {
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
uint8_t port; /* Physical port number. */
- unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
- unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
- unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
- unsigned int hw_padding:1; /* End alignment padding is supported. */
- unsigned int sriov:1; /* This is a VF or PF with VF devices. */
- unsigned int mps:2; /* Multi-packet send mode (0: disabled). */
- unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
- unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
unsigned int pending_alarm:1; /* An alarm is pending. */
- unsigned int tso:1; /* Whether TSO is supported. */
- unsigned int tunnel_en:1;
unsigned int isolated:1; /* Whether isolated mode is enabled. */
- unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
- unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
- unsigned int counter_set_supported:1; /* Counter set is supported. */
- /* Whether Tx offloads for tunneled packets are supported. */
- unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
- unsigned int txq_inline; /* Maximum packet size for inlining. */
- unsigned int txqs_inline; /* Queue number threshold for inlining. */
- unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
@@ -149,6 +163,7 @@ struct priv {
rte_spinlock_t lock; /* Lock for control functions. */
int primary_socket; /* Unix socket for primary process. */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
+ struct mlx5_dev_config config; /* Device configuration. */
};
/**
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 28183534a..d2f98769e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -579,7 +579,7 @@ dev_configure(struct rte_eth_dev *dev)
(void *)dev, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
- if (rxqs_n > priv->ind_table_max_size) {
+ if (rxqs_n > priv->config.ind_table_max_size) {
ERROR("cannot handle this many RX queues (%u)", rxqs_n);
return EINVAL;
}
@@ -592,7 +592,7 @@ dev_configure(struct rte_eth_dev *dev)
* maximum indirection table size for better balancing.
* The result is always rounded to the next power of two. */
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
- priv->ind_table_max_size :
+ priv->config.ind_table_max_size :
rxqs_n));
if (priv_rss_reta_index_resize(priv, reta_idx_n))
return ENOMEM;
@@ -641,6 +641,7 @@ void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
unsigned int max;
char ifname[IF_NAMESIZE];
@@ -663,31 +664,31 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa =
- (priv->hw_csum ?
+ (config->hw_csum ?
(DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM) :
0) |
- (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
+ (priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
DEV_RX_OFFLOAD_TIMESTAMP;
- if (!priv->mps)
+ if (!config->mps)
info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
- if (priv->hw_csum)
+ if (config->hw_csum)
info->tx_offload_capa |=
(DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
- if (priv->tso)
+ if (config->tso)
info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- if (priv->tunnel_en)
+ if (config->tunnel_en)
info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
- priv->reta_idx_n : priv->ind_table_max_size;
+ priv->reta_idx_n : config->ind_table_max_size;
info->hash_key_size = priv->rss_conf.rss_key_len;
info->speed_capa = priv->link_speed_capa;
priv_unlock(priv);
@@ -1394,10 +1395,11 @@ eth_tx_burst_t
priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+ struct mlx5_dev_config *config = &priv->config;
assert(priv != NULL);
/* Select appropriate TX function. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
+ if (config->mps == MLX5_MPW_ENHANCED) {
if (priv_check_vec_tx_support(priv) > 0) {
if (priv_check_raw_vec_tx_support(priv) > 0)
tx_pkt_burst = mlx5_tx_burst_raw_vec;
@@ -1408,10 +1410,10 @@ priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
- } else if (priv->mps && priv->txq_inline) {
+ } else if (config->mps && (config->txq_inline > 0)) {
tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if (priv->mps) {
+ } else if (config->mps) {
tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index e0775172c..ec179bd30 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -778,7 +778,7 @@ priv_flow_convert_actions(struct priv *priv,
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
- priv->counter_set_supported) {
+ priv->config.counter_set_supported) {
parser->count = 1;
} else {
goto exit_action_not_supported;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index a4cdd374a..057156d84 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -570,6 +570,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
unsigned int i;
int ret = 0;
struct mlx5dv_obj obj;
+ struct mlx5_dev_config *config = &priv->config;
assert(rxq_data);
assert(!rxq_ctrl->ibv);
@@ -606,7 +607,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
- if (priv->cqe_comp && !rxq_data->hw_timestamp) {
+ if (config->cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
@@ -616,7 +617,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
*/
if (rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
- } else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+ } else if (config->cqe_comp && rxq_data->hw_timestamp) {
DEBUG("Rx CQE compression is disabled for HW timestamp");
}
tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
@@ -651,7 +652,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- if (priv->hw_padding) {
+ if (config->hw_padding) {
attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
@@ -878,9 +879,14 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
{
struct rte_eth_dev *dev = priv->dev;
struct mlx5_rxq_ctrl *tmpl;
- const uint16_t desc_n =
- desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ struct mlx5_dev_config *config = &priv->config;
+ /*
+ * Always allocate extra slots, even if eventually
+ * the vector Rx will not be used.
+ */
+ const uint16_t desc_n =
+ desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
@@ -938,20 +944,20 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- if (priv->hw_csum)
+ if (config->hw_csum)
tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- if (priv->hw_csum_l2tun)
+ if (config->hw_csum_l2tun)
tmpl->rxq.csum_l2tun =
!!dev->data->dev_conf.rxmode.hw_ip_checksum;
tmpl->rxq.hw_timestamp =
!!dev->data->dev_conf.rxmode.hw_timestamp;
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
+ tmpl->rxq.vlan_strip = (config->hw_vlan_strip &&
!!dev->data->dev_conf.rxmode.hw_vlan_strip);
/* By default, FCS (CRC) is stripped by hardware. */
if (dev->data->dev_conf.rxmode.hw_strip_crc) {
tmpl->rxq.crc_present = 0;
- } else if (priv->hw_fcs_strip) {
+ } else if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
} else {
WARN("%p: CRC stripping has been disabled but will still"
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 8d23dae7e..761ed4971 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -287,10 +287,10 @@ priv_check_raw_vec_tx_support(struct priv *priv)
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
- if (!priv->tx_vec_en ||
+ if (!priv->config.tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
- priv->mps != MLX5_MPW_ENHANCED ||
- priv->tso)
+ priv->config.mps != MLX5_MPW_ENHANCED ||
+ priv->config.tso)
return -ENOTSUP;
return 1;
}
@@ -310,7 +310,7 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+ if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return 1;
}
@@ -329,7 +329,7 @@ priv_check_vec_rx_support(struct priv *priv)
{
uint16_t i;
- if (!priv->rx_vec_en)
+ if (!priv->config.rx_vec_en)
return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 89b16fda2..3e2075c79 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -273,6 +273,25 @@ priv_tx_uar_remap(struct priv *priv, int fd)
}
/**
+ * Check if the burst function is using eMPW.
+ *
+ * @param tx_pkt_burst
+ * Tx burst function pointer.
+ *
+ * @return
+ * 1 if the burst function is using eMPW, 0 otherwise.
+ */
+static int
+is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
+{
+ if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
+ tx_pkt_burst == mlx5_tx_burst_vec ||
+ tx_pkt_burst == mlx5_tx_burst_empw)
+ return 1;
+ return 0;
+}
+
+/**
* Create the Tx queue Verbs object.
*
* @param priv
@@ -302,6 +321,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
struct mlx5dv_cq cq_info;
struct mlx5dv_obj obj;
const int desc = 1 << txq_data->elts_n;
+ eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
int ret = 0;
assert(txq_data);
@@ -316,7 +336,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
};
cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
- if (priv->mps == MLX5_MPW_ENHANCED)
+ if (is_empw_burst_func(tx_pkt_burst))
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
@@ -541,83 +561,74 @@ mlx5_priv_txq_ibv_verify(struct priv *priv)
}
/**
- * Create a DPDK Tx queue.
+ * Set Tx queue parameters from device configuration.
*
- * @param priv
- * Pointer to private structure.
- * @param idx
- * TX queue index.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- *
- * @return
- * A DPDK queue object on success.
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket,
- const struct rte_eth_txconf *conf)
+static void
+txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
+ struct priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
const unsigned int max_tso_inline =
((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- struct mlx5_txq_ctrl *tmpl;
-
- tmpl = rte_calloc_socket("TXQ", 1,
- sizeof(*tmpl) +
- desc * sizeof(struct rte_mbuf *),
- 0, socket);
- if (!tmpl)
- return NULL;
- assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.flags = conf->txq_flags;
- tmpl->priv = priv;
- tmpl->socket = socket;
- tmpl->txq.elts_n = log2above(desc);
- if (priv->mps == MLX5_MPW_ENHANCED)
- tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
- /* MRs will be registered in mp2mr[] later. */
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.orig_attr.max_sge);
- if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ unsigned int txq_inline;
+ unsigned int txqs_inline;
+ unsigned int inline_max_packet_sz;
+ eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+
+ txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txq_inline;
+ txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txqs_inline;
+ inline_max_packet_sz =
+ (config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
+ 0 : config->inline_max_packet_sz;
+ if (is_empw_func) {
+ if (config->txq_inline == MLX5_ARG_UNSET)
+ txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
+ if (config->txqs_inline == MLX5_ARG_UNSET)
+ txqs_inline = MLX5_EMPW_MIN_TXQS;
+ if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
+ inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+ txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
+ txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
+ }
+ if (txq_inline && priv->txqs_n >= txqs_inline) {
unsigned int ds_cnt;
- tmpl->txq.max_inline =
- ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ txq_ctrl->txq.max_inline =
+ ((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
/* TSO and MPS can't be enabled concurrently. */
- assert(!priv->tso || !priv->mps);
- if (priv->mps == MLX5_MPW_ENHANCED) {
- tmpl->txq.inline_max_packet_sz =
- priv->inline_max_packet_sz;
+ assert(!config->tso || !config->mps);
+ if (is_empw_func) {
/* To minimize the size of data set, avoid requesting
* too large WQ.
*/
- tmpl->max_inline_data =
- ((RTE_MIN(priv->txq_inline,
- priv->inline_max_packet_sz) +
+ txq_ctrl->max_inline_data =
+ ((RTE_MIN(txq_inline,
+ inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (priv->tso) {
- int inline_diff = tmpl->txq.max_inline - max_tso_inline;
+ } else if (config->tso) {
+ int inline_diff = txq_ctrl->txq.max_inline -
+ max_tso_inline;
/*
* Adjust inline value as Verbs aggregates
* tso_inline and txq_inline fields.
*/
- tmpl->max_inline_data = inline_diff > 0 ?
+ txq_ctrl->max_inline_data = inline_diff > 0 ?
inline_diff *
RTE_CACHE_LINE_SIZE :
0;
} else {
- tmpl->max_inline_data =
- tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->max_inline_data =
+ txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
}
/*
* Check if the inline size is too large in a way which
@@ -627,7 +638,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
* WQE ETH (1 DS)
* Inline part (N DS)
*/
- ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+ ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
if (ds_cnt > MLX5_DSEG_MAX) {
unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
MLX5_WQE_DWORD_SIZE;
@@ -636,18 +647,61 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
RTE_CACHE_LINE_SIZE);
WARN("txq inline is too large (%d) setting it to "
"the maximum possible: %d\n",
- priv->txq_inline, max_inline);
- tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
+ txq_inline, max_inline);
+ txq_ctrl->txq.max_inline = max_inline /
+ RTE_CACHE_LINE_SIZE;
}
}
- if (priv->tso) {
- tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
- tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
- max_tso_inline);
- tmpl->txq.tso_en = 1;
+ if (config->tso) {
+ txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
+ max_tso_inline);
+ txq_ctrl->txq.tso_en = 1;
}
- if (priv->tunnel_en)
- tmpl->txq.tunnel_en = 1;
+ txq_ctrl->txq.tunnel_en = config->tunnel_en;
+}
+
+/**
+ * Create a DPDK Tx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ *
+ * @return
+ * A DPDK queue object on success.
+ */
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_txconf *conf)
+{
+ struct mlx5_txq_ctrl *tmpl;
+
+ tmpl = rte_calloc_socket("TXQ", 1,
+ sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl)
+ return NULL;
+ assert(desc > MLX5_TX_COMP_THRESH);
+ tmpl->txq.flags = conf->txq_flags;
+ tmpl->priv = priv;
+ tmpl->socket = socket;
+ tmpl->txq.elts_n = log2above(desc);
+ txq_set_params(tmpl);
+ /* MRs will be registered in mp2mr[] later. */
+ DEBUG("priv->device_attr.max_qp_wr is %d",
+ priv->device_attr.orig_attr.max_qp_wr);
+ DEBUG("priv->device_attr.max_sge is %d",
+ priv->device_attr.orig_attr.max_sge);
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 198a69e3c..2ab865264 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -165,7 +165,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
struct priv *priv = dev->data->dev_private;
/* Validate hw support */
- if (!priv->hw_vlan_strip) {
+ if (!priv->config.hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
return;
}
@@ -198,7 +198,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
- if (!priv->hw_vlan_strip) {
+ if (!priv->config.hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
return 0;
}
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
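Condensing the probe-time flow this patch establishes: seed
mlx5_dev_config from device capabilities and built-in defaults, let
devargs override it, clamp the result back to what the hardware
supports, then store it on priv, where it is treated as read only. A
simplified sketch of the hunks above (error handling trimmed, not a
drop-in replacement for mlx5_pci_probe()):

/* Sketch of the mlx5_pci_probe() flow from this patch. */
struct mlx5_dev_config config = {
	.cqe_comp = cqe_comp,		/* seeded from device caps */
	.mps = mps,
	.tunnel_en = tunnel_en,
	.tx_vec_en = 1,			/* built-in defaults */
	.rx_vec_en = 1,
	.txq_inline = MLX5_ARG_UNSET,	/* "unset" markers */
	.txqs_inline = MLX5_ARG_UNSET,
	.inline_max_packet_sz = MLX5_ARG_UNSET,
};

err = mlx5_args(&config, pci_dev->device.devargs); /* user overrides */
if (err)
	goto port_error;
if (config.cqe_comp && !cqe_comp) {	/* clamp to capabilities */
	WARN("Rx CQE compression isn't supported");
	config.cqe_comp = 0;
}
/* ... ibv_query_device_ex() results fill the hw_* capability bits ... */
priv->config = config;	/* read only from this point on */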
* [dpdk-dev] [PATCH v3 3/7] net/mlx5: rename counter set in configuration
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 1/7] net/mlx5: change pkt burst select function prototype Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 2/7] net/mlx5: add device configuration structure Shahaf Shuler
@ 2018-01-10 9:16 ` Shahaf Shuler
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 4/7] net/mlx5: convert to new Tx offloads API Shahaf Shuler
` (4 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 9:16 UTC (permalink / raw)
To: nelio.laranjeiro, adrien.mazarguil, yskoh; +Cc: dev
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
counter_set is a counter used for flows when hardware support for it is
available. Rename it to flow counter to better reflect its purpose.
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5.c | 3 +--
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_flow.c | 2 +-
3 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fdd4710f1..ca44a0a59 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -759,8 +759,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
(config.hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- config.counter_set_supported =
- !!(device_attr.max_counter_sets);
+ config.flow_counter_en = !!(device_attr.max_counter_sets);
ibv_describe_counter_set(ctx, 0, &cs_desc);
DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
cs_desc.counter_type, cs_desc.num_of_cs,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 04f0b2557..171b3a933 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -110,7 +110,7 @@ struct mlx5_dev_config {
unsigned int sriov:1; /* This is a VF or PF with VF devices. */
unsigned int mps:2; /* Multi-packet send supported mode. */
unsigned int tunnel_en:1; /* Whether tunnel is supported. */
- unsigned int counter_set_supported:1; /* Counter set is supported. */
+ unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int tso:1; /* Whether TSO is enabled. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ec179bd30..305b2ec01 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -778,7 +778,7 @@ priv_flow_convert_actions(struct priv *priv,
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
- priv->config.counter_set_supported) {
+ priv->config.flow_counter_en) {
parser->count = 1;
} else {
goto exit_action_not_supported;
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
* [dpdk-dev] [PATCH v3 4/7] net/mlx5: convert to new Tx offloads API
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (2 preceding siblings ...)
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 3/7] net/mlx5: rename counter set in configuration Shahaf Shuler
@ 2018-01-10 9:17 ` Shahaf Shuler
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 5/7] net/mlx5: convert to new Rx " Shahaf Shuler
` (3 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 9:17 UTC (permalink / raw)
To: nelio.laranjeiro, adrien.mazarguil, yskoh; +Cc: dev
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
doc/guides/nics/mlx5.rst | 15 +++----
drivers/net/mlx5/mlx5.c | 18 ++------
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_ethdev.c | 37 ++++++++--------
drivers/net/mlx5/mlx5_rxtx.c | 6 ++-
drivers/net/mlx5/mlx5_rxtx.h | 7 +--
drivers/net/mlx5/mlx5_rxtx_vec.c | 32 +++++++-------
drivers/net/mlx5/mlx5_rxtx_vec.h | 12 ++++++
drivers/net/mlx5/mlx5_txq.c | 80 ++++++++++++++++++++++++++++++++---
9 files changed, 142 insertions(+), 67 deletions(-)
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 154db64d7..bdc2216c0 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -262,8 +262,9 @@ Run-time configuration
Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
in the same descriptor.
- This option cannot be used in conjunction with ``tso`` below. When ``tso``
- is set, ``txq_mpw_en`` is disabled.
+ This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``.
+ When those offloads are requested the MPS send function will not be used.
It is currently only supported on the ConnectX-4 Lx and ConnectX-5
families of adapters. Enabled by default.
@@ -284,17 +285,15 @@ Run-time configuration
Effective only when Enhanced MPS is supported. The default value is 256.
-- ``tso`` parameter [int]
-
- A nonzero value enables hardware TSO.
- When hardware TSO is enabled, packets marked with TCP segmentation
- offload will be divided into segments by the hardware. Disabled by default.
-
- ``tx_vec_en`` parameter [int]
A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of
global Tx queues on the port is lesser than MLX5_VPMD_MIN_TXQS.
+ This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``.
+ When those offloads are requested the MPS send function will not be used.
+
Enabled by default on ConnectX-5.
- ``rx_vec_en`` parameter [int]
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ca44a0a59..1c95f3520 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -85,9 +85,6 @@
/* Device parameter to limit the size of inlining packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
-/* Device parameter to enable hardware TSO offload. */
-#define MLX5_TSO "tso"
-
/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"
@@ -406,8 +403,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
config->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
config->inline_max_packet_sz = tmp;
- } else if (strcmp(MLX5_TSO, key) == 0) {
- config->tso = !!tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
@@ -440,7 +435,6 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
MLX5_TXQ_MPW_EN,
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
- MLX5_TSO,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
NULL,
@@ -629,7 +623,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
.cqe_comp = cqe_comp,
.mps = mps,
.tunnel_en = tunnel_en,
- .tso = 0,
.tx_vec_en = 1,
.rx_vec_en = 1,
.mpw_hdr_dseg = 0,
@@ -793,10 +786,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv_get_num_vfs(priv, &num_vfs);
config.sriov = (num_vfs || sriov);
- if (config.tso)
- config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
+ config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
if (config.tso)
config.tso_max_payload_sz =
device_attr_ex.tso_caps.max_tso;
@@ -805,10 +797,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
" (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
- } else if (config.mps && config.tso) {
- WARN("multi-packet send not supported in conjunction "
- "with TSO. MPS disabled");
- config.mps = 0;
}
INFO("%sMPS is %s",
config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 171b3a933..8ee522069 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -112,7 +112,7 @@ struct mlx5_dev_config {
unsigned int tunnel_en:1; /* Whether tunnel is supported. */
unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
- unsigned int tso:1; /* Whether TSO is enabled. */
+ unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
unsigned int rx_vec_en:1; /* Rx vector is enabled. */
unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index d2f98769e..7b1b7aa0e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -551,7 +551,15 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-
+ uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+ if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+ ERROR("Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, supp_tx_offloads);
+ return ENOTSUP;
+ }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -672,19 +680,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
(priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
DEV_RX_OFFLOAD_TIMESTAMP;
- if (!config->mps)
- info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
- if (config->hw_csum)
- info->tx_offload_capa |=
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
- if (config->tso)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- if (config->tunnel_en)
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
@@ -1392,16 +1388,23 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
* Pointer to selected Tx burst function.
*/
eth_tx_burst_t
-priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
+priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
{
eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
struct mlx5_dev_config *config = &priv->config;
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO));
+ int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
assert(priv != NULL);
/* Select appropriate TX function. */
+ if (vlan_insert || tso)
+ return tx_pkt_burst;
if (config->mps == MLX5_MPW_ENHANCED) {
- if (priv_check_vec_tx_support(priv) > 0) {
- if (priv_check_raw_vec_tx_support(priv) > 0)
+ if (priv_check_vec_tx_support(priv, dev) > 0) {
+ if (priv_check_raw_vec_tx_support(priv, dev) > 0)
tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
tx_pkt_burst = mlx5_tx_burst_vec;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 67e3db168..3b8f71c28 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1994,16 +1994,18 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
(void)priv;
+ (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
(void)priv;
+ (void)dev;
return -ENOTSUP;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 90f129168..5045f28f3 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -201,7 +201,7 @@ struct mlx5_txq_data {
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
- uint32_t flags; /* Flags for Tx Queue. */
+ uint64_t offloads; /* Offloads for Tx Queue. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
@@ -293,6 +293,7 @@ int mlx5_priv_txq_release(struct priv *, uint16_t);
int mlx5_priv_txq_releasable(struct priv *, uint16_t);
int mlx5_priv_txq_verify(struct priv *);
void txq_alloc_elts(struct mlx5_txq_ctrl *);
+uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
/* mlx5_rxtx.c */
@@ -310,8 +311,8 @@ int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
/* Vectorized version of mlx5_rxtx.c */
-int priv_check_raw_vec_tx_support(struct priv *);
-int priv_check_vec_tx_support(struct priv *);
+int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *);
+int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *);
int rxq_check_vec_support(struct mlx5_rxq_data *);
int priv_check_vec_rx_support(struct priv *);
uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 761ed4971..f0530efbe 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -160,15 +160,15 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t ret;
/* Transmit multi-seg packets in the head of pkts list. */
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+ if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
NB_SEGS(pkts[nb_tx]) > 1)
nb_tx += txq_scatter_v(txq,
&pkts[nb_tx],
pkts_n - nb_tx);
n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
n = txq_count_contig_single_seg(&pkts[nb_tx], n);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
nb_tx += ret;
@@ -253,24 +253,20 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
*
* @param priv
* Pointer to private structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
+ struct rte_eth_dev *dev)
{
- uint16_t i;
-
- /* All the configured queues should support. */
- for (i = 0; i < priv->txqs_n; ++i) {
- struct mlx5_txq_data *txq = (*priv->txqs)[i];
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
- !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
- break;
- }
- if (i != priv->txqs_n)
+ /* Doesn't support any offload. */
+ if (offloads)
return -ENOTSUP;
return 1;
}
@@ -280,17 +276,21 @@ priv_check_raw_vec_tx_support(struct priv *priv)
*
* @param priv
* Pointer to private structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
if (!priv->config.tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
priv->config.mps != MLX5_MPW_ENHANCED ||
- priv->config.tso)
+ offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
return -ENOTSUP;
return 1;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 1f08ed0b2..7d7f016f1 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -40,6 +40,18 @@
#include "mlx5_autoconf.h"
#include "mlx5_prm.h"
+/* HW checksum offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
+ (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+
+/* HW offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_OFFLOAD_CAP \
+ (MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
/*
* Compile time sanity check for vectorized functions.
*/
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 3e2075c79..26db15a4f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -116,6 +116,63 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
}
/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx5_priv_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT);
+ struct mlx5_dev_config *config = &priv->config;
+
+ if (config->hw_csum)
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (config->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (config->tunnel_en) {
+ if (config->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (config->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ }
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
+
+ /* There are no Tx offloads which are per queue. */
+ if ((offloads & port_supp_offloads) != offloads)
+ return 0;
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return 0;
+ return 1;
+}
+
+/**
* DPDK callback to configure a TX queue.
*
* @param dev
@@ -143,6 +200,20 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
int ret = 0;
priv_lock(priv);
+ /*
+ * Don't verify port offloads for application which
+ * use the old API.
+ */
+ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx5_priv_get_tx_port_offloads(priv));
+ goto out;
+ }
if (desc <= MLX5_TX_COMP_THRESH) {
WARN("%p: number of descriptors requested for TX queue %u"
" must be higher than MLX5_TX_COMP_THRESH, using"
@@ -579,6 +650,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
unsigned int inline_max_packet_sz;
eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+ int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
0 : config->txq_inline;
@@ -603,8 +675,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
txq_ctrl->txq.max_inline =
((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- /* TSO and MPS can't be enabled concurrently. */
- assert(!config->tso || !config->mps);
if (is_empw_func) {
/* To minimize the size of data set, avoid requesting
* too large WQ.
@@ -614,7 +684,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (config->tso) {
+ } else if (tso) {
int inline_diff = txq_ctrl->txq.max_inline -
max_tso_inline;
@@ -652,7 +722,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
RTE_CACHE_LINE_SIZE;
}
}
- if (config->tso) {
+ if (tso) {
txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
max_tso_inline);
@@ -692,7 +762,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
if (!tmpl)
return NULL;
assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.flags = conf->txq_flags;
+ tmpl->txq.offloads = conf->offloads;
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
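On the application side, opting into the API this patch validates means
setting ETH_TXQ_FLAGS_IGNORE in the queue config and expressing offloads
as DEV_TX_OFFLOAD_* flags at both port and queue level (the patch treats
all Tx offloads as port-wide, so the queue set must match the port set
for the supported bits). A hedged sketch, not taken from the patch;
port_id, the descriptor count and the chosen offloads are placeholders:

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_tx_with_new_offloads_api(uint16_t port_id)
{
	/* Request Tx checksum offloads through the new API so that
	 * dev_configure() and mlx5_tx_queue_setup() above verify them.
	 */
	struct rte_eth_conf conf = {
		.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				   DEV_TX_OFFLOAD_TCP_CKSUM,
	};
	struct rte_eth_txconf txq_conf = {
		.txq_flags = ETH_TXQ_FLAGS_IGNORE, /* use .offloads */
		.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			    DEV_TX_OFFLOAD_TCP_CKSUM,
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;
	return rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &txq_conf);
}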
* [dpdk-dev] [PATCH v3 5/7] net/mlx5: convert to new Rx offloads API
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (3 preceding siblings ...)
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 4/7] net/mlx5: convert to new Tx offloads API Shahaf Shuler
@ 2018-01-10 9:17 ` Shahaf Shuler
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 6/7] net/mlx4: convert to new Tx " Shahaf Shuler
` (2 subsequent siblings)
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 9:17 UTC (permalink / raw)
To: nelio.laranjeiro, adrien.mazarguil, yskoh; +Cc: dev
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5_ethdev.c | 23 +++++---
drivers/net/mlx5/mlx5_rxq.c | 106 +++++++++++++++++++++++++++++++-----
drivers/net/mlx5/mlx5_rxtx.h | 3 +
drivers/net/mlx5/mlx5_vlan.c | 3 +-
4 files changed, 111 insertions(+), 24 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 7b1b7aa0e..278a4dfc3 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -553,6 +553,10 @@ dev_configure(struct rte_eth_dev *dev)
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t supp_rx_offloads =
+ (mlx5_priv_get_rx_port_offloads(priv) |
+ mlx5_priv_get_rx_queue_offloads(priv));
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
ERROR("Some Tx offloads are not supported "
@@ -560,6 +564,12 @@ dev_configure(struct rte_eth_dev *dev)
tx_offloads, supp_tx_offloads);
return ENOTSUP;
}
+ if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+ ERROR("Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, supp_rx_offloads);
+ return ENOTSUP;
+ }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -671,15 +681,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa =
- (config->hw_csum ?
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM) :
- 0) |
- (priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
- DEV_RX_OFFLOAD_TIMESTAMP;
-
+ info->rx_queue_offload_capa =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 057156d84..950472754 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -213,6 +213,78 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
}
/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_queue_offloads(struct priv *priv)
+{
+ struct mlx5_dev_config *config = &priv->config;
+ uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+ if (config->hw_fcs_strip)
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ if (config->hw_csum)
+ offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ if (config->hw_vlan_strip)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ return offloads;
+}
+
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t queue_supp_offloads =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);
+
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return 0;
+ if (((port_offloads ^ offloads) & port_supp_offloads))
+ return 0;
+ return 1;
+}
+
+/**
*
* @param dev
* Pointer to Ethernet device structure.
@@ -241,7 +313,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int ret = 0;
- (void)conf;
priv_lock(priv);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
@@ -257,6 +328,16 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
priv_unlock(priv);
return -EOVERFLOW;
}
+ if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx5_priv_get_rx_port_offloads(priv) |
+ mlx5_priv_get_rx_queue_offloads(priv)));
+ goto out;
+ }
if (!mlx5_priv_rxq_releasable(priv, idx)) {
ret = EBUSY;
ERROR("%p: unable to release queue index %u",
@@ -264,7 +345,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto out;
}
mlx5_priv_rxq_release(priv, idx);
- rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp);
+ rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
@@ -875,7 +956,8 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
*/
struct mlx5_rxq_ctrl*
mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket, struct rte_mempool *mp)
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
{
struct rte_eth_dev *dev = priv->dev;
struct mlx5_rxq_ctrl *tmpl;
@@ -902,7 +984,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -944,18 +1026,14 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- if (config->hw_csum)
- tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- if (config->hw_csum_l2tun)
- tmpl->rxq.csum_l2tun =
- !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- tmpl->rxq.hw_timestamp =
- !!dev->data->dev_conf.rxmode.hw_timestamp;
+ tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
+ priv->config.hw_csum_l2tun);
+ tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = (config->hw_vlan_strip &&
- !!dev->data->dev_conf.rxmode.hw_vlan_strip);
+ tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
tmpl->rxq.crc_present = 0;
} else if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 5045f28f3..d85a570f4 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -253,6 +253,7 @@ int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
int mlx5_priv_rxq_ibv_verify(struct priv *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
uint16_t, unsigned int,
+ const struct rte_eth_rxconf *,
struct rte_mempool *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
int mlx5_priv_rxq_release(struct priv *, uint16_t);
@@ -273,6 +274,8 @@ struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
uint64_t, uint16_t [], uint16_t);
int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
int mlx5_priv_hrxq_ibv_verify(struct priv *);
+uint64_t mlx5_priv_get_rx_port_offloads(struct priv *);
+uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *);
/* mlx5_txq.c */
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 2ab865264..9443e4f03 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -196,7 +196,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
unsigned int i;
if (mask & ETH_VLAN_STRIP_MASK) {
- int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
+ int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
if (!priv->config.hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
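The validation rule added in priv_is_rx_queue_offloads_allowed() above
deserves spelling out: a per-queue request is valid only if every
requested bit is supported somewhere (per queue or per port), and the
queue agrees with the port configuration on every port-level bit, since
port-level offloads cannot differ between queues. A standalone
restatement of the same check, with generic names instead of mlx5 types:

#include <stdint.h>

/* Restatement of the per-queue Rx offload check from the patch above. */
static int
rx_queue_offloads_ok(uint64_t requested, uint64_t port_conf,
		     uint64_t queue_supp, uint64_t port_supp)
{
	if ((requested & (queue_supp | port_supp)) != requested)
		return 0;	/* an unsupported offload was requested */
	if ((port_conf ^ requested) & port_supp)
		return 0;	/* disagreement on a port-level offload */
	return 1;
}

The XOR catches both directions: a queue dropping a port-level offload
the port was configured with, and a queue adding one it was not.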
* [dpdk-dev] [PATCH v3 6/7] net/mlx4: convert to new Tx offloads API
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (4 preceding siblings ...)
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 5/7] net/mlx5: convert to new Rx " Shahaf Shuler
@ 2018-01-10 9:17 ` Shahaf Shuler
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 7/7] net/mlx4: convert to new Rx " Shahaf Shuler
2018-01-10 15:24 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 9:17 UTC (permalink / raw)
To: nelio.laranjeiro, adrien.mazarguil, yskoh; +Cc: dev
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
drivers/net/mlx4/mlx4_ethdev.c | 7 +---
drivers/net/mlx4/mlx4_rxtx.h | 1 +
drivers/net/mlx4/mlx4_txq.c | 69 +++++++++++++++++++++++++++++++++++--
3 files changed, 68 insertions(+), 9 deletions(-)
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 2f69e7d4f..3602f0ad8 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -767,17 +767,12 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa = 0;
- info->tx_offload_capa = 0;
+ info->tx_offload_capa = mlx4_get_tx_port_offloads(priv);
if (priv->hw_csum) {
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM);
}
- if (priv->hw_csum_l2tun)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index b93e2bcda..bff5ae43d 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -180,6 +180,7 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
/* mlx4_txq.c */
+uint64_t mlx4_get_tx_port_offloads(struct priv *priv);
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_txconf *conf);
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index d651e4980..7664c3e1a 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -41,6 +41,7 @@
#include <stddef.h>
#include <stdint.h>
#include <string.h>
+#include <inttypes.h>
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
@@ -182,6 +183,50 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
}
/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx4_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (priv->hw_csum) {
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ }
+ if (priv->hw_csum_l2tun)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param requested
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * Nonzero when configuration is valid.
+ */
+static int
+mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
+{
+ uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t supported = mlx4_get_tx_port_offloads(priv);
+
+ return !((mandatory ^ requested) & supported);
+}
+
+/**
* DPDK callback to configure a Tx queue.
*
* @param dev
@@ -229,9 +274,22 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
};
int ret;
- (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ /*
+ * Don't verify port offloads for applications which
+ * use the old API.
+ */
+ if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx4_get_tx_port_offloads(priv));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -281,8 +339,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
- .csum = priv->hw_csum,
- .csum_l2tun = priv->hw_csum_l2tun,
+ .csum = priv->hw_csum &&
+ (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM)),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (conf->offloads &
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
/* Enable Tx loopback for VF devices. */
.lb = !!priv->vf,
.bounce_buf = bounce_buf,
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
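The one-liner !((mandatory ^ requested) & supported) in
mlx4_check_tx_queue_offloads() packs several cases into one expression.
A self-contained truth-table exercise, using stand-in flag values
rather than the real DEV_TX_OFFLOAD_* macros:

#include <assert.h>
#include <stdint.h>

/* Same shape as mlx4_check_tx_queue_offloads(): queue and port must
 * agree on every bit the PMD supports; other bits are ignored. */
static int
check(uint64_t port, uint64_t queue, uint64_t supported)
{
	return !((port ^ queue) & supported);
}

int
main(void)
{
	const uint64_t CSUM = 1ULL << 0;	/* stand-ins, not real flags */
	const uint64_t MSEG = 1ULL << 1;

	/* Queue repeats the port-level configuration: accepted. */
	assert(check(CSUM | MSEG, CSUM | MSEG, CSUM | MSEG));
	/* Queue silently drops a configured offload: rejected. */
	assert(!check(CSUM | MSEG, MSEG, CSUM | MSEG));
	/* Mismatch only on a bit the PMD does not know about: ignored. */
	assert(check(CSUM | (1ULL << 7), CSUM, CSUM | MSEG));
	return 0;
}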
* [dpdk-dev] [PATCH v3 7/7] net/mlx4: convert to new Rx offloads API
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (5 preceding siblings ...)
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 6/7] net/mlx4: convert to new Tx " Shahaf Shuler
@ 2018-01-10 9:17 ` Shahaf Shuler
2018-01-10 15:24 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 9:17 UTC (permalink / raw)
To: nelio.laranjeiro, adrien.mazarguil, yskoh; +Cc: dev
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
drivers/net/mlx4/mlx4_ethdev.c | 9 ++---
drivers/net/mlx4/mlx4_flow.c | 5 ++-
drivers/net/mlx4/mlx4_rxq.c | 77 ++++++++++++++++++++++++++++++++++---
drivers/net/mlx4/mlx4_rxtx.h | 2 +
4 files changed, 80 insertions(+), 13 deletions(-)
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 3602f0ad8..c80eab5a8 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -766,13 +766,10 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa = 0;
info->tx_offload_capa = mlx4_get_tx_port_offloads(priv);
- if (priv->hw_csum) {
- info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- }
+ info->rx_queue_offload_capa = mlx4_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx4_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 69025da42..96a6a6fa7 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -1232,7 +1232,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
* - MAC flow rules are generated from @p dev->data->mac_addrs
* (@p priv->mac array).
* - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
+ * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
* is enabled and VLAN filters are configured.
*
* @param priv
@@ -1300,7 +1300,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
};
struct ether_addr *rule_mac = ð_spec.dst;
rte_be16_t *rule_vlan =
- priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
+ (priv->dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) &&
!priv->dev->data->promiscuous ?
&vlan_spec.tci :
NULL;
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 53313c56f..98ab1d266 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -663,6 +663,63 @@ mlx4_rxq_detach(struct rxq *rxq)
}
/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_queue_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_SCATTER;
+
+ if (priv->hw_csum)
+ offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ (void)priv;
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param requested
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * Nonzero when configuration is valid.
+ */
+static int
+mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
+{
+ uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported = mlx4_get_rx_port_offloads(priv);
+
+ return !((mandatory ^ requested) & supported);
+}
+
+/**
* DPDK callback to configure a Rx queue.
*
* @param dev
@@ -707,6 +764,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx4_get_rx_port_offloads(priv) |
+ mlx4_get_rx_queue_offloads(priv)));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_rx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -746,10 +813,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts_n = rte_log2_u32(desc),
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
- .csum = (priv->hw_csum &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
- .csum_l2tun = (priv->hw_csum_l2tun &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
+ .csum = priv->hw_csum &&
+ (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
.idx = idx,
@@ -761,7 +828,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index bff5ae43d..900cab372 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -161,6 +161,8 @@ int mlx4_rss_attach(struct mlx4_rss *rss);
void mlx4_rss_detach(struct mlx4_rss *rss);
int mlx4_rxq_attach(struct rxq *rxq);
void mlx4_rxq_detach(struct rxq *rxq);
+uint64_t mlx4_get_rx_port_offloads(struct priv *priv);
+uint64_t mlx4_get_rx_queue_offloads(struct priv *priv);
int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
--
2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
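On the reporting side, both mlx4 and mlx5 now publish rx_offload_capa
as the union of port-level and per-queue capabilities, while
rx_queue_offload_capa lists only the bits that may vary per queue. A
sketch of how an application could split a desired offload set along
those lines (the helper name is made up for illustration):

#include <stdint.h>
#include <rte_ethdev.h>

/* Split a desired Rx offload set into the part that belongs in
 * rxmode.offloads (port level) and the part a queue may toggle on
 * its own, based on the two capability fields the PMD reports. */
static void
split_rx_offloads(uint16_t port_id, uint64_t wanted,
		  uint64_t *port_bits, uint64_t *queue_bits)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	wanted &= dev_info.rx_offload_capa;	/* drop unsupported bits */
	*queue_bits = wanted & dev_info.rx_queue_offload_capa;
	*port_bits = wanted & ~dev_info.rx_queue_offload_capa;
}

Note that, per the check in the patch above, each queue's
rte_eth_rxconf.offloads must still repeat the port-level bits the
port was configured with.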
* Re: [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev offloads API
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
` (6 preceding siblings ...)
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 7/7] net/mlx4: convert to new Rx " Shahaf Shuler
@ 2018-01-10 15:24 ` Shahaf Shuler
7 siblings, 0 replies; 29+ messages in thread
From: Shahaf Shuler @ 2018-01-10 15:24 UTC (permalink / raw)
To: Shahaf Shuler, Nélio Laranjeiro, Adrien Mazarguil, Yongseok Koh; +Cc: dev
Wednesday, January 10, 2018 11:17 AM, Shahaf Shuler:
> This series is to convert mlx4 and mlx5 PMDs to the new offloads API [1].
>
> On v3:
> - address almost all of Adrien's comments on mlx4.
>
> On v2:
> - New design to hold PMD specific args and combine
> them with offloads requested.
> - Fix missing IPV4 checksum flag on vector function selection.
> - Verify Txq flags ignore bit before checking for valid offloads
> configuration.
> - Removed strict offloads check from mlx4.
>
Series applied to next-net-mlx, thanks.
> [1]
> http://dpdk.org/ml/archives/dev/2017-October/077329.html
>
> Nelio Laranjeiro (1):
> net/mlx5: rename counter set in configuration
>
> Shahaf Shuler (6):
> net/mlx5: change pkt burst select function prototype
> net/mlx5: add device configuration structure
> net/mlx5: convert to new Tx offloads API
> net/mlx5: convert to new Rx offloads API
> net/mlx4: convert to new Tx offloads API
> net/mlx4: convert to new Rx offloads API
>
> doc/guides/nics/mlx5.rst | 15 +-
> drivers/net/mlx4/mlx4_ethdev.c | 16 +--
> drivers/net/mlx4/mlx4_flow.c | 5 +-
> drivers/net/mlx4/mlx4_rxq.c | 77 ++++++++++-
> drivers/net/mlx4/mlx4_rxtx.h | 3 +
> drivers/net/mlx4/mlx4_txq.c | 69 ++++++++-
> drivers/net/mlx5/mlx5.c | 190 +++++++++----------------
> drivers/net/mlx5/mlx5.h | 57 +++++---
> drivers/net/mlx5/mlx5_ethdev.c | 113 ++++++++-------
> drivers/net/mlx5/mlx5_flow.c | 2 +-
> drivers/net/mlx5/mlx5_rxq.c | 124 ++++++++++++++---
> drivers/net/mlx5/mlx5_rxtx.c | 6 +-
> drivers/net/mlx5/mlx5_rxtx.h | 10 +-
> drivers/net/mlx5/mlx5_rxtx_vec.c | 40 +++---
> drivers/net/mlx5/mlx5_rxtx_vec.h | 12 ++
> drivers/net/mlx5/mlx5_trigger.c | 4 +-
> drivers/net/mlx5/mlx5_txq.c | 254 +++++++++++++++++++++++++-------
> --
> drivers/net/mlx5/mlx5_vlan.c | 7 +-
> 18 files changed, 661 insertions(+), 343 deletions(-)
>
> --
> 2.12.0
^ permalink raw reply [flat|nested] 29+ messages in thread
end of thread, other threads:[~2018-01-10 15:24 UTC | newest]
Thread overview: 29+ messages
2017-11-23 12:02 [dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 1/6] net/mlx5: store PMD args in private structure Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 2/6] net/mlx5: convert to new Tx offloads API Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 3/6] net/mlx5: convert to new Rx " Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 4/6] net/mlx5: fix VLAN configuration after port stop Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 5/6] net/mlx4: convert to new Tx offloads API Shahaf Shuler
2017-11-23 12:02 ` [dpdk-dev] [PATCH 6/6] net/mlx4: convert to new Rx " Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 1/7] net/mlx5: change pkt burst select function prototype Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 2/7] net/mlx5: add device configuration structure Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 3/7] net/mlx5: rename counter set in configuration Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 4/7] net/mlx5: convert to new Tx offloads API Shahaf Shuler
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 5/7] net/mlx5: convert to new Rx " Shahaf Shuler
2018-01-04 10:12 ` Nelio Laranjeiro
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 6/7] net/mlx4: convert to new Tx " Shahaf Shuler
2018-01-03 17:29 ` Adrien Mazarguil
2018-01-04 11:55 ` Shahaf Shuler
2018-01-09 10:35 ` Nelio Laranjeiro
2018-01-03 7:16 ` [dpdk-dev] [PATCH v2 7/7] net/mlx4: convert to new Rx " Shahaf Shuler
2018-01-03 17:29 ` Adrien Mazarguil
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 1/7] net/mlx5: change pkt burst select function prototype Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 2/7] net/mlx5: add device configuration structure Shahaf Shuler
2018-01-10 9:16 ` [dpdk-dev] [PATCH v3 3/7] net/mlx5: rename counter set in configuration Shahaf Shuler
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 4/7] net/mlx5: convert to new Tx offloads API Shahaf Shuler
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 5/7] net/mlx5: convert to new Rx " Shahaf Shuler
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 6/7] net/mlx4: convert to new Tx " Shahaf Shuler
2018-01-10 9:17 ` [dpdk-dev] [PATCH v3 7/7] net/mlx4: convert to new Rx " Shahaf Shuler
2018-01-10 15:24 ` [dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev " Shahaf Shuler