* [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API
@ 2018-03-02 9:05 Yanglong Wu
2018-03-27 8:49 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
` (5 more replies)
0 siblings, 6 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-02 9:05 UTC (permalink / raw)
To: dev; +Cc: wei.dai, beilei.xing, wenzhuo.lu, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
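Under the new API, port-level Tx offloads are requested in rte_eth_conf.txmode.offloads at configure time and per-queue offloads in rte_eth_txconf.offloads at queue-setup time. A minimal application-side sketch (function name and offload choice are illustrative, not part of this patch):

#include <rte_ethdev.h>

/* Illustrative only: request VLAN insertion port-wide and reuse the same
 * set for the queue, since this patch reports tx_queue_offload_capa == 0
 * (no queue-specific Tx offloads).
 */
static int
setup_tx(uint16_t port_id, uint16_t nb_txd, unsigned int socket_id)
{
	struct rte_eth_conf port_conf = {
		.txmode = { .offloads = DEV_TX_OFFLOAD_VLAN_INSERT },
	};
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txq_conf;
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	rte_eth_dev_info_get(port_id, &dev_info);
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;

	return rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id,
				      &txq_conf);
}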
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 22 ++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 25 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 3cfc6a5b6..3fb99764a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3191,6 +3191,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index d4f9bde1a..a028006df 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2202,6 +2202,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 6492368ae..3024691fb 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1996,6 +1996,18 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2013,6 +2025,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa);
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx offloads API
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API Yanglong Wu
@ 2018-03-27 8:49 ` Yanglong Wu
2018-03-27 8:51 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx " Yanglong Wu
` (4 subsequent siblings)
5 siblings, 0 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-27 8:49 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
---
v2:
Add offload request checking and
rework the patch according to review comments
---
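The core addition in this revision is the per-queue offload validation. A distilled restatement of the rule (helper and parameter names are mine; the range check against the full capability set follows the form the series converges on in v3):

/* A queue-level request is valid when every requested flag is within the
 * device capabilities and, for offloads that can only be toggled per port
 * (port capa XOR queue capa), the request agrees with what was set at
 * rte_eth_dev_configure() time.
 */
static int
rx_queue_offloads_valid(uint64_t requested, uint64_t port_requested,
			uint64_t port_capa, uint64_t queue_capa)
{
	uint64_t port_only = port_capa ^ queue_capa; /* per-port-only flags */

	if ((requested & port_capa) != requested)
		return 0; /* asks for something the device cannot do */
	return !((port_requested ^ requested) & port_only);
}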
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..60cbb4d9c 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_queue_offload_capa) != requested)
+ return 0;
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
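For reference, the rxmode bit-fields removed by this conversion map directly onto DEV_RX_OFFLOAD_* flags. An application-side sketch of the equivalent configuration (the particular flag set and packet length are illustrative):

#include <rte_ethdev.h>

/* Old bit-field style, shown for comparison:
 *	.hw_vlan_strip = 1, .hw_vlan_filter = 1, .hw_strip_crc = 1,
 *	.jumbo_frame = 1, .enable_scatter = 1
 * New flag-based style used by this patch:
 */
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
			    DEV_RX_OFFLOAD_VLAN_FILTER |
			    DEV_RX_OFFLOAD_CRC_STRIP |
			    DEV_RX_OFFLOAD_JUMBO_FRAME |
			    DEV_RX_OFFLOAD_SCATTER,
		.max_rx_pkt_len = 9000, /* used together with JUMBO_FRAME */
	},
};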
* [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx offloads API
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API Yanglong Wu
2018-03-27 8:49 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
@ 2018-03-27 8:51 ` Yanglong Wu
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
` (3 subsequent siblings)
5 siblings, 0 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-27 8:51 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 24 ++++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 27 insertions(+)
---
v2:
Add offload request checking and
rework the patch according to review comments
---
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 06f11dd23..5f520d2bc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3231,6 +3231,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index cb338def9..d04cd28ae 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2185,6 +2185,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 60cbb4d9c..a68be4ab1 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1998,6 +1998,20 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ if ((requested & dev_info.tx_queue_offload_capa) != requested)
+ return 0;
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2015,6 +2029,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa);
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
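Note that the Rx counterpart rejects a failed check with -ENOTSUP, while the Tx hunk above ends after the log call. Mirroring the Rx behaviour, the complete guard in i40e_dev_tx_queue_setup() would presumably read (sketch assembled from the Rx and Tx hunks in this series):

	if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
		dev->dev_ops->dev_infos_get(dev, &dev_info);
		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
			" don't match port offloads 0x%" PRIx64
			" or supported offloads 0x%" PRIx64,
			(void *)dev, tx_conf->offloads,
			dev->data->dev_conf.txmode.offloads,
			dev_info.tx_offload_capa);
		return -ENOTSUP;
	}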
* [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx offloads API
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API Yanglong Wu
2018-03-27 8:49 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-27 8:51 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx " Yanglong Wu
@ 2018-03-28 2:04 ` Yanglong Wu
2018-03-29 9:14 ` [dpdk-dev] [PATCH v3 " Yanglong Wu
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: convert to new Rx/Tx " Yanglong Wu
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx " Yanglong Wu
` (2 subsequent siblings)
5 siblings, 2 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-28 2:04 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
---
v2:
Add offload request checking and
rework the patch according to review comments
---
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..60cbb4d9c 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx offloads API
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API Yanglong Wu
` (2 preceding siblings ...)
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
@ 2018-03-28 2:04 ` Yanglong Wu
2018-03-29 9:17 ` [dpdk-dev] [PATCH v3 " Yanglong Wu
2018-03-29 5:23 ` [dpdk-dev] [PATCH v2 " Yanglong Wu
2018-03-29 5:26 ` Yanglong Wu
5 siblings, 1 reply; 22+ messages in thread
From: Yanglong Wu @ 2018-03-28 2:04 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 24 ++++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 27 insertions(+)
---
v2:
Add offload request checking and
rework the patch according to review comments
---
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 06f11dd23..5f520d2bc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3231,6 +3231,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index cb338def9..d04cd28ae 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2185,6 +2185,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 60cbb4d9c..a68be4ab1 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1998,6 +1998,20 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ if ((requested & dev_info.tx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2015,6 +2029,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa);
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx offloads API
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API Yanglong Wu
` (3 preceding siblings ...)
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx " Yanglong Wu
@ 2018-03-29 5:23 ` Yanglong Wu
2018-03-29 5:26 ` Yanglong Wu
5 siblings, 0 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-29 5:23 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 24 ++++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 27 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 06f11dd23..5f520d2bc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3231,6 +3231,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index cb338def9..d04cd28ae 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2185,6 +2185,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 60cbb4d9c..d96810a9b 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1998,6 +1998,20 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ if ((requested & dev_info.tx_queue_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2015,6 +2029,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa);}
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx offloads API
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API Yanglong Wu
` (4 preceding siblings ...)
2018-03-29 5:23 ` [dpdk-dev] [PATCH v2 " Yanglong Wu
@ 2018-03-29 5:26 ` Yanglong Wu
2018-03-29 5:51 ` Zhang, Qi Z
5 siblings, 1 reply; 22+ messages in thread
From: Yanglong Wu @ 2018-03-29 5:26 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 24 ++++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 27 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 06f11dd23..5f520d2bc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3231,6 +3231,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index cb338def9..d04cd28ae 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2185,6 +2185,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 60cbb4d9c..d96810a9b 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1998,6 +1998,20 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ if ((requested & dev_info.tx_queue_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2015,6 +2029,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa); }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx offloads API
2018-03-29 5:26 ` Yanglong Wu
@ 2018-03-29 5:51 ` Zhang, Qi Z
0 siblings, 0 replies; 22+ messages in thread
From: Zhang, Qi Z @ 2018-03-29 5:51 UTC (permalink / raw)
To: Wu, Yanglong, dev; +Cc: Dai, Wei, Xing, Beilei
> -----Original Message-----
> From: Wu, Yanglong
> Sent: Thursday, March 29, 2018 1:27 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>; Wu, Yanglong <yanglong.wu@intel.com>
> Subject: [PATCH v2 2/2] net/i40e: convert to new Tx offloads API
I think this is v3, not v2. Please keep the version correct and always update the change log.
>
>
> +static int
> +i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> +requested) {
> + struct rte_eth_dev_info dev_info;
> + uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
> + uint64_t supported; /* All per port offloads */
> +
> + dev->dev_ops->dev_infos_get(dev, &dev_info);
> + supported = dev_info.tx_offload_capa ^
> dev_info.tx_queue_offload_capa;
> + if ((requested & dev_info.tx_queue_offload_capa) != requested)
> + return 0; /* requested range check */
I think the fix for rx should also be applied on tx, right?
Regards
Qi
> + return !((mandatory ^ requested) & supported); }
> +
> int
> i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> @@ -2015,6 +2029,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev
> *dev,
> uint16_t tx_rs_thresh, tx_free_thresh;
> uint16_t reg_idx, i, base, bsf, tc_mapping;
> int q_offset;
> + struct rte_eth_dev_info dev_info;
> +
> + if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
> + dev->dev_ops->dev_infos_get(dev, &dev_info);
> + PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
> + " don't match port offloads 0x%" PRIx64
> + " or supported offloads 0x%" PRIx64,
> + (void *)dev, tx_conf->offloads,
> + dev->data->dev_conf.txmode.offloads,
> + dev_info.tx_offload_capa); }
>
> if (hw->mac.type == I40E_MAC_VF || hw->mac.type ==
> I40E_MAC_X722_VF) {
> vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
> diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index
> cb5f8c714..10feec4a2 100644
> --- a/drivers/net/i40e/i40e_rxtx.h
> +++ b/drivers/net/i40e/i40e_rxtx.h
> @@ -149,6 +149,7 @@ struct i40e_tx_queue {
> bool q_set; /**< indicate if tx queue has been configured */
> bool tx_deferred_start; /**< don't start this queue in dev start */
> uint8_t dcb_tc; /**< Traffic class of tx queue */
> + uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
> };
>
> /** Offload features */
> --
> 2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
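Applied to the Tx helper, the suggestion above amounts to performing the range check against the full Tx capability set, as the earlier 2018-03-28 version of this patch already did (sketch, identical in spirit to the Rx fix carried in the v3 patch below):

static int
i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
{
	struct rte_eth_dev_info dev_info;
	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
	uint64_t supported; /* All per port offloads */

	dev->dev_ops->dev_infos_get(dev, &dev_info);
	supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
	if ((requested & dev_info.tx_offload_capa) != requested)
		return 0; /* requested range check */
	return !((mandatory ^ requested) & supported);
}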
* [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx offloads API
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
@ 2018-03-29 9:14 ` Yanglong Wu
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: convert to new Rx/Tx " Yanglong Wu
1 sibling, 0 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-29 9:14 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
v2:
Add offload request checking and
rework the patch according to review comments
---
v3:
fix error
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..4fdef69c1 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] net/i40e: convert to new Tx offloads API
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx " Yanglong Wu
@ 2018-03-29 9:17 ` Yanglong Wu
0 siblings, 0 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-29 9:17 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
v2:
Add offload request checking and
rework the patch according to review comments
---
v3:
fix error
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 24 ++++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 27 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 06f11dd23..5f520d2bc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3231,6 +3231,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index cb338def9..d04cd28ae 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2185,6 +2185,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 4fdef69c1..b72b81350 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1998,6 +1998,20 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ if ((requested & dev_info.tx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2015,6 +2029,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa); }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v3 0/2] net/i40e: convert to new Rx/Tx offloads API
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-29 9:14 ` [dpdk-dev] [PATCH v3 " Yanglong Wu
@ 2018-03-30 5:39 ` Yanglong Wu
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx " Yanglong Wu
` (2 more replies)
1 sibling, 3 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-30 5:39 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
This patch supports the new offloads API in i40e PF and VF.
Yanglong Wu (2):
net/i40e: convert to new Rx offloads API
net/i40e: convert to new Tx offloads API
drivers/net/i40e/i40e_ethdev.c | 28 +++++++++++++------
drivers/net/i40e/i40e_ethdev_vf.c | 22 +++++++++------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 57 ++++++++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 2 ++
5 files changed, 92 insertions(+), 20 deletions(-)
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx offloads API
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: convert to new Rx/Tx " Yanglong Wu
@ 2018-03-30 5:39 ` Yanglong Wu
2018-03-30 7:25 ` Zhang, Qi Z
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 2/2] net/i40e: convert to new Tx " Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Yanglong Wu
2 siblings, 1 reply; 22+ messages in thread
From: Yanglong Wu @ 2018-03-30 5:39 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
v2:
Add offload request checking and
rework the patch according to review comments
---
v3:
fix error
---
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..4fdef69c1 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] net/i40e: convert to new Tx offloads API
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: convert to new Rx/Tx " Yanglong Wu
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx " Yanglong Wu
@ 2018-03-30 5:39 ` Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Yanglong Wu
2 siblings, 0 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-30 5:39 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
v2:
Add offload request checking and
rework the patch according to review comments
---
v3:
fix error
---
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 24 ++++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 27 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 06f11dd23..5f520d2bc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3231,6 +3231,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index cb338def9..d04cd28ae 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2185,6 +2185,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 4fdef69c1..b72b81350 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1998,6 +1998,20 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ if ((requested & dev_info.tx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2015,6 +2029,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa); }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx offloads API
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx " Yanglong Wu
@ 2018-03-30 7:25 ` Zhang, Qi Z
2018-03-30 8:03 ` Wu, Yanglong
0 siblings, 1 reply; 22+ messages in thread
From: Zhang, Qi Z @ 2018-03-30 7:25 UTC (permalink / raw)
To: Wu, Yanglong, dev; +Cc: Dai, Wei, Xing, Beilei
Hi Yanglong
>
> if (frame_size > ETHER_MAX_LEN)
> - dev_data->dev_conf.rxmode.jumbo_frame = 1;
> + dev_data->dev_conf.rxmode.offloads |=
> + DEV_RX_OFFLOAD_JUMBO_FRAME;
> else
> - dev_data->dev_conf.rxmode.jumbo_frame = 0;
> + dev_data->dev_conf.rxmode.jumbo_frame &=
> + ~DEV_RX_OFFLOAD_JUMBO_FRAME;
Typo here, should be
dev_conf.rxmode.offloads &=
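i.e., a minimal sketch of the intended assignment (v4 of the series applies exactly this form):
	else
		dev_data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;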
>
> dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
>
> diff --git a/drivers/net/i40e/i40e_ethdev_vf.c
> b/drivers/net/i40e/i40e_ethdev_vf.c
> index c4dae4262..cb338def9 100644
> --- a/drivers/net/i40e/i40e_ethdev_vf.c
> +++ b/drivers/net/i40e/i40e_ethdev_vf.c
>
> if (frame_size > ETHER_MAX_LEN)
> - dev_data->dev_conf.rxmode.jumbo_frame = 1;
> + dev_data->dev_conf.rxmode.offloads |=
> + DEV_RX_OFFLOAD_JUMBO_FRAME;
> else
> - dev_data->dev_conf.rxmode.jumbo_frame = 0;
> -
> + dev_data->dev_conf.rxmode.jumbo_frame &=
> + ~DEV_RX_OFFLOAD_JUMBO_FRAME;
Same as above
Regards
Qi
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx offloads API
2018-03-30 7:25 ` Zhang, Qi Z
@ 2018-03-30 8:03 ` Wu, Yanglong
0 siblings, 0 replies; 22+ messages in thread
From: Wu, Yanglong @ 2018-03-30 8:03 UTC (permalink / raw)
To: Zhang, Qi Z, dev; +Cc: Dai, Wei, Xing, Beilei
Hi,
-----Original Message-----
From: Zhang, Qi Z
Sent: Friday, March 30, 2018 3:25 PM
To: Wu, Yanglong <yanglong.wu@intel.com>; dev@dpdk.org
Cc: Dai, Wei <wei.dai@intel.com>; Xing, Beilei <beilei.xing@intel.com>
Subject: RE: [PATCH v3 1/2] net/i40e: convert to new Rx offloads API
Hi Yanglong
>
> if (frame_size > ETHER_MAX_LEN)
> - dev_data->dev_conf.rxmode.jumbo_frame = 1;
> + dev_data->dev_conf.rxmode.offloads |=
> + DEV_RX_OFFLOAD_JUMBO_FRAME;
> else
> - dev_data->dev_conf.rxmode.jumbo_frame = 0;
> + dev_data->dev_conf.rxmode.jumbo_frame &=
> + ~DEV_RX_OFFLOAD_JUMBO_FRAME;
Typo here, should be
dev_conf.rxmode.offloads &=
Thanks, I will fix it!
>
> dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
>
> diff --git a/drivers/net/i40e/i40e_ethdev_vf.c
> b/drivers/net/i40e/i40e_ethdev_vf.c
> index c4dae4262..cb338def9 100644
> --- a/drivers/net/i40e/i40e_ethdev_vf.c
> +++ b/drivers/net/i40e/i40e_ethdev_vf.c
>
> if (frame_size > ETHER_MAX_LEN)
> - dev_data->dev_conf.rxmode.jumbo_frame = 1;
> + dev_data->dev_conf.rxmode.offloads |=
> + DEV_RX_OFFLOAD_JUMBO_FRAME;
> else
> - dev_data->dev_conf.rxmode.jumbo_frame = 0;
> -
> + dev_data->dev_conf.rxmode.jumbo_frame &=
> + ~DEV_RX_OFFLOAD_JUMBO_FRAME;
Same as above
Regards
Qi
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx offloads API
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: convert to new Rx/Tx " Yanglong Wu
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 2/2] net/i40e: convert to new Tx " Yanglong Wu
@ 2018-03-30 8:22 ` Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 1/2] net/i40e: convert to new Rx " Yanglong Wu
` (2 more replies)
2 siblings, 3 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-30 8:22 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
This patch supports the new offloads API in i40e PF and VF.
Yanglong Wu (2):
net/i40e: convert to new Rx offloads API
net/i40e: convert to new Tx offloads API
drivers/net/i40e/i40e_ethdev.c | 28 +++++++++++++------
drivers/net/i40e/i40e_ethdev_vf.c | 22 +++++++++------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 57 ++++++++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 2 ++
5 files changed, 92 insertions(+), 20 deletions(-)
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
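For context, a minimal application-side sketch of the per-port/per-queue offloads model this series converts i40e to. The function and field names below are the generic ethdev API of that release (rte_eth_dev_configure, rte_eth_rx_queue_setup, rxmode.offloads, rxconf.offloads); the port id, descriptor count and chosen offload bits are illustrative only and are not taken from this series:
	#include <rte_ethdev.h>
	#include <rte_mempool.h>

	static int
	setup_port(uint16_t port_id, struct rte_mempool *mp)
	{
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf conf = { 0 };
		struct rte_eth_rxconf rxq_conf;
		int ret;

		rte_eth_dev_info_get(port_id, &dev_info);

		/* Port-level Rx offloads are now requested through rxmode.offloads
		 * instead of the old rxmode bit-fields (hw_strip_crc, jumbo_frame, ...).
		 * ignore_offload_bitfield marks the config as using the new API
		 * during this transition period. */
		conf.rxmode.ignore_offload_bitfield = 1;
		conf.rxmode.offloads = DEV_RX_OFFLOAD_CRC_STRIP |
				       DEV_RX_OFFLOAD_VLAN_STRIP;

		ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
		if (ret != 0)
			return ret;

		/* i40e reports rx_queue_offload_capa == 0, so every offload is
		 * port-level and the per-queue request must simply mirror the
		 * port configuration to pass the new setup-time check. */
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = conf.rxmode.offloads;

		return rte_eth_rx_queue_setup(port_id, 0, 512,
					      rte_eth_dev_socket_id(port_id),
					      &rxq_conf, mp);
	}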
* [dpdk-dev] [PATCH v4 1/2] net/i40e: convert to new Rx offloads API
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Yanglong Wu
@ 2018-03-30 8:22 ` Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 2/2] net/i40e: convert to new Tx " Yanglong Wu
2018-03-30 10:33 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Zhang, Qi Z
2 siblings, 0 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-30 8:22 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
v2:
Add offload request checking and
rework the patch according to review comments
---
v3:
fix error
---
v4:
fix error
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..e06331d4d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..8e131d850 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..4fdef69c1 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
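A worked reading of the i40e_check_rx_queue_offloads() logic added above: because the PF and VF both report rx_queue_offload_capa == 0, `supported = rx_offload_capa ^ rx_queue_offload_capa` reduces to the full per-port capability mask, so a queue-setup request is accepted only if it contains no unknown bits and is identical to the port-level rxmode.offloads. A sketch with illustrative values (not taken from the series):
	/* Illustrative values only. */
	uint64_t rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP |
				   DEV_RX_OFFLOAD_VLAN_STRIP |
				   DEV_RX_OFFLOAD_IPV4_CKSUM;
	uint64_t rx_queue_offload_capa = 0;		/* as reported by i40e  */
	uint64_t mandatory = DEV_RX_OFFLOAD_CRC_STRIP |
			     DEV_RX_OFFLOAD_VLAN_STRIP;	/* port-level config    */
	uint64_t requested = DEV_RX_OFFLOAD_CRC_STRIP;	/* queue-level request  */
	uint64_t supported = rx_offload_capa ^ rx_queue_offload_capa;

	/* requested lies within rx_offload_capa, but (mandatory ^ requested)
	 * leaves DEV_RX_OFFLOAD_VLAN_STRIP set, which is a supported
	 * port-level bit, so the check returns 0 and queue setup fails with
	 * -ENOTSUP. Requesting exactly `mandatory` would pass. */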
* [dpdk-dev] [PATCH v4 2/2] net/i40e: convert to new Tx offloads API
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 1/2] net/i40e: convert to new Rx " Yanglong Wu
@ 2018-03-30 8:22 ` Yanglong Wu
2018-03-30 10:33 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Zhang, Qi Z
2 siblings, 0 replies; 22+ messages in thread
From: Yanglong Wu @ 2018-03-30 8:22 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
v2:
Add offload request checking and
rework the patch according to review comments
---
v3:
fix error
---
v4:
fix error
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 24 ++++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 27 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index e06331d4d..1c9b09e85 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3231,6 +3231,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 8e131d850..3faa746d0 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2185,6 +2185,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 4fdef69c1..b72b81350 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1998,6 +1998,20 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ if ((requested & dev_info.tx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2015,6 +2029,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa); }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx offloads API
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 2/2] net/i40e: convert to new Tx " Yanglong Wu
@ 2018-03-30 10:33 ` Zhang, Qi Z
2018-03-30 15:18 ` Zhang, Helin
2 siblings, 1 reply; 22+ messages in thread
From: Zhang, Qi Z @ 2018-03-30 10:33 UTC (permalink / raw)
To: Wu, Yanglong, dev; +Cc: Dai, Wei, Xing, Beilei
> -----Original Message-----
> From: Wu, Yanglong
> Sent: Friday, March 30, 2018 4:22 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>; Wu, Yanglong <yanglong.wu@intel.com>
> Subject: [PATCH v4 0/2] convert to new Rx/Tx offloads API
>
> This patch support new offloads API in i40e PF and VF.
>
> Yanglong Wu (2):
> net/i40e: convert to new Rx offloads API
> net/i40e: convert to new Tx offloads API
>
> drivers/net/i40e/i40e_ethdev.c | 28 +++++++++++++------
> drivers/net/i40e/i40e_ethdev_vf.c | 22 +++++++++------
> drivers/net/i40e/i40e_flow.c | 3 ++-
> drivers/net/i40e/i40e_rxtx.c | 57
> ++++++++++++++++++++++++++++++++++++---
> drivers/net/i40e/i40e_rxtx.h | 2 ++
> 5 files changed, 92 insertions(+), 20 deletions(-)
>
> --
> 2.11.0
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx offloads API
2018-03-30 10:33 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Zhang, Qi Z
@ 2018-03-30 15:18 ` Zhang, Helin
0 siblings, 0 replies; 22+ messages in thread
From: Zhang, Helin @ 2018-03-30 15:18 UTC (permalink / raw)
To: Zhang, Qi Z, Wu, Yanglong, dev; +Cc: Dai, Wei, Xing, Beilei
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhang, Qi Z
> Sent: Friday, March 30, 2018 6:33 PM
> To: Wu, Yanglong; dev@dpdk.org
> Cc: Dai, Wei; Xing, Beilei
> Subject: Re: [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx offloads API
>
>
>
> > -----Original Message-----
> > From: Wu, Yanglong
> > Sent: Friday, March 30, 2018 4:22 PM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>;
> > Xing, Beilei <beilei.xing@intel.com>; Wu, Yanglong
> > <yanglong.wu@intel.com>
> > Subject: [PATCH v4 0/2] convert to new Rx/Tx offloads API
> >
> > This patch support new offloads API in i40e PF and VF.
> >
> > Yanglong Wu (2):
> > net/i40e: convert to new Rx offloads API
> > net/i40e: convert to new Tx offloads API
> >
> > drivers/net/i40e/i40e_ethdev.c | 28 +++++++++++++------
> > drivers/net/i40e/i40e_ethdev_vf.c | 22 +++++++++------
> > drivers/net/i40e/i40e_flow.c | 3 ++-
> > drivers/net/i40e/i40e_rxtx.c | 57
> > ++++++++++++++++++++++++++++++++++++---
> > drivers/net/i40e/i40e_rxtx.h | 2 ++
> > 5 files changed, 92 insertions(+), 20 deletions(-)
> >
> > --
> > 2.11.0
>
> Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Series applied to dpdk-next-net-intel, thanks!
/Helin
>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx offloads API
2018-03-27 8:31 ` [dpdk-dev] [PATCH v2 1/2] " Yanglong Wu
@ 2018-03-27 12:41 ` Zhang, Qi Z
0 siblings, 0 replies; 22+ messages in thread
From: Zhang, Qi Z @ 2018-03-27 12:41 UTC (permalink / raw)
To: Wu, Yanglong, dev; +Cc: Dai, Wei, Xing, Beilei
> +static int
> +i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> +requested) {
> + struct rte_eth_dev_info dev_info;
> + uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
> + uint64_t supported; /* All per port offloads */
> +
> + dev->dev_ops->dev_infos_get(dev, &dev_info);
> + supported = dev_info.rx_offload_capa ^
> dev_info.rx_queue_offload_capa;
> + if ((requested & dev_info.rx_queue_offload_capa) != requested)
Should be dev_info.rx_offload_capa here but not dev_info.rx_queue_offload_capa.
> + return 0;
> + return !((mandatory ^ requested) & supported); }
> +
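That is, a sketch of the corrected range check (the later revisions of the patch carry exactly this form):
	if ((requested & dev_info.rx_offload_capa) != requested)
		return 0; /* requested range check */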
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx offloads API
2018-03-02 8:20 [dpdk-dev] [DPDK] net/i40e: convert to new Rx " Yanglong Wu
@ 2018-03-27 8:31 ` Yanglong Wu
2018-03-27 12:41 ` Zhang, Qi Z
0 siblings, 1 reply; 22+ messages in thread
From: Yanglong Wu @ 2018-03-27 8:31 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.jumbo_frame &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..60cbb4d9c 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_queue_offload_capa) != requested)
+ return 0;
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 22+ messages in thread
end of thread, other threads:[~2018-03-30 15:18 UTC | newest]
Thread overview: 22+ messages
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API Yanglong Wu
2018-03-27 8:49 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-27 8:51 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx " Yanglong Wu
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-29 9:14 ` [dpdk-dev] [PATCH v3 " Yanglong Wu
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: convert to new Rx/Tx " Yanglong Wu
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-30 7:25 ` Zhang, Qi Z
2018-03-30 8:03 ` Wu, Yanglong
2018-03-30 5:39 ` [dpdk-dev] [PATCH v3 2/2] net/i40e: convert to new Tx " Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-30 8:22 ` [dpdk-dev] [PATCH v4 2/2] net/i40e: convert to new Tx " Yanglong Wu
2018-03-30 10:33 ` [dpdk-dev] [PATCH v4 0/2] convert to new Rx/Tx " Zhang, Qi Z
2018-03-30 15:18 ` Zhang, Helin
2018-03-28 2:04 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: convert to new Tx " Yanglong Wu
2018-03-29 9:17 ` [dpdk-dev] [PATCH v3 " Yanglong Wu
2018-03-29 5:23 ` [dpdk-dev] [PATCH v2 " Yanglong Wu
2018-03-29 5:26 ` Yanglong Wu
2018-03-29 5:51 ` Zhang, Qi Z
-- strict thread matches above, loose matches on Subject: below --
2018-03-02 8:20 [dpdk-dev] [DPDK] net/i40e: convert to new Rx " Yanglong Wu
2018-03-27 8:31 ` [dpdk-dev] [PATCH v2 1/2] " Yanglong Wu
2018-03-27 12:41 ` Zhang, Qi Z