* [dpdk-dev] [DPDK] net/i40e: convert to new Rx offloads API
@ 2018-03-02 8:20 Yanglong Wu
2018-03-19 6:12 ` Zhang, Qi Z
2018-03-27 8:31 ` [dpdk-dev] [PATCH v2 1/2] " Yanglong Wu
0 siblings, 2 replies; 6+ messages in thread
From: Yanglong Wu @ 2018-03-02 8:20 UTC (permalink / raw)
To: dev; +Cc: wei.dai, beilei.xing, wenzhuo.lu, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 30 ++++++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 22 +++++++++++++++-------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 32 ++++++++++++++++++++++++++++----
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 68 insertions(+), 20 deletions(-)
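[Editor's note, not part of the patch: under the new offloads API an application requests per-port Rx offloads through rte_eth_conf.rxmode.offloads and per-queue ones through rte_eth_rxconf.offloads. A minimal application-side sketch, assuming DPDK 18.02-era ethdev symbols (error handling trimmed):

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
configure_rx(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { .rxmode = { .offloads = 0 } };
	struct rte_eth_rxconf rxq_conf;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Per-port Rx offloads are requested at configure time. */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
			       DEV_RX_OFFLOAD_CRC_STRIP;
	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
		return -1;

	/* Per-queue offloads must not contradict the per-port ones. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;

	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &rxq_conf, mp);
}
]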
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 508b4171c..3cfc6a5b6 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3176,6 +3176,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3183,7 +3184,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3210,6 +3217,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3329,7 +3337,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3377,9 +3386,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3387,14 +3398,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3641,6 +3652,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3659,7 +3671,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11312,9 +11324,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index fd003fe01..d4f9bde1a 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1541,7 +1541,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1575,7 +1575,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1732,7 +1732,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1752,7 +1752,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2189,6 +2189,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2196,7 +2197,11 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+ if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2219,6 +2224,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2649,9 +2655,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 16c47cf73..7b9f2bc1a 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..6492368ae 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,18 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1724,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1784,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2339,7 +2363,6 @@ i40e_reset_tx_queue(struct i40e_tx_queue *txq)
txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
txq->tx_tail = 0;
txq->nb_tx_used = 0;
@@ -2469,7 +2492,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2770,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [dpdk-dev] [DPDK] net/i40e: convert to new Rx offloads API
2018-03-02 8:20 [dpdk-dev] [DPDK] net/i40e: convert to new Rx offloads API Yanglong Wu
@ 2018-03-19 6:12 ` Zhang, Qi Z
2018-03-27 8:31 ` [dpdk-dev] [PATCH v2 1/2] " Yanglong Wu
1 sibling, 0 replies; 6+ messages in thread
From: Zhang, Qi Z @ 2018-03-19 6:12 UTC (permalink / raw)
To: Wu, Yanglong, dev; +Cc: Dai, Wei, Xing, Beilei, Lu, Wenzhuo, Wu, Yanglong
Hi Yanglong:
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yanglong Wu
> Sent: Friday, March 2, 2018 4:20 PM
> To: dev@dpdk.org
> Cc: Dai, Wei <wei.dai@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Lu,
> Wenzhuo <wenzhuo.lu@intel.com>; Wu, Yanglong <yanglong.wu@intel.com>
> Subject: [dpdk-dev] [DPDK] net/i40e: convert to new Rx offloads API
>
> Ethdev Tx offloads API has changed since:
> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This
> commit adds support for the new Tx offloads API.
>
> Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.c | 30 ++++++++++++++++++++++--------
> drivers/net/i40e/i40e_ethdev_vf.c | 22 +++++++++++++++-------
> drivers/net/i40e/i40e_flow.c | 3 ++-
> drivers/net/i40e/i40e_rxtx.c | 32
> ++++++++++++++++++++++++++++----
> drivers/net/i40e/i40e_rxtx.h | 1 +
> 5 files changed, 68 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 508b4171c..3cfc6a5b6 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -3176,6 +3176,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
> dev_info->max_mac_addrs = vsi->max_macaddrs;
> dev_info->max_vfs = pci_dev->max_vfs;
> + dev_info->rx_queue_offload_capa = 0;
> dev_info->rx_offload_capa =
> DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_QINQ_STRIP |
> @@ -3183,7 +3184,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> - DEV_RX_OFFLOAD_CRC_STRIP;
> + DEV_RX_OFFLOAD_CRC_STRIP |
> + DEV_RX_OFFLOAD_VLAN_EXTEND |
> + DEV_RX_OFFLOAD_VLAN_FILTER;
> +
> + if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
> + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
The if statement is not necessary here, since max_rx_pktlen = I40E_FRAME_SIZE_MAX.
> +
> dev_info->tx_offload_capa =
> DEV_TX_OFFLOAD_VLAN_INSERT |
> DEV_TX_OFFLOAD_QINQ_INSERT |
> @@ -3210,6 +3217,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> },
> .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
> .rx_drop_en = 0,
> + .offloads = 0,
> };
>
> dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3329,7
> +3337,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, {
> struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> struct i40e_pf *pf =
> I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
> + int qinq = dev->data->dev_conf.rxmode.offloads &
> + DEV_RX_OFFLOAD_VLAN_EXTEND;
> int ret = 0;
>
> if ((vlan_type != ETH_VLAN_TYPE_INNER && @@ -3377,9 +3386,11
> @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) {
> struct i40e_pf *pf =
> I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> struct i40e_vsi *vsi = pf->main_vsi;
> + struct rte_eth_rxmode *rxmode;
>
> + rxmode = &dev->data->dev_conf.rxmode;
> if (mask & ETH_VLAN_FILTER_MASK) {
> - if (dev->data->dev_conf.rxmode.hw_vlan_filter)
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
> i40e_vsi_config_vlan_filter(vsi, TRUE);
> else
> i40e_vsi_config_vlan_filter(vsi, FALSE); @@ -3387,14
> +3398,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
>
> if (mask & ETH_VLAN_STRIP_MASK) {
> /* Enable or disable VLAN stripping */
> - if (dev->data->dev_conf.rxmode.hw_vlan_strip)
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
> i40e_vsi_config_vlan_stripping(vsi, TRUE);
> else
> i40e_vsi_config_vlan_stripping(vsi, FALSE);
> }
>
> if (mask & ETH_VLAN_EXTEND_MASK) {
> - if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
> i40e_vsi_config_double_vlan(vsi, TRUE);
> /* Set global registers with default ethertype. */
> i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, @@
> -3641,6 +3652,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
> struct i40e_pf *pf =
> I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> struct i40e_mac_filter_info mac_filter;
> struct i40e_vsi *vsi;
> + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
> int ret;
>
> /* If VMDQ not enabled or configured, return */ @@ -3659,7 +3671,7
> @@ i40e_macaddr_add(struct rte_eth_dev *dev,
> }
>
> rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
> - if (dev->data->dev_conf.rxmode.hw_vlan_filter)
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
> mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
> else
> mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; @@ -11312,9
> +11324,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> }
>
> if (frame_size > ETHER_MAX_LEN)
> - dev_data->dev_conf.rxmode.jumbo_frame = 1;
> + dev_data->dev_conf.rxmode.offloads |=
> + DEV_RX_OFFLOAD_JUMBO_FRAME;
> else
> - dev_data->dev_conf.rxmode.jumbo_frame = 0;
> + dev_data->dev_conf.rxmode.offloads &=
> + ~DEV_RX_OFFLOAD_JUMBO_FRAME;
>
> dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
>
> diff --git a/drivers/net/i40e/i40e_ethdev_vf.c
> b/drivers/net/i40e/i40e_ethdev_vf.c
> index fd003fe01..d4f9bde1a 100644
> --- a/drivers/net/i40e/i40e_ethdev_vf.c
> +++ b/drivers/net/i40e/i40e_ethdev_vf.c
> @@ -1541,7 +1541,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
> /* For non-DPDK PF drivers, VF has no ability to disable HW
> * CRC strip, and is implicitly enabled by the PF.
> */
> - if (!conf->rxmode.hw_strip_crc) {
> + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
> vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
> if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
> (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) { @@
> -1575,7 +1575,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int
> mask)
> /* Vlan stripping setting */
> if (mask & ETH_VLAN_STRIP_MASK) {
> /* Enable or disable VLAN stripping */
> - if (dev_conf->rxmode.hw_vlan_strip)
> + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
> i40evf_enable_vlan_strip(dev);
> else
> i40evf_disable_vlan_strip(dev);
> @@ -1732,7 +1732,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct
> i40e_rx_queue *rxq)
> /**
> * Check if the jumbo frame and maximum packet length are set
> correctly
> */
> - if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
> + if (dev_data->dev_conf.rxmode.offloads &
> DEV_RX_OFFLOAD_JUMBO_FRAME) {
> if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
> rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
> PMD_DRV_LOG(ERR, "maximum packet length must be "
> @@ -1752,7 +1752,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct
> i40e_rx_queue *rxq)
> }
> }
>
> - if (dev_data->dev_conf.rxmode.enable_scatter ||
> + if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
> ||
> (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
> dev_data->scattered_rx = 1;
> }
> @@ -2189,6 +2189,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
> dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
> dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
> + dev_info->rx_queue_offload_capa = 0;
> dev_info->rx_offload_capa =
> DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_QINQ_STRIP |
> @@ -2196,7 +2197,11 @@ i40evf_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> - DEV_RX_OFFLOAD_CRC_STRIP;
> + DEV_RX_OFFLOAD_CRC_STRIP |
> + DEV_RX_OFFLOAD_SCATTER;
> + if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
> + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
Same as above.
> +
> dev_info->tx_offload_capa =
> DEV_TX_OFFLOAD_VLAN_INSERT |
> DEV_TX_OFFLOAD_QINQ_INSERT |
> @@ -2219,6 +2224,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> },
> .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
> .rx_drop_en = 0,
> + .offloads = 0,
> };
>
> dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2649,9
> +2655,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> }
>
> if (frame_size > ETHER_MAX_LEN)
> - dev_data->dev_conf.rxmode.jumbo_frame = 1;
> + dev_data->dev_conf.rxmode.offloads |=
> + DEV_RX_OFFLOAD_JUMBO_FRAME;
> else
> - dev_data->dev_conf.rxmode.jumbo_frame = 0;
> + dev_data->dev_conf.rxmode.offloads &=
> + ~DEV_RX_OFFLOAD_JUMBO_FRAME;
>
> dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
>
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index
> 16c47cf73..7b9f2bc1a 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c
> @@ -1939,7 +1939,8 @@ static uint16_t
> i40e_get_outer_vlan(struct rte_eth_dev *dev) {
> struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
> + int qinq = dev->data->dev_conf.rxmode.offloads &
> + DEV_RX_OFFLOAD_VLAN_EXTEND;
> uint64_t reg_r = 0;
> uint16_t reg_id;
> uint16_t tpid;
> diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index
> 1217e5a61..6492368ae 100644
> --- a/drivers/net/i40e/i40e_rxtx.c
> +++ b/drivers/net/i40e/i40e_rxtx.c
> @@ -1692,6 +1692,18 @@ i40e_dev_supported_ptypes_get(struct
> rte_eth_dev *dev)
> return NULL;
> }
>
> +static int
> +i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> +requested) {
> + struct rte_eth_dev_info dev_info;
> + uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
> + uint64_t supported; /* All per port offloads */
> +
> + dev->dev_ops->dev_infos_get(dev, &dev_info);
> + supported = dev_info.rx_offload_capa ^
> dev_info.rx_queue_offload_capa;
> + return !((mandatory ^ requested) & supported); }
Would you explain the logic here? It seems hard to understand,
and it would be better to add more comments even if the code is correct.
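[Editor's note, not part of the patch: one way to read the check — since rx_queue_offload_capa is a subset of rx_offload_capa, the XOR leaves only the per-port-only bits, and the final test requires the queue request to agree with the port configuration on exactly those bits. A stand-alone toy version with hypothetical flag values:

#include <stdint.h>
#include <stdio.h>

/* Same boolean logic as the patch, with generic parameter names. */
static int
queue_offloads_ok(uint64_t port_capa, uint64_t queue_capa,
		  uint64_t port_conf, uint64_t requested)
{
	/* Per-port-only bits, assuming queue_capa is a subset of port_capa. */
	uint64_t per_port_only = port_capa ^ queue_capa;

	return !((port_conf ^ requested) & per_port_only);
}

int
main(void)
{
	/* Hypothetical flag values, for illustration only. */
	const uint64_t STRIP = 1ULL << 0; /* per-port offload */
	const uint64_t CKSUM = 1ULL << 1; /* per-queue offload */

	/* Queue repeats the per-port flag -> accepted (prints 1). */
	printf("%d\n", queue_offloads_ok(STRIP | CKSUM, CKSUM, STRIP, STRIP));
	/* Queue omits the per-port flag -> rejected (prints 0). */
	printf("%d\n", queue_offloads_ok(STRIP | CKSUM, CKSUM, STRIP, 0));
	return 0;
}
]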
Regards
Qi
> +
> int
> i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> @@ -1712,6 +1724,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev
> *dev,
> uint16_t len, i;
> uint16_t reg_idx, base, bsf, tc_mapping;
> int q_offset, use_def_burst_func = 1;
> + struct rte_eth_dev_info dev_info;
> +
> + if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
> + dev->dev_ops->dev_infos_get(dev, &dev_info);
> + PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
> + " don't match port offloads 0x%" PRIx64
> + " or supported offloads 0x%" PRIx64,
> + (void *)dev, rx_conf->offloads,
> + dev->data->dev_conf.rxmode.offloads,
> + dev_info.rx_offload_capa);
> + return -ENOTSUP;
> + }
>
> if (hw->mac.type == I40E_MAC_VF || hw->mac.type ==
> I40E_MAC_X722_VF) {
> vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
> @@ -1760,8 +1784,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev
> *dev,
> rxq->queue_id = queue_idx;
> rxq->reg_idx = reg_idx;
> rxq->port_id = dev->data->port_id;
> - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
> - 0 : ETHER_CRC_LEN);
> + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.offloads &
> + DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
> rxq->drop_en = rx_conf->rx_drop_en;
> rxq->vsi = vsi;
> rxq->rx_deferred_start = rx_conf->rx_deferred_start; @@ -2339,7
> +2363,6 @@ i40e_reset_tx_queue(struct i40e_tx_queue *txq)
>
> txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
> -
> txq->tx_tail = 0;
> txq->nb_tx_used = 0;
>
> @@ -2469,7 +2492,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
>
> len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
> rxq->max_pkt_len = RTE_MIN(len,
> data->dev_conf.rxmode.max_rx_pkt_len);
> - if (data->dev_conf.rxmode.jumbo_frame == 1) {
> + if (data->dev_conf.rxmode.offloads &
> DEV_RX_OFFLOAD_JUMBO_FRAME) {
> if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
> rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
> PMD_DRV_LOG(ERR, "maximum packet length must "
> @@ -2747,6 +2770,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev,
> uint16_t queue_id,
> qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
> qinfo->conf.rx_drop_en = rxq->drop_en;
> qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
> + qinfo->conf.offloads = rxq->offloads;
> }
>
> void
> diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index
> 34cd79233..cb5f8c714 100644
> --- a/drivers/net/i40e/i40e_rxtx.h
> +++ b/drivers/net/i40e/i40e_rxtx.h
> @@ -107,6 +107,7 @@ struct i40e_rx_queue {
> bool rx_deferred_start; /**< don't start this queue in dev start */
> uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
> uint8_t dcb_tc; /**< Traffic class of rx queue */
> + uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
> };
>
> struct i40e_tx_entry {
> --
> 2.11.0
^ permalink raw reply [flat|nested] 6+ messages in thread
* [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx offloads API
2018-03-02 8:20 [dpdk-dev] [DPDK] net/i40e: convert to new Rx offloads API Yanglong Wu
2018-03-19 6:12 ` Zhang, Qi Z
@ 2018-03-27 8:31 ` Yanglong Wu
2018-03-27 12:41 ` Zhang, Qi Z
1 sibling, 1 reply; 6+ messages in thread
From: Yanglong Wu @ 2018-03-27 8:31 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..60cbb4d9c 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_queue_offload_capa) != requested)
+ return 0;
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx offloads API
2018-03-27 8:31 ` [dpdk-dev] [PATCH v2 1/2] " Yanglong Wu
@ 2018-03-27 12:41 ` Zhang, Qi Z
0 siblings, 0 replies; 6+ messages in thread
From: Zhang, Qi Z @ 2018-03-27 12:41 UTC (permalink / raw)
To: Wu, Yanglong, dev; +Cc: Dai, Wei, Xing, Beilei
> +static int
> +i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> +requested) {
> + struct rte_eth_dev_info dev_info;
> + uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
> + uint64_t supported; /* All per port offloads */
> +
> + dev->dev_ops->dev_infos_get(dev, &dev_info);
> + supported = dev_info.rx_offload_capa ^
> dev_info.rx_queue_offload_capa;
> + if ((requested & dev_info.rx_queue_offload_capa) != requested)
This should be dev_info.rx_offload_capa here, not dev_info.rx_queue_offload_capa.
> + return 0;
> + return !((mandatory ^ requested) & supported); }
> +
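[Editor's note, not part of the patch: a stand-alone sketch of the check with that correction folded in — parameter names mirror the quoted patch, and the subset test now uses the full port capability set:

#include <stdint.h>

static int
rx_queue_offloads_ok(uint64_t rx_offload_capa,
		     uint64_t rx_queue_offload_capa,
		     uint64_t mandatory, uint64_t requested)
{
	/* Bits that can only be toggled per port. */
	uint64_t per_port_only = rx_offload_capa ^ rx_queue_offload_capa;

	/* Reject anything the port cannot do at all. */
	if ((requested & rx_offload_capa) != requested)
		return 0;
	/* Per-port-only bits must match the port-level configuration. */
	return !((mandatory ^ requested) & per_port_only);
}
]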
^ permalink raw reply [flat|nested] 6+ messages in thread
* [dpdk-dev] [DPDK] net/i40e: convert to new Tx offloads API
@ 2018-03-02 9:05 Yanglong Wu
2018-03-27 8:49 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-28 2:04 ` Yanglong Wu
0 siblings, 2 replies; 6+ messages in thread
From: Yanglong Wu @ 2018-03-02 9:05 UTC (permalink / raw)
To: dev; +Cc: wei.dai, beilei.xing, wenzhuo.lu, Yanglong Wu
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 22 ++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 25 insertions(+)
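[Editor's note, not part of the patch: on the application side the new Tx offloads API replaces txq_flags with per-port txmode.offloads and per-queue rte_eth_txconf.offloads. A minimal sketch, assuming DPDK 18.02-era ethdev symbols, including the transitional ETH_TXQ_FLAGS_IGNORE flag of that era:

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_tx(uint16_t port_id, const struct rte_eth_conf *conf,
	 const struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_txconf txq_conf = dev_info->default_txconf;

	/* Tell the PMD to honour the offloads field instead of txq_flags. */
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	/* The per-queue request simply mirrors the per-port configuration. */
	txq_conf.offloads = conf->txmode.offloads;

	return rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &txq_conf);
}
]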
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 3cfc6a5b6..3fb99764a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3191,6 +3191,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index d4f9bde1a..a028006df 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2202,6 +2202,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 6492368ae..3024691fb 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1996,6 +1996,18 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2013,6 +2025,16 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index cb5f8c714..10feec4a2 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -149,6 +149,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
--
2.11.0
^ permalink raw reply [flat|nested] 6+ messages in thread
* [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx offloads API
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx " Yanglong Wu
@ 2018-03-27 8:49 ` Yanglong Wu
2018-03-28 2:04 ` Yanglong Wu
1 sibling, 0 replies; 6+ messages in thread
From: Yanglong Wu @ 2018-03-27 8:49 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
---
v2:
Add offload request checking and
rework the patch according to review comments
---
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..60cbb4d9c 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_queue_offload_capa) != requested)
+ return 0;
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
^ permalink raw reply [flat|nested] 6+ messages in thread
* [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx offloads API
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx " Yanglong Wu
2018-03-27 8:49 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
@ 2018-03-28 2:04 ` Yanglong Wu
1 sibling, 0 replies; 6+ messages in thread
From: Yanglong Wu @ 2018-03-28 2:04 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, wei.dai, beilei.xing, Yanglong Wu
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 27 +++++++++++++++++++--------
drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
drivers/net/i40e/i40e_flow.c | 3 ++-
drivers/net/i40e/i40e_rxtx.c | 33 ++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 1 +
5 files changed, 65 insertions(+), 20 deletions(-)
---
v2:
Add offload request checking and
rework the patch according to review comments
---
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..60cbb4d9c 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_offload_capa) != requested)
+ return 0; /* requested range check */
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
--
2.11.0
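[Editor's note: the per-queue offload validation added in v2 (i40e_check_rx_queue_offloads above) can be restated by the standalone sketch below. The names and signature here are illustrative, not the driver's; the only assumption is that the queue-level capability mask is a subset of the port-level one, as the ethdev API requires.]

#include <stdint.h>
#include <stdbool.h>

/* Illustrative restatement of the queue-offload check: accept a request
 * only if every flag is supported at all, and every flag that can only be
 * toggled per port matches the offloads the port was configured with. */
static bool
rx_queue_offloads_valid(uint64_t requested,  /* offloads asked for on this queue */
			uint64_t port_conf,  /* offloads enabled at the port level */
			uint64_t port_capa,  /* all supported Rx offloads */
			uint64_t queue_capa) /* offloads adjustable per queue */
{
	uint64_t port_only = port_capa ^ queue_capa; /* port-granularity flags */

	if ((requested & port_capa) != requested)
		return false; /* an unsupported offload was requested */

	/* Port-only offloads must not differ from the port configuration. */
	return ((port_conf ^ requested) & port_only) == 0;
}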
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2018-03-28 2:08 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-03-02 8:20 [dpdk-dev] [DPDK] net/i40e: convert to new Rx offloads API Yanglong Wu
2018-03-19 6:12 ` Zhang, Qi Z
2018-03-27 8:31 ` [dpdk-dev] [PATCH v2 1/2] " Yanglong Wu
2018-03-27 12:41 ` Zhang, Qi Z
2018-03-02 9:05 [dpdk-dev] [DPDK] net/i40e: convert to new Tx " Yanglong Wu
2018-03-27 8:49 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: convert to new Rx " Yanglong Wu
2018-03-28 2:04 ` Yanglong Wu