* [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes
@ 2018-09-08 20:30 Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 01/17] net/qede/base: fix to handle stag update event Rasesh Mody
` (18 more replies)
0 siblings, 19 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev
This patchset adds enhancements and fixes for QEDE PMD.
Rasesh Mody (8):
net/qede/base: fix to handle stag update event
net/qede/base: add support for OneView APIs
net/qede/base: get pre-negotiated values for stag and bw
net/qede: fix to program HW regs with ether type
net/qede/base: limit number of non ethernet queues to 64
net/qede/base: correct MCP error handler's log verbosity
net/qede/base: fix logic for sfp get/set
net/qede/base: use pointer for bytes len read
Shahed Shaikh (9):
net/qede/base: use trust mode for forced MAC limitations
net/qede: reorganize filter code
net/qede: fix flow director bug for IPv6 filter
net/qede: refactor fdir code into generic aRFS
net/qede: add support for generic flow API
net/qede: fix Rx buffer size calculation
net/qede: add support for Rx descriptor status
net/qede/base: fix MFW FLR flow bug
net/qede: add support for dev reset
drivers/net/qede/Makefile | 2 +-
drivers/net/qede/base/bcm_osal.h | 1 +
drivers/net/qede/base/ecore.h | 3 +
drivers/net/qede/base/ecore_dev.c | 85 +-
drivers/net/qede/base/ecore_dev_api.h | 3 +
drivers/net/qede/base/ecore_int.c | 32 +
drivers/net/qede/base/ecore_int.h | 1 +
drivers/net/qede/base/ecore_iov_api.h | 7 +
drivers/net/qede/base/ecore_l2.c | 26 +-
drivers/net/qede/base/ecore_l2_api.h | 11 +-
drivers/net/qede/base/ecore_mcp.c | 157 +++-
drivers/net/qede/base/ecore_mcp_api.h | 40 +-
drivers/net/qede/base/ecore_sriov.c | 36 +-
drivers/net/qede/base/mcp_public.h | 21 +
drivers/net/qede/base/reg_addr.h | 20 +
drivers/net/qede/qede_ethdev.c | 724 +--------------
drivers/net/qede/qede_ethdev.h | 65 +-
drivers/net/qede/qede_fdir.c | 470 ----------
drivers/net/qede/qede_filter.c | 1546 +++++++++++++++++++++++++++++++++
drivers/net/qede/qede_rxtx.c | 140 ++-
drivers/net/qede/qede_rxtx.h | 17 +-
21 files changed, 2177 insertions(+), 1230 deletions(-)
delete mode 100644 drivers/net/qede/qede_fdir.c
create mode 100644 drivers/net/qede/qede_filter.c
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 01/17] net/qede/base: fix to handle stag update event
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 02/17] net/qede/base: add support for OneView APIs Rasesh Mody
` (17 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev, stable
This fix adds an ecore_mcp_update_stag() handler to handle STAG update
events from the management FW and program the STAG value.
It also clears the STAG configuration on the PF when the management FW
invalidates the STAG value.
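DPDK currently stubs the new OSAL_HW_INFO_CHANGE() hook to a no-op (see
the bcm_osal.h hunk below). For illustration only, a hypothetical OSAL
implementation that consumes the notification could look like the sketch
below; qede_hw_info_change() is an assumed helper, not part of this patch:

/* Hypothetical OSAL hook; this patch defines it as a no-op in DPDK */
#define OSAL_HW_INFO_CHANGE(p_hwfn, change) \
	qede_hw_info_change(p_hwfn, change)

static void qede_hw_info_change(struct ecore_hwfn *p_hwfn,
				enum ecore_hw_info_change change)
{
	/* React to the updated outer VLAN (STAG) value */
	if (change == ECORE_HW_INFO_CHANGE_OVLAN)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "new OVLAN 0x%x\n",
			   p_hwfn->hw_info.ovlan);
}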
Fixes: ec94dbc57362 ("qede: add base driver")
Cc: stable@dpdk.org
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/bcm_osal.h | 1 +
drivers/net/qede/base/ecore_mcp.c | 46 +++++++++++++++++++++++++++++++++
drivers/net/qede/base/ecore_mcp_api.h | 4 +++
drivers/net/qede/base/mcp_public.h | 1 +
drivers/net/qede/base/reg_addr.h | 5 ++++
5 files changed, 57 insertions(+)
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 630867f..b43e0b3 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -447,6 +447,7 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
#define OSAL_CRC8(table, pdata, nbytes, crc) 0
#define OSAL_MFW_TLV_REQ(p_hwfn) nothing
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_HW_INFO_CHANGE(p_hwfn, change) nothing
#define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index ea14c17..49963c6 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1656,6 +1656,49 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
&param);
}
+static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+ FUNC_MF_CFG_OV_STAG_MASK;
+ p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
+ ecore_sp_pf_update_stag(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+ OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
+
+ /* Acknowledge the MFW */
+ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
+}
+
static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
@@ -2041,6 +2084,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
case MFW_DRV_MSG_BW_UPDATE:
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_S_TAG_UPDATE:
+ ecore_mcp_update_stag(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_FAILURE_DETECTED:
ecore_mcp_handle_fan_failure(p_hwfn);
break;
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index cfb9f99..8f4efd1 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -521,6 +521,10 @@ struct ecore_mfw_tlv_iscsi {
struct ecore_mfw_tlv_iscsi iscsi;
};
+enum ecore_hw_info_change {
+ ECORE_HW_INFO_CHANGE_OVLAN,
+};
+
/**
* @brief - returns the link params of the hw function
*
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 81aa88e..79d9aae 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1258,6 +1258,7 @@ struct public_drv_mb {
*/
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
+#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
/*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 402f620..7ed26fc 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1214,3 +1214,8 @@
#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
#define RSS_REG_RSS_RAM_MASK 0x238c10UL
+
+#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
+#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL
+#define DORQ_REG_TAG1_OVRD_MODE 0x1008b4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 0x1008c8UL
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 02/17] net/qede/base: add support for OneView APIs
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 01/17] net/qede/base: fix to handle stag update event Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 03/17] net/qede/base: get pre-negotiated values for stag and bw Rasesh Mody
` (16 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev
Add support for the following OneView APIs:
- ecore_mcp_ov_update_mtu() - Send the MTU value to the management FW.
- ecore_mcp_ov_update_mac() - Send the MAC address to the management FW.
- ecore_mcp_ov_update_eswitch() - Send the eswitch mode to the management
FW after the firmware load.
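A minimal usage sketch of the new APIs, assuming a probed PF hwfn with
its main PTT acquired (error handling elided; the MAC value is just an
example):

struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
u8 mac[ETH_ALEN] = {0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01};

(void)ecore_mcp_ov_update_mtu(p_hwfn, p_ptt, 1500);
(void)ecore_mcp_ov_update_mac(p_hwfn, p_ptt, mac);
(void)ecore_mcp_ov_update_eswitch(p_hwfn, p_ptt, ECORE_OV_ESWITCH_NONE);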
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/ecore_dev.c | 12 ++++--
drivers/net/qede/base/ecore_mcp.c | 68 +++++++++++++++++++++++++++++++--
drivers/net/qede/base/ecore_mcp_api.h | 32 ++++++++++++++++
drivers/net/qede/base/mcp_public.h | 15 ++++++++
4 files changed, 121 insertions(+), 6 deletions(-)
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 31f1f3e..be68a12 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -2599,17 +2599,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
- if (!b_default_mtu)
+ if (!b_default_mtu) {
rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.mtu);
- if (rc != ECORE_SUCCESS)
- DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ }
rc = ecore_mcp_ov_update_driver_state(p_hwfn,
p_hwfn->p_main_ptt,
ECORE_OV_DRIVER_STATE_DISABLED);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+ rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_ESWITCH_NONE);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
}
return rc;
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 49963c6..1b6fc0a 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -2869,10 +2869,72 @@ enum _ecore_status_t
}
enum _ecore_status_t
-ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 mtu)
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 mtu)
{
- return 0;
+ u32 resp = 0, param = 0, drv_mb_param = 0;
+ enum _ecore_status_t rc;
+
+ SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 *mac)
+{
+ struct ecore_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
+ DRV_MSG_CODE_VMAC_TYPE_MAC);
+ mb_params.param |= MCP_PF_ID(p_hwfn);
+ OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
+ mb_params.p_data_src = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_ov_eswitch eswitch)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+
+ switch (eswitch) {
+ case ECORE_OV_ESWITCH_NONE:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
+ break;
+ case ECORE_OV_ESWITCH_VEB:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
+ break;
+ case ECORE_OV_ESWITCH_VEPA:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
+
+ return rc;
}
enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 8f4efd1..0103293 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -185,6 +185,12 @@ enum ecore_ov_driver_state {
ECORE_OV_DRIVER_STATE_ACTIVE
};
+enum ecore_ov_eswitch {
+ ECORE_OV_ESWITCH_NONE,
+ ECORE_OV_ESWITCH_VEB,
+ ECORE_OV_ESWITCH_VEPA
+};
+
#define ECORE_MAX_NPIV_ENTRIES 128
#define ECORE_WWN_SIZE 8
struct ecore_fc_npiv_tbl {
@@ -814,6 +820,32 @@ enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mtu);
/**
+ * @brief Send MAC address to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mac - MAC address
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 *mac);
+
+/**
+ * @brief Send eswitch mode to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param eswitch - eswitch mode
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_ov_eswitch eswitch);
+
+/**
* @brief Set LED status
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 79d9aae..5575d9d 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1258,7 +1258,15 @@ struct public_drv_mb {
*/
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
+#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
+#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
+#define DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID 0x3c000000
+#define DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME 0x3d000000
+#define DRV_MSG_CODE_OEM_UPDATE_BOOT_CFG 0x3e000000
+#define DRV_MSG_CODE_OEM_RESET_TO_DEFAULT 0x3f000000
+#define DRV_MSG_CODE_OV_GET_CURR_CFG 0x40000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
/*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
@@ -1583,6 +1591,13 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_MTU_SIZE_OFFSET 0
#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 03/17] net/qede/base: get pre-negotiated values for stag and bw
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 01/17] net/qede/base: fix to handle stag update event Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 02/17] net/qede/base: add support for OneView APIs Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 04/17] net/qede: fix to program HW regs with ether type Rasesh Mody
` (15 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev
Request the management FW for the STAG and bandwidth values negotiated
prior to the driver load.
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/ecore_dev.c | 14 ++++++++++++++
drivers/net/qede/base/mcp_public.h | 3 +++
2 files changed, 17 insertions(+)
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index be68a12..958d7a0 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -2591,6 +2591,20 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
if (IS_PF(p_dev)) {
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
&resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+ }
+
+ if (IS_PF(p_dev)) {
p_hwfn = ECORE_LEADING_HWFN(p_dev);
drv_mb_param = STORM_FW_VERSION;
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 5575d9d..e9f3350 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1598,6 +1598,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 04/17] net/qede: fix to program HW regs with ether type
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (2 preceding siblings ...)
2018-09-08 20:30 ` [dpdk-dev] [PATCH 03/17] net/qede/base: get pre-negotiated values for stag and bw Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 05/17] net/qede/base: limit number of non ethernet queues to 64 Rasesh Mody
` (14 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev, stable
Fix to program the HW registers with the proper ether type.
Fixes: 36f45bce2537 ("net/qede/base: fix to support OVLAN mode")
Cc: stable@dpdk.org
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/ecore_dev.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 958d7a0..6302abc 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -2410,6 +2410,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ether_type;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
@@ -2442,6 +2443,25 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
+ if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits) ||
+ OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_dev->mf_bits))) {
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits))
+ ether_type = ETHER_TYPE_VLAN;
+ else
+ ether_type = ETHER_TYPE_QINQ;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 05/17] net/qede/base: limit number of non ethernet queues to 64
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (3 preceding siblings ...)
2018-09-08 20:30 ` [dpdk-dev] [PATCH 04/17] net/qede: fix to program HW regs with ether type Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 06/17] net/qede/base: correct MCP error handler's log verbosity Rasesh Mody
` (13 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev
Limit the number of non-Ethernet queues to 64, allowing a maximum
queue-to-status-block ratio of 2:1 in the case of a storage target.
Theoretically, a non-target storage PF can have 128 queues and SBs.
This change supports 64 entries for a target iSCSI/FCoE PF and 128 for a
non-target PF.
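As a worked example, condensed from the ecore_hw_set_feat() hunk below:
with e.g. 64 fast-path SBs, a target PF doubles the SB-based limit to 128
queue entries before clamping against the CQ/CmdQ resource count and the
ramrod array bound of NUM_OF_GLOBAL_QUEUES / 2:

u32 limit = sb_cnt.cnt;			/* e.g. 64 fast-path SBs */

if (p_hwfn->p_dev->b_is_target)		/* a CQ/CmdQ pair per SB */
	limit *= 2;			/* 64 SBs -> up to 128 queues */
*p_storage_feat = OSAL_MIN_T(u32, limit,
			     RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
*p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
			     NUM_OF_GLOBAL_QUEUES / 2);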
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/ecore.h | 3 +++
drivers/net/qede/base/ecore_dev.c | 32 ++++++++++++++++++++++++--------
drivers/net/qede/base/ecore_dev_api.h | 3 +++
3 files changed, 30 insertions(+), 8 deletions(-)
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 5d79fdf..cf66c4c 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -870,6 +870,9 @@ struct ecore_dev {
bool b_is_emul_full;
#endif
+ /* Indicates whether this PF serves a storage target */
+ bool b_is_target;
+
#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
void *firmware;
u64 fw_len;
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 6302abc..fdb62f2 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -3027,15 +3027,30 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
}
- if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
- feat_num[ECORE_FCOE_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ?
+ &feat_num[ECORE_FCOE_CQ] :
+ &feat_num[ECORE_ISCSI_CQ];
+ u32 limit = sb_cnt.cnt;
+
+ /* The number of queues should not exceed the number of FP SBs.
+ * In storage target, the queues are divided into pairs of a CQ
+ * and a CmdQ, and each pair uses a single SB. The limit in
+ * this case should allow a max ratio of 2:1 instead of 1:1.
+ */
+ if (p_hwfn->p_dev->b_is_target)
+ limit *= 2;
+ *p_storage_feat = OSAL_MIN_T(u32, limit,
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
- if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
- feat_num[ECORE_ISCSI_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ /* @DPDK */
+ /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init
+ * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2".
+ */
+ *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
+ (NUM_OF_GLOBAL_QUEUES / 2));
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
"#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
@@ -4327,6 +4342,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
p_dev->allow_mdump = p_params->allow_mdump;
p_hwfn->b_en_pacing = p_params->b_en_pacing;
+ p_dev->b_is_target = p_params->b_is_target;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 02bacc2..7cba54c 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -271,6 +271,9 @@ struct ecore_hw_prepare_params {
/* Enable/disable request by ecore client for pacing */
bool b_en_pacing;
+
+ /* Indicates whether this PF serves a storage target */
+ bool b_is_target;
};
/**
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 06/17] net/qede/base: correct MCP error handler's log verbosity
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (4 preceding siblings ...)
2018-09-08 20:30 ` [dpdk-dev] [PATCH 05/17] net/qede/base: limit number of non ethernet queues to 64 Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 07/17] net/qede/base: fix logic for sfp get/set Rasesh Mody
` (12 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev
Correct the verbosity of the slowpath message from DCB to SP.
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/ecore_mcp.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 1b6fc0a..1b6eb94 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1989,7 +1989,7 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
val);
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"UFP shmem config: mode = %d tc = %d pri_type = %d\n",
p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
p_hwfn->ufp_info.pri_type);
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 07/17] net/qede/base: fix logic for sfp get/set
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (5 preceding siblings ...)
2018-09-08 20:30 ` [dpdk-dev] [PATCH 06/17] net/qede/base: correct MCP error handler's log verbosity Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 08/17] net/qede/base: use trust mode for forced MAC limitations Rasesh Mody
` (11 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev, stable
Fix the logic for SFP get (rx_los, tx_fault, tx_disable) and SFP set
(tx_disable).
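With this fix, ecore_mcp_get_transceiver_data() reports the transceiver
state and type through separate out-parameters; a minimal caller sketch,
assuming a valid PTT:

u32 state = 0, type = 0;

ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &state, &type);
if (state == ETH_TRANSCEIVER_STATE_PRESENT)
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "transceiver type = 0x%x\n", type);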
Fixes: bdc40630a8eb ("net/qede/base: add APIs for xcvr")
Cc: stable@dpdk.org
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/ecore_mcp.c | 37 ++++++++++++++++++++-------------
drivers/net/qede/base/ecore_mcp_api.h | 2 ++
2 files changed, 24 insertions(+), 15 deletions(-)
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 1b6eb94..ea71d07 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -2201,8 +2201,10 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_tranceiver_type)
+ u32 *p_transceiver_state,
+ u32 *p_transceiver_type)
{
+ u32 transceiver_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* TODO - Add support for VFs */
@@ -2213,14 +2215,23 @@ enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
return ECORE_BUSY;
}
- if (!p_ptt) {
- *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
- rc = ECORE_INVAL;
+
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+ transceiver_info = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ *p_transceiver_state = GET_MFW_FIELD(transceiver_info,
+ ETH_TRANSCEIVER_STATE);
+
+ if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
+ *p_transceiver_type = GET_MFW_FIELD(transceiver_info,
+ ETH_TRANSCEIVER_TYPE);
} else {
- *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
- p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- transceiver_data));
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
}
return rc;
@@ -2240,15 +2251,11 @@ enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_speed_mask)
{
- u32 transceiver_data, transceiver_type, transceiver_state;
-
- ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
+ u32 transceiver_type, transceiver_state;
- transceiver_state = GET_MFW_FIELD(transceiver_data,
- ETH_TRANSCEIVER_STATE);
+ ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
- transceiver_type = GET_MFW_FIELD(transceiver_data,
- ETH_TRANSCEIVER_TYPE);
if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
return ECORE_INVAL;
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 0103293..4098bae 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -607,6 +607,7 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
*
* @param p_dev - ecore dev pointer
* @param p_ptt
+ * @param p_transceiver_state - transceiver state.
* @param p_transceiver_type - media type value
*
* @return enum _ecore_status_t -
@@ -615,6 +616,7 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
+ u32 *p_transceiver_state,
u32 *p_tranceiver_type);
/**
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 08/17] net/qede/base: use trust mode for forced MAC limitations
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (6 preceding siblings ...)
2018-09-08 20:30 ` [dpdk-dev] [PATCH 07/17] net/qede/base: fix logic for sfp get/set Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 09/17] net/qede/base: use pointer for bytes len read Rasesh Mody
` (10 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev
From: Shahed Shaikh <shahed.shaikh@cavium.com>
When trust mode is set to ON, a VF can change its MAC address even if
the PF has set a forced MAC for that VF from the HV.
Earlier, similar functionality was provided by the qed module parameter
"allow_vf_mac_change_mode".
This change makes a few changes in the behavior of the VF shadow config
(summarized in the sketch after this list):
- Let the driver track the VF MAC in the shadow config as long as trust
mode is OFF.
- Once trust mode is ON, do not care about MACs in the shadow config
(we never intend to fall back, because a restore implementation is
lacking).
- Delete the existing shadow MAC (this helps when trust mode is turned
OFF and the VF tries to add a new MAC - it won't fail then, since we
start from a clean slate).
- Skip addition and deletion of MACs in the shadow config.
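The resulting gate in the shadow-config path, condensed from the
ecore_sriov.c hunks below, skips the shadow MAC bookkeeping whenever the
VF is trusted or the legacy module knob is set:

if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
    p_vf->p_vf_info.is_trusted_configured)
	return ECORE_SUCCESS;	/* skip shadow MAC add/remove */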
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/base/ecore_iov_api.h | 7 +++++++
drivers/net/qede/base/ecore_sriov.c | 36 ++++++++++++++++++++++++---------
2 files changed, 34 insertions(+), 9 deletions(-)
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 29001d7..d398478 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -84,6 +84,13 @@ struct ecore_public_vf_info {
*/
u8 forced_mac[ETH_ALEN];
u16 forced_vlan;
+
+ /* Trusted VFs can configure promiscuous mode and
+ * set MAC address inspite PF has set forced MAC.
+ * Also store shadow promisc configuration if needed.
+ */
+ bool is_trusted_configured;
+ bool is_trusted_request;
};
struct ecore_iov_vf_init_params {
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index f7ebf7a..9e4a57b 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1968,7 +1968,8 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
if ((events & (1 << MAC_ADDR_FORCED)) ||
- p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
+ p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1989,7 +1990,8 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
return rc;
}
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
p_vf->configured_features |=
1 << VFPF_BULLETIN_MAC_ADDR;
else
@@ -3351,6 +3353,15 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
return ECORE_SUCCESS;
+ /* Since we don't have the implementation of the logic for removing
+ * a forced MAC and restoring shadow MAC, let's not worry about
+ * processing shadow copies of MAC as long as VF trust mode is ON,
+ * to keep things simple.
+ */
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
+ return ECORE_SUCCESS;
+
/* First remove entries and then add new ones */
if (p_params->opcode == ECORE_FILTER_REMOVE) {
for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
@@ -4415,17 +4426,23 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
return;
}
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured) {
feature = 1 << VFPF_BULLETIN_MAC_ADDR;
- else
+ /* Trust mode will disable Forced MAC */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << MAC_ADDR_FORCED);
+ } else {
feature = 1 << MAC_ADDR_FORCED;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ }
- OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
+ mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- /* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &=
- ~(1 << VFPF_BULLETIN_MAC_ADDR);
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
@@ -4460,7 +4477,8 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured)
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
return ECORE_SUCCESS;
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 09/17] net/qede/base: use pointer for bytes len read
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (7 preceding siblings ...)
2018-09-08 20:30 ` [dpdk-dev] [PATCH 08/17] net/qede/base: use trust mode for forced MAC limitations Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 10/17] net/qede: reorganize filter code Rasesh Mody
` (9 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev
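Use a pointer for the buffer length so that ecore_mcp_phy_read() returns
the number of bytes actually read back to its caller.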
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/ecore_mcp.c | 4 ++--
drivers/net/qede/base/ecore_mcp_api.h | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index ea71d07..364c146 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -3053,7 +3053,7 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
}
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
- u32 addr, u8 *p_buf, u32 len)
+ u32 addr, u8 *p_buf, u32 *p_len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt;
@@ -3068,7 +3068,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
(cmd == ECORE_PHY_CORE_READ) ?
DRV_MSG_CODE_PHY_CORE_READ :
DRV_MSG_CODE_PHY_RAW_READ,
- addr, &resp, &param, &len, (u32 *)p_buf);
+ addr, &resp, &param, p_len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 4098bae..7327074 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -943,7 +943,7 @@ enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
- u32 addr, u8 *p_buf, u32 len);
+ u32 addr, u8 *p_buf, u32 *p_len);
/**
* @brief Read from nvm
--
1.7.10.3
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH 10/17] net/qede: reorganize filter code
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (8 preceding siblings ...)
2018-09-08 20:30 ` [dpdk-dev] [PATCH 09/17] net/qede/base: use pointer for bytes len read Rasesh Mody
@ 2018-09-08 20:30 ` Rasesh Mody
2018-09-20 23:51 ` Ferruh Yigit
2018-09-08 20:31 ` [dpdk-dev] [PATCH 11/17] net/qede: fix flow director bug for IPv6 filter Rasesh Mody
` (8 subsequent siblings)
18 siblings, 1 reply; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:30 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev
From: Shahed Shaikh <shahed.shaikh@cavium.com>
- rename qede_fdir.c to qede_filter.c
- move all filter code to qede_filter.c
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/Makefile | 2 +-
drivers/net/qede/qede_ethdev.c | 687 +-----------------------
drivers/net/qede/qede_ethdev.h | 25 +-
drivers/net/qede/qede_fdir.c | 470 ----------------
drivers/net/qede/qede_filter.c | 1147 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 1172 insertions(+), 1159 deletions(-)
delete mode 100644 drivers/net/qede/qede_fdir.c
create mode 100644 drivers/net/qede/qede_filter.c
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 488ca1d..2ecbd8d 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -105,6 +105,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_filter.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index df52ea9..3a7c466 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -16,111 +16,6 @@
static const struct qed_eth_ops *qed_ops;
#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
-/* VXLAN tunnel classification mapping */
-const struct _qede_udp_tunn_types {
- uint16_t rte_filter_type;
- enum ecore_filter_ucast_type qede_type;
- enum ecore_tunn_clss qede_tunn_clss;
- const char *string;
-} qede_tunn_types[] = {
- {
- ETH_TUNNEL_FILTER_OMAC,
- ECORE_FILTER_MAC,
- ECORE_TUNN_CLSS_MAC_VLAN,
- "outer-mac"
- },
- {
- ETH_TUNNEL_FILTER_TENID,
- ECORE_FILTER_VNI,
- ECORE_TUNN_CLSS_MAC_VNI,
- "vni"
- },
- {
- ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_INNER_MAC,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-mac"
- },
- {
- ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_INNER_VLAN,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-vlan"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
- ECORE_FILTER_MAC_VNI_PAIR,
- ECORE_TUNN_CLSS_MAC_VNI,
- "outer-mac and vni"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-mac and inner-mac"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-mac and inner-vlan"
- },
- {
- ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_INNER_MAC_VNI_PAIR,
- ECORE_TUNN_CLSS_INNER_MAC_VNI,
- "vni and inner-mac",
- },
- {
- ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "vni and inner-vlan",
- },
- {
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_INNER_PAIR,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-mac and inner-vlan",
- },
- {
- ETH_TUNNEL_FILTER_OIP,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-IP"
- },
- {
- ETH_TUNNEL_FILTER_IIP,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "inner-IP"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_IVLAN"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_IVLAN_TENID"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_TENID,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_TENID"
- },
- {
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "OMAC_TENID_IMAC"
- },
-};
-
struct rte_qede_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint64_t offset;
@@ -614,14 +509,6 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
return 0;
}
-static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
-{
- memset(ucast, 0, sizeof(struct ecore_filter_ucast));
- ucast->is_rx_filter = true;
- ucast->is_tx_filter = true;
- /* ucast->assert_on_error = true; - For debug */
-}
-
static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
enum qed_filter_rx_mode_type type)
@@ -660,167 +547,7 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
ECORE_SPQ_MODE_CB, NULL);
}
-static int
-qede_tunnel_update(struct qede_dev *qdev,
- struct ecore_tunnel_info *tunn_info)
-{
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_hwfn *p_hwfn;
- struct ecore_ptt *p_ptt;
- int i;
-
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- if (IS_PF(edev)) {
- p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Can't acquire PTT\n");
- return -EAGAIN;
- }
- } else {
- p_ptt = NULL;
- }
-
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
- tunn_info, ECORE_SPQ_MODE_CB, NULL);
- if (IS_PF(edev))
- ecore_ptt_release(p_hwfn, p_ptt);
-
- if (rc != ECORE_SUCCESS)
- break;
- }
-
- return rc;
-}
-
-static int
-qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- if (qdev->vxlan.enable == enable)
- return ECORE_SUCCESS;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.vxlan.b_update_mode = true;
- tunn.vxlan.b_mode_enabled = enable;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
- tunn.vxlan.tun_cls = clss;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->vxlan.enable = enable;
- qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
- DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
- enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- tunn.vxlan.tun_cls);
- }
-
- return rc;
-}
-
-static int
-qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.l2_geneve.b_update_mode = true;
- tunn.l2_geneve.b_mode_enabled = enable;
- tunn.ip_geneve.b_update_mode = true;
- tunn.ip_geneve.b_mode_enabled = enable;
- tunn.l2_geneve.tun_cls = clss;
- tunn.ip_geneve.tun_cls = clss;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->geneve.enable = enable;
- qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
- DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
- enable ? "enabled" : "disabled", qdev->geneve.udp_port);
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- clss);
- }
-
- return rc;
-}
-
-static int
-qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.ip_gre.b_update_mode = true;
- tunn.ip_gre.b_mode_enabled = enable;
- tunn.ip_gre.tun_cls = clss;
- tunn.ip_gre.tun_cls = clss;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->ipgre.enable = enable;
- DP_INFO(edev, "IPGRE is %s\n",
- enable ? "enabled" : "disabled");
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- clss);
- }
-
- return rc;
-}
-
-static int
-qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- enum rte_eth_tunnel_type tunn_type, bool enable)
-{
- int rc = -EINVAL;
-
- switch (tunn_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- rc = qede_vxlan_enable(eth_dev, clss, enable);
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- rc = qede_geneve_enable(eth_dev, clss, enable);
- break;
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
- rc = qede_ipgre_enable(eth_dev, clss, enable);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
-static int
+int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
{
@@ -941,7 +668,7 @@ static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
return 0;
}
-static enum _ecore_status_t
+enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
{
@@ -1033,7 +760,7 @@ static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
+void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_sp_vport_update_params params;
@@ -2568,414 +2295,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
-static int
-qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
- struct rte_eth_udp_tunnel *tunnel_udp)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunnel_info tunn; /* @DPDK */
- uint16_t udp_port;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- memset(&tunn, 0, sizeof(tunn));
-
- switch (tunnel_udp->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
- DP_ERR(edev, "UDP port %u doesn't exist\n",
- tunnel_udp->udp_port);
- return ECORE_INVAL;
- }
- udp_port = 0;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u\n",
- tunn.vxlan_port.port);
- return rc;
- }
-
- qdev->vxlan.udp_port = udp_port;
- /* If the request is to delete UDP port and if the number of
- * VXLAN filters have reached 0 then VxLAN offload can be be
- * disabled.
- */
- if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
- return qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, false);
-
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
- DP_ERR(edev, "UDP port %u doesn't exist\n",
- tunnel_udp->udp_port);
- return ECORE_INVAL;
- }
-
- udp_port = 0;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u\n",
- tunn.vxlan_port.port);
- return rc;
- }
-
- qdev->vxlan.udp_port = udp_port;
- /* If the request is to delete UDP port and if the number of
- * GENEVE filters have reached 0 then GENEVE offload can be be
- * disabled.
- */
- if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
- return qede_geneve_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, false);
-
- break;
-
- default:
- return ECORE_INVAL;
- }
-
- return 0;
-
-}
-static int
-qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
- struct rte_eth_udp_tunnel *tunnel_udp)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunnel_info tunn; /* @DPDK */
- uint16_t udp_port;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- memset(&tunn, 0, sizeof(tunn));
-
- switch (tunnel_udp->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
- DP_INFO(edev,
- "UDP port %u for VXLAN was already configured\n",
- tunnel_udp->udp_port);
- return ECORE_SUCCESS;
- }
-
- /* Enable VxLAN tunnel with default MAC/VLAN classification if
- * it was not enabled while adding VXLAN filter before UDP port
- * update.
- */
- if (!qdev->vxlan.enable) {
- rc = qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, true);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to enable VXLAN "
- "prior to updating UDP port\n");
- return rc;
- }
- }
- udp_port = tunnel_udp->udp_port;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
- udp_port);
- return rc;
- }
-
- DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
-
- qdev->vxlan.udp_port = udp_port;
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
- DP_INFO(edev,
- "UDP port %u for GENEVE was already configured\n",
- tunnel_udp->udp_port);
- return ECORE_SUCCESS;
- }
-
- /* Enable GENEVE tunnel with default MAC/VLAN classification if
- * it was not enabled while adding GENEVE filter before UDP port
- * update.
- */
- if (!qdev->geneve.enable) {
- rc = qede_geneve_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, true);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to enable GENEVE "
- "prior to updating UDP port\n");
- return rc;
- }
- }
- udp_port = tunnel_udp->udp_port;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
- udp_port);
- return rc;
- }
-
- DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
-
- qdev->geneve.udp_port = udp_port;
- break;
- default:
- return ECORE_INVAL;
- }
-
- return 0;
-}
-
-static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
- uint32_t *clss, char *str)
-{
- uint16_t j;
- *clss = MAX_ECORE_TUNN_CLSS;
-
- for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
- if (filter == qede_tunn_types[j].rte_filter_type) {
- *type = qede_tunn_types[j].qede_type;
- *clss = qede_tunn_types[j].qede_tunn_clss;
- strcpy(str, qede_tunn_types[j].string);
- return;
- }
- }
-}
-
-static int
-qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
- const struct rte_eth_tunnel_filter_conf *conf,
- uint32_t type)
-{
- /* Init commmon ucast params first */
- qede_set_ucast_cmn_params(ucast);
-
- /* Copy out the required fields based on classification type */
- ucast->type = type;
-
- switch (type) {
- case ECORE_FILTER_VNI:
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_VLAN:
- ucast->vlan = conf->inner_vlan;
- break;
- case ECORE_FILTER_MAC:
- memcpy(ucast->mac, conf->outer_mac.addr_bytes,
- ETHER_ADDR_LEN);
- break;
- case ECORE_FILTER_INNER_MAC:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- break;
- case ECORE_FILTER_MAC_VNI_PAIR:
- memcpy(ucast->mac, conf->outer_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_MAC_VNI_PAIR:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_PAIR:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vlan = conf->inner_vlan;
- break;
- default:
- return -EINVAL;
- }
-
- return ECORE_SUCCESS;
-}
-
-static int
-_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
- const struct rte_eth_tunnel_filter_conf *conf,
- __attribute__((unused)) enum rte_filter_op filter_op,
- enum ecore_tunn_clss *clss,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_ucast ucast = {0};
- enum ecore_filter_ucast_type type;
- uint16_t filter_type = 0;
- char str[80];
- int rc;
-
- filter_type = conf->filter_type;
- /* Determine if the given filter classification is supported */
- qede_get_ecore_tunn_params(filter_type, &type, clss, str);
- if (*clss == MAX_ECORE_TUNN_CLSS) {
- DP_ERR(edev, "Unsupported filter type\n");
- return -EINVAL;
- }
- /* Init tunnel ucast params */
- rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
- conf->filter_type);
- return rc;
- }
- DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
- str, filter_op, ucast.type);
-
- ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
-
- /* Skip MAC/VLAN if filter is based on VNI */
- if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
- rc = qede_mac_int_ops(eth_dev, &ucast, add);
- if ((rc == 0) && add) {
- /* Enable accept anyvlan */
- qede_config_accept_any_vlan(qdev, true);
- }
- } else {
- rc = qede_ucast_filter(eth_dev, &ucast, add);
- if (rc == 0)
- rc = ecore_filter_ucast_cmd(edev, &ucast,
- ECORE_SPQ_MODE_CB, NULL);
- }
-
- return rc;
-}
-
-static int
-qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- const struct rte_eth_tunnel_filter_conf *conf)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
- bool add;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- switch (filter_op) {
- case RTE_ETH_FILTER_ADD:
- add = true;
- break;
- case RTE_ETH_FILTER_DELETE:
- add = false;
- break;
- default:
- DP_ERR(edev, "Unsupported operation %d\n", filter_op);
- return -EINVAL;
- }
-
- if (IS_VF(edev))
- return qede_tunn_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN,
- conf->tunnel_type, add);
-
- rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- if (add) {
- if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
- qdev->vxlan.num_filters++;
- qdev->vxlan.filter_type = conf->filter_type;
- } else { /* GENEVE */
- qdev->geneve.num_filters++;
- qdev->geneve.filter_type = conf->filter_type;
- }
-
- if (!qdev->vxlan.enable || !qdev->geneve.enable ||
- !qdev->ipgre.enable)
- return qede_tunn_enable(eth_dev, clss,
- conf->tunnel_type,
- true);
- } else {
- if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
- qdev->vxlan.num_filters--;
- else /*GENEVE*/
- qdev->geneve.num_filters--;
-
- /* Disable VXLAN if VXLAN filters become 0 */
- if ((qdev->vxlan.num_filters == 0) ||
- (qdev->geneve.num_filters == 0))
- return qede_tunn_enable(eth_dev, clss,
- conf->tunnel_type,
- false);
- }
-
- return 0;
-}
-
-int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_tunnel_filter_conf *filter_conf =
- (struct rte_eth_tunnel_filter_conf *)arg;
-
- switch (filter_type) {
- case RTE_ETH_FILTER_TUNNEL:
- switch (filter_conf->tunnel_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
- DP_INFO(edev,
- "Packet steering to the specified Rx queue"
- " is not supported with UDP tunneling");
- return(qede_tunn_filter_config(eth_dev, filter_op,
- filter_conf));
- case RTE_TUNNEL_TYPE_TEREDO:
- case RTE_TUNNEL_TYPE_NVGRE:
- case RTE_L2_TUNNEL_TYPE_E_TAG:
- DP_ERR(edev, "Unsupported tunnel type %d\n",
- filter_conf->tunnel_type);
- return -EINVAL;
- case RTE_TUNNEL_TYPE_NONE:
- default:
- return 0;
- }
- break;
- case RTE_ETH_FILTER_FDIR:
- return qede_fdir_filter_conf(eth_dev, filter_op, arg);
- case RTE_ETH_FILTER_NTUPLE:
- return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
- case RTE_ETH_FILTER_MACVLAN:
- case RTE_ETH_FILTER_ETHERTYPE:
- case RTE_ETH_FILTER_FLEXIBLE:
- case RTE_ETH_FILTER_SYN:
- case RTE_ETH_FILTER_HASH:
- case RTE_ETH_FILTER_L2_TUNNEL:
- case RTE_ETH_FILTER_MAX:
- default:
- DP_ERR(edev, "Unsupported filter type %d\n",
- filter_type);
- return -EINVAL;
- }
-
- return 0;
-}
-
static const struct eth_dev_ops qede_eth_dev_ops = {
.dev_configure = qede_dev_configure,
.dev_infos_get = qede_dev_info_get,
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 6e9a5b4..d54f19b 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -215,6 +215,15 @@ struct qede_dev {
void *ethdev;
};
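+/* Clear a ucast filter entry and mark it to apply on both Rx and Tx */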
+static inline void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
+{
+ memset(ucast, 0, sizeof(struct ecore_filter_ucast));
+ ucast->is_rx_filter = true;
+ ucast->is_tx_filter = true;
+ /* ucast->assert_on_error = true; - For debug */
+}
+
/* Non-static functions */
int qede_config_rss(struct rte_eth_dev *eth_dev);
@@ -235,9 +244,6 @@ int qede_link_update(struct rte_eth_dev *eth_dev,
int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
enum rte_filter_op op, void *arg);
-int qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op, void *arg);
-
int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op, void *arg);
@@ -255,5 +261,16 @@ uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg);
-
+int qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+int qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+
+enum _ecore_status_t
+qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add);
+void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg);
+int qede_ucast_filter(struct rte_eth_dev *eth_dev,
+ struct ecore_filter_ucast *ucast,
+ bool add);
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
deleted file mode 100644
index 83580d0..0000000
--- a/drivers/net/qede/qede_fdir.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2017 Cavium Inc.
- * All rights reserved.
- * www.cavium.com
- */
-
-#include <rte_udp.h>
-#include <rte_tcp.h>
-#include <rte_sctp.h>
-#include <rte_errno.h>
-
-#include "qede_ethdev.h"
-
-#define IP_VERSION (0x40)
-#define IP_HDRLEN (0x5)
-#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
-#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
-#define QEDE_FDIR_IPV4_DEF_TTL (64)
-
-/* Sum of length of header types of L2, L3, L4.
- * L2 : ether_hdr + vlan_hdr + vxlan_hdr
- * L3 : ipv6_hdr
- * L4 : tcp_hdr
- */
-#define QEDE_MAX_FDIR_PKT_LEN (86)
-
-#ifndef IPV6_ADDR_LEN
-#define IPV6_ADDR_LEN (16)
-#endif
-
-#define QEDE_VALID_FLOW(flow_type) \
- ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
-
-/* Note: Flowdir support is only partial.
- * For ex: drop_queue, FDIR masks, flex_conf are not supported.
- * Parameters like pballoc/status fields are irrelevant here.
- */
-int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf;
-
- /* check FDIR modes */
- switch (fdir->mode) {
- case RTE_FDIR_MODE_NONE:
- qdev->fdir_info.arfs.arfs_enable = false;
- DP_INFO(edev, "flowdir is disabled\n");
- break;
- case RTE_FDIR_MODE_PERFECT:
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- qdev->fdir_info.arfs.arfs_enable = false;
- return -ENOTSUP;
- }
- qdev->fdir_info.arfs.arfs_enable = true;
- DP_INFO(edev, "flowdir is enabled\n");
- break;
- case RTE_FDIR_MODE_PERFECT_TUNNEL:
- case RTE_FDIR_MODE_SIGNATURE:
- case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
- DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
- return -ENOTSUP;
- }
-
- return 0;
-}
-
-void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct qede_fdir_entry *tmp = NULL;
-
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (tmp) {
- if (tmp->mz)
- rte_memzone_free(tmp->mz);
- SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
- qede_fdir_entry, list);
- rte_free(tmp);
- }
- }
-}
-
-static int
-qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir_filter,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
- struct qede_fdir_entry *tmp = NULL;
- struct qede_fdir_entry *fdir = NULL;
- const struct rte_memzone *mz;
- struct ecore_hwfn *p_hwfn;
- enum _ecore_status_t rc;
- uint16_t pkt_len;
- void *pkt;
-
- if (add) {
- if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
- DP_ERR(edev, "Reached max flowdir filter limit\n");
- return -EINVAL;
- }
- fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
- RTE_CACHE_LINE_SIZE);
- if (!fdir) {
- DP_ERR(edev, "Did not allocate memory for fdir\n");
- return -ENOMEM;
- }
- }
- /* soft_id could have been used as memzone string, but soft_id is
- * not currently used so it has no significance.
- */
- snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
- (unsigned long)rte_get_timer_cycles());
- mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
- SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
- if (!mz) {
- DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
- rte_strerror(rte_errno));
- rc = -rte_errno;
- goto err1;
- }
-
- pkt = mz->addr;
- memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
- pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
- &qdev->fdir_info.arfs);
- if (pkt_len == 0) {
- rc = -EINVAL;
- goto err2;
- }
- DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
- if (add) {
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
- DP_INFO(edev, "flowdir filter exist\n");
- rc = 0;
- goto err2;
- }
- }
- } else {
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
- break;
- }
- if (!tmp) {
- DP_ERR(edev, "flowdir filter does not exist\n");
- rc = -EEXIST;
- goto err2;
- }
- }
- p_hwfn = ECORE_LEADING_HWFN(edev);
- if (add) {
- if (!qdev->fdir_info.arfs.arfs_enable) {
- /* Force update */
- eth_dev->data->dev_conf.fdir_conf.mode =
- RTE_FDIR_MODE_PERFECT;
- qdev->fdir_info.arfs.arfs_enable = true;
- DP_INFO(edev, "Force enable flowdir in perfect mode\n");
- }
- /* Enable ARFS searcher with updated flow_types */
- ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
- &qdev->fdir_info.arfs);
- }
- /* configure filter with ECORE_SPQ_MODE_EBLOCK */
- rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
- (dma_addr_t)mz->iova,
- pkt_len,
- fdir_filter->action.rx_queue,
- 0, add);
- if (rc == ECORE_SUCCESS) {
- if (add) {
- fdir->rx_queue = fdir_filter->action.rx_queue;
- fdir->pkt_len = pkt_len;
- fdir->mz = mz;
- SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
- fdir, list);
- qdev->fdir_info.filter_count++;
- DP_INFO(edev, "flowdir filter added, count = %d\n",
- qdev->fdir_info.filter_count);
- } else {
- rte_memzone_free(tmp->mz);
- SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
- qede_fdir_entry, list);
- rte_free(tmp); /* the node deleted */
- rte_memzone_free(mz); /* temp node allocated */
- qdev->fdir_info.filter_count--;
- DP_INFO(edev, "Fdir filter deleted, count = %d\n",
- qdev->fdir_info.filter_count);
- }
- } else {
- DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
- rc, qdev->fdir_info.filter_count);
- }
-
- /* Disable ARFS searcher if there are no more filters */
- if (qdev->fdir_info.filter_count == 0) {
- memset(&qdev->fdir_info.arfs, 0,
- sizeof(struct ecore_arfs_config_params));
- DP_INFO(edev, "Disabling flowdir\n");
- qdev->fdir_info.arfs.arfs_enable = false;
- ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
- &qdev->fdir_info.arfs);
- }
- return 0;
-
-err2:
- rte_memzone_free(mz);
-err1:
- if (add)
- rte_free(fdir);
- return rc;
-}
-
-static int
-qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
- DP_ERR(edev, "invalid flow_type input\n");
- return -EINVAL;
- }
-
- if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
- DP_ERR(edev, "invalid queue number %u\n",
- fdir->action.rx_queue);
- return -EINVAL;
- }
-
- if (fdir->input.flow_ext.is_vf) {
- DP_ERR(edev, "flowdir is not supported over VF\n");
- return -EINVAL;
- }
-
- return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
-}
-
-/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
-uint16_t
-qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir,
- void *buff,
- struct ecore_arfs_config_params *params)
-
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- uint16_t *ether_type;
- uint8_t *raw_pkt;
- struct rte_eth_fdir_input *input;
- static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
- struct ipv4_hdr *ip;
- struct ipv6_hdr *ip6;
- struct udp_hdr *udp;
- struct tcp_hdr *tcp;
- uint16_t len;
- static const uint8_t next_proto[] = {
- [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
- };
- raw_pkt = (uint8_t *)buff;
- input = &fdir->input;
- DP_INFO(edev, "flow_type %d\n", input->flow_type);
-
- len = 2 * sizeof(struct ether_addr);
- raw_pkt += 2 * sizeof(struct ether_addr);
- if (input->flow_ext.vlan_tci) {
- DP_INFO(edev, "adding VLAN header\n");
- rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
- rte_memcpy(raw_pkt + sizeof(uint16_t),
- &input->flow_ext.vlan_tci,
- sizeof(uint16_t));
- raw_pkt += sizeof(vlan_frame);
- len += sizeof(vlan_frame);
- }
- ether_type = (uint16_t *)raw_pkt;
- raw_pkt += sizeof(uint16_t);
- len += sizeof(uint16_t);
-
- switch (input->flow_type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- /* fill the common ip header */
- ip = (struct ipv4_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
- ip->total_length = sizeof(struct ipv4_hdr);
- ip->next_proto_id = input->flow.ip4_flow.proto ?
- input->flow.ip4_flow.proto :
- next_proto[input->flow_type];
- ip->time_to_live = input->flow.ip4_flow.ttl ?
- input->flow.ip4_flow.ttl :
- QEDE_FDIR_IPV4_DEF_TTL;
- ip->type_of_service = input->flow.ip4_flow.tos;
- ip->dst_addr = input->flow.ip4_flow.dst_ip;
- ip->src_addr = input->flow.ip4_flow.src_ip;
- len += sizeof(struct ipv4_hdr);
- params->ipv4 = true;
-
- raw_pkt = (uint8_t *)buff;
- /* UDP */
- if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
- udp->dst_port = input->flow.udp4_flow.dst_port;
- udp->src_port = input->flow.udp4_flow.src_port;
- udp->dgram_len = sizeof(struct udp_hdr);
- len += sizeof(struct udp_hdr);
- /* adjust ip total_length */
- ip->total_length += sizeof(struct udp_hdr);
- params->udp = true;
- } else { /* TCP */
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp4_flow.src_port;
- tcp->dst_port = input->flow.tcp4_flow.dst_port;
- tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
- len += sizeof(struct tcp_hdr);
- /* adjust ip total_length */
- ip->total_length += sizeof(struct tcp_hdr);
- params->tcp = true;
- }
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- ip6 = (struct ipv6_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
- ip6->proto = input->flow.ipv6_flow.proto ?
- input->flow.ipv6_flow.proto :
- next_proto[input->flow_type];
- rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
- IPV6_ADDR_LEN);
- rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
- IPV6_ADDR_LEN);
- len += sizeof(struct ipv6_hdr);
-
- raw_pkt = (uint8_t *)buff;
- /* UDP */
- if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
- udp->src_port = input->flow.udp6_flow.dst_port;
- udp->dst_port = input->flow.udp6_flow.src_port;
- len += sizeof(struct udp_hdr);
- params->udp = true;
- } else { /* TCP */
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp4_flow.src_port;
- tcp->dst_port = input->flow.tcp4_flow.dst_port;
- tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
- len += sizeof(struct tcp_hdr);
- params->tcp = true;
- }
- break;
- default:
- DP_ERR(edev, "Unsupported flow_type %u\n",
- input->flow_type);
- return 0;
- }
-
- return len;
-}
-
-int
-qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_fdir_filter *fdir;
- int ret;
-
- fdir = (struct rte_eth_fdir_filter *)arg;
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- /* Typically used to query flowdir support */
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- return -ENOTSUP;
- }
- return 0; /* means supported */
- case RTE_ETH_FILTER_ADD:
- ret = qede_fdir_filter_add(eth_dev, fdir, 1);
- break;
- case RTE_ETH_FILTER_DELETE:
- ret = qede_fdir_filter_add(eth_dev, fdir, 0);
- break;
- case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_UPDATE:
- case RTE_ETH_FILTER_INFO:
- return -ENOTSUP;
- break;
- default:
- DP_ERR(edev, "unknown operation %u", filter_op);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_ntuple_filter *ntuple;
- struct rte_eth_fdir_filter fdir_entry;
- struct rte_eth_tcpv4_flow *tcpv4_flow;
- struct rte_eth_udpv4_flow *udpv4_flow;
- bool add = false;
-
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- /* Typically used to query fdir support */
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- return -ENOTSUP;
- }
- return 0; /* means supported */
- case RTE_ETH_FILTER_ADD:
- add = true;
- break;
- case RTE_ETH_FILTER_DELETE:
- break;
- case RTE_ETH_FILTER_INFO:
- case RTE_ETH_FILTER_GET:
- case RTE_ETH_FILTER_UPDATE:
- case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_SET:
- case RTE_ETH_FILTER_STATS:
- case RTE_ETH_FILTER_OP_MAX:
- DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
- return -ENOTSUP;
- }
- ntuple = (struct rte_eth_ntuple_filter *)arg;
- /* Internally convert ntuple to fdir entry */
- memset(&fdir_entry, 0, sizeof(fdir_entry));
- if (ntuple->proto == IPPROTO_TCP) {
- fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
- tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
- tcpv4_flow->ip.src_ip = ntuple->src_ip;
- tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
- tcpv4_flow->ip.proto = IPPROTO_TCP;
- tcpv4_flow->src_port = ntuple->src_port;
- tcpv4_flow->dst_port = ntuple->dst_port;
- } else {
- fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
- udpv4_flow = &fdir_entry.input.flow.udp4_flow;
- udpv4_flow->ip.src_ip = ntuple->src_ip;
- udpv4_flow->ip.dst_ip = ntuple->dst_ip;
- udpv4_flow->ip.proto = IPPROTO_TCP;
- udpv4_flow->src_port = ntuple->src_port;
- udpv4_flow->dst_port = ntuple->dst_port;
- }
-
- fdir_entry.action.rx_queue = ntuple->queue;
-
- return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
-}
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
new file mode 100644
index 0000000..b8460a0
--- /dev/null
+++ b/drivers/net/qede/qede_filter.c
@@ -0,0 +1,1147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_errno.h>
+
+#include "qede_ethdev.h"
+
+/* UDP tunnel (VXLAN/GENEVE) filter classification mapping */
+const struct _qede_udp_tunn_types {
+ uint16_t rte_filter_type;
+ enum ecore_filter_ucast_type qede_type;
+ enum ecore_tunn_clss qede_tunn_clss;
+ const char *string;
+} qede_tunn_types[] = {
+ {
+ ETH_TUNNEL_FILTER_OMAC,
+ ECORE_FILTER_MAC,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ "outer-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_VNI,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "outer-mac and vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ "vni and inner-mac",
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "vni and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_OIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-IP"
+ },
+ {
+ ETH_TUNNEL_FILTER_IIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "inner-IP"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "OMAC_TENID_IMAC"
+ },
+};
+
+#define IP_VERSION (0x40)
+#define IP_HDRLEN (0x5)
+#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
+#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
+#define QEDE_FDIR_IPV4_DEF_TTL (64)
+
+/* Sum of length of header types of L2, L3, L4.
+ * L2 : ether_hdr + vlan_hdr + vxlan_hdr
+ * L3 : ipv6_hdr
+ * L4 : tcp_hdr
+ */
+#define QEDE_MAX_FDIR_PKT_LEN (86)
+
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN (16)
+#endif
+
+static inline bool qede_valid_flow(uint16_t flow_type)
+{
+ return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
+}
+
+/* Note: Flowdir support is only partial.
+ * For ex: drop_queue, FDIR masks, flex_conf are not supported.
+ * Parameters like pballoc/status fields are irrelevant here.
+ */
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf;
+
+ /* check FDIR modes */
+ switch (fdir->mode) {
+ case RTE_FDIR_MODE_NONE:
+ qdev->fdir_info.arfs.arfs_enable = false;
+ DP_INFO(edev, "flowdir is disabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ return -ENOTSUP;
+ }
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "flowdir is enabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT_TUNNEL:
+ case RTE_FDIR_MODE_SIGNATURE:
+ case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
+ DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_fdir_entry *tmp = NULL;
+
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (tmp) {
+ if (tmp->mz)
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp);
+ }
+ }
+}
+
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir_filter,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
+ struct qede_fdir_entry *tmp = NULL;
+ struct qede_fdir_entry *fdir = NULL;
+ const struct rte_memzone *mz;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc;
+ uint16_t pkt_len;
+ void *pkt;
+
+ if (add) {
+ if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+ DP_ERR(edev, "Reached max flowdir filter limit\n");
+ return -EINVAL;
+ }
+ fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!fdir) {
+ DP_ERR(edev, "Did not allocate memory for fdir\n");
+ return -ENOMEM;
+ }
+ }
+ /* soft_id could have been used as memzone string, but soft_id is
+ * not currently used so it has no significance.
+ */
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
+ SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
+ rte_strerror(rte_errno));
+ rc = -rte_errno;
+ goto err1;
+ }
+
+ pkt = mz->addr;
+ memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
+ pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
+ &qdev->fdir_info.arfs);
+ if (pkt_len == 0) {
+ rc = -EINVAL;
+ goto err2;
+ }
+ DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
+ DP_INFO(edev, "flowdir filter exist\n");
+ rc = 0;
+ goto err2;
+ }
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
+ break;
+ }
+ if (!tmp) {
+ DP_ERR(edev, "flowdir filter does not exist\n");
+ rc = -ENOENT;
+ goto err2;
+ }
+ }
+ p_hwfn = ECORE_LEADING_HWFN(edev);
+ if (add) {
+ if (!qdev->fdir_info.arfs.arfs_enable) {
+ /* Force update */
+ eth_dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_PERFECT;
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "Force enable flowdir in perfect mode\n");
+ }
+ /* Enable ARFS searcher with updated flow_types */
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ /* configure filter with ECORE_SPQ_MODE_EBLOCK */
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
+ (dma_addr_t)mz->iova,
+ pkt_len,
+ fdir_filter->action.rx_queue,
+ 0, add);
+ if (rc == ECORE_SUCCESS) {
+ if (add) {
+ fdir->rx_queue = fdir_filter->action.rx_queue;
+ fdir->pkt_len = pkt_len;
+ fdir->mz = mz;
+ SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
+ fdir, list);
+ qdev->fdir_info.filter_count++;
+ DP_INFO(edev, "flowdir filter added, count = %d\n",
+ qdev->fdir_info.filter_count);
+ } else {
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp); /* the node deleted */
+ rte_memzone_free(mz); /* temp node allocated */
+ qdev->fdir_info.filter_count--;
+ DP_INFO(edev, "Fdir filter deleted, count = %d\n",
+ qdev->fdir_info.filter_count);
+ }
+ } else {
+ DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
+ rc, qdev->fdir_info.filter_count);
+ }
+
+ /* Disable ARFS searcher if there are no more filters */
+ if (qdev->fdir_info.filter_count == 0) {
+ memset(&qdev->fdir_info.arfs, 0,
+ sizeof(struct ecore_arfs_config_params));
+ DP_INFO(edev, "Disabling flowdir\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ return rc;
+
+err2:
+ rte_memzone_free(mz);
+err1:
+ if (add)
+ rte_free(fdir);
+ return rc;
+}
+
+static int
+qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (!qede_valid_flow(fdir->input.flow_type)) {
+ DP_ERR(edev, "invalid flow_type input\n");
+ return -EINVAL;
+ }
+
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ DP_ERR(edev, "invalid queue number %u\n",
+ fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+ if (fdir->input.flow_ext.is_vf) {
+ DP_ERR(edev, "flowdir is not supported over VF\n");
+ return -EINVAL;
+ }
+
+ return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
+}
+
+/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
+uint16_t
+qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params)
+
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint16_t *ether_type;
+ uint8_t *raw_pkt;
+ struct rte_eth_fdir_input *input;
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ uint16_t len;
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ };
+ raw_pkt = (uint8_t *)buff;
+ input = &fdir->input;
+ DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+ len = 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (input->flow_ext.vlan_tci) {
+ DP_INFO(edev, "adding VLAN header\n");
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ switch (input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ /* fill the common ip header */
+ ip = (struct ipv4_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
+ ip->total_length = sizeof(struct ipv4_hdr);
+ ip->next_proto_id = input->flow.ip4_flow.proto ?
+ input->flow.ip4_flow.proto :
+ next_proto[input->flow_type];
+ ip->time_to_live = input->flow.ip4_flow.ttl ?
+ input->flow.ip4_flow.ttl :
+ QEDE_FDIR_IPV4_DEF_TTL;
+ ip->type_of_service = input->flow.ip4_flow.tos;
+ ip->dst_addr = input->flow.ip4_flow.dst_ip;
+ ip->src_addr = input->flow.ip4_flow.src_ip;
+ len += sizeof(struct ipv4_hdr);
+ params->ipv4 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dst_port = input->flow.udp4_flow.dst_port;
+ udp->src_port = input->flow.udp4_flow.src_port;
+ udp->dgram_len = sizeof(struct udp_hdr);
+ len += sizeof(struct udp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp4_flow.src_port;
+ tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->proto = input->flow.ipv6_flow.proto ?
+ input->flow.ipv6_flow.proto :
+ next_proto[input->flow_type];
+ rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->src_port = input->flow.udp6_flow.dst_port;
+ udp->dst_port = input->flow.udp6_flow.src_port;
+ len += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp4_flow.src_port;
+ tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported flow_type %u\n",
+ input->flow_type);
+ return 0;
+ }
+
+ return len;
+}
+
+static int
+qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_filter *fdir;
+ int ret;
+
+ fdir = (struct rte_eth_fdir_filter *)arg;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query flowdir support */
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 0);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_INFO:
+ return -ENOTSUP;
+ default:
+ DP_ERR(edev, "unknown operation %u\n", filter_op);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_ntuple_filter *ntuple;
+ struct rte_eth_fdir_filter fdir_entry;
+ struct rte_eth_tcpv4_flow *tcpv4_flow;
+ struct rte_eth_udpv4_flow *udpv4_flow;
+ bool add = false;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query fdir support */
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ break;
+ case RTE_ETH_FILTER_INFO:
+ case RTE_ETH_FILTER_GET:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_SET:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_OP_MAX:
+ DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
+ return -ENOTSUP;
+ }
+ ntuple = (struct rte_eth_ntuple_filter *)arg;
+ /* Internally convert ntuple to fdir entry */
+ memset(&fdir_entry, 0, sizeof(fdir_entry));
+ if (ntuple->proto == IPPROTO_TCP) {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
+ tcpv4_flow->ip.src_ip = ntuple->src_ip;
+ tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ tcpv4_flow->ip.proto = IPPROTO_TCP;
+ tcpv4_flow->src_port = ntuple->src_port;
+ tcpv4_flow->dst_port = ntuple->dst_port;
+ } else {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ udpv4_flow = &fdir_entry.input.flow.udp4_flow;
+ udpv4_flow->ip.src_ip = ntuple->src_ip;
+ udpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ udpv4_flow->ip.proto = IPPROTO_UDP;
+ udpv4_flow->src_port = ntuple->src_port;
+ udpv4_flow->dst_port = ntuple->dst_port;
+ }
+
+ fdir_entry.action.rx_queue = ntuple->queue;
+
+ return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
+}
+
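+/* Apply an updated tunnel config on every hwfn; a PF acquires a PTT
+ * window for the ramrod, while a VF passes a NULL PTT.
+ */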
+static int
+qede_tunnel_update(struct qede_dev *qdev,
+ struct ecore_tunnel_info *tunn_info)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+ } else {
+ p_ptt = NULL;
+ }
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+ tunn_info, ECORE_SPQ_MODE_CB, NULL);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ if (qdev->vxlan.enable == enable)
+ return ECORE_SUCCESS;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.vxlan.b_update_mode = true;
+ tunn.vxlan.b_mode_enabled = enable;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+ tunn.vxlan.tun_cls = clss;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->vxlan.enable = enable;
+ qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
+ DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+
+ return rc;
+}
+
+static int
+qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.l2_geneve.b_update_mode = true;
+ tunn.l2_geneve.b_mode_enabled = enable;
+ tunn.ip_geneve.b_update_mode = true;
+ tunn.ip_geneve.b_mode_enabled = enable;
+ tunn.l2_geneve.tun_cls = clss;
+ tunn.ip_geneve.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->geneve.enable = enable;
+ qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
+ DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->geneve.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.ip_gre.b_update_mode = true;
+ tunn.ip_gre.b_mode_enabled = enable;
+ tunn.ip_gre.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->ipgre.enable = enable;
+ DP_INFO(edev, "IPGRE is %s\n",
+ enable ? "enabled" : "disabled");
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+int
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+ udp_port = 0;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+
+ qdev->vxlan.udp_port = udp_port;
+ /* If the request is to delete the UDP port and the number of
+ * VXLAN filters has reached 0, then VXLAN offload can be
+ * disabled.
+ */
+ if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+
+ udp_port = 0;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.geneve_port.port);
+ return rc;
+ }
+
+ qdev->geneve.udp_port = udp_port;
+ /* If the request is to delete the UDP port and the number of
+ * GENEVE filters has reached 0, then GENEVE offload can be
+ * disabled.
+ */
+ if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
+ return qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
+}
+
+int
+qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for VXLAN was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable VXLAN tunnel with default MAC/VLAN classification if
+ * it was not already enabled when the VXLAN filter was added
+ * before this UDP port update.
+ */
+ if (!qdev->vxlan.enable) {
+ rc = qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable VXLAN "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
+
+ qdev->vxlan.udp_port = udp_port;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for GENEVE was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable GENEVE tunnel with default MAC/VLAN classification if
+ * it was not already enabled when the GENEVE filter was added
+ * before this UDP port update.
+ */
+ if (!qdev->geneve.enable) {
+ rc = qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable GENEVE "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
+
+ qdev->geneve.udp_port = udp_port;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
+}
+
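+/* Look up the ecore ucast filter type and tunnel classification
+ * matching an rte filter type; *clss is left at MAX_ECORE_TUNN_CLSS
+ * when the combination is unsupported.
+ */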
+static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
+ uint32_t *clss, char *str)
+{
+ uint16_t j;
+ *clss = MAX_ECORE_TUNN_CLSS;
+
+ for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
+ if (filter == qede_tunn_types[j].rte_filter_type) {
+ *type = qede_tunn_types[j].qede_type;
+ *clss = qede_tunn_types[j].qede_tunn_clss;
+ strcpy(str, qede_tunn_types[j].string);
+ return;
+ }
+ }
+}
+
+static int
+qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ uint32_t type)
+{
+ /* Init common ucast params first */
+ qede_set_ucast_cmn_params(ucast);
+
+ /* Copy out the required fields based on classification type */
+ ucast->type = type;
+
+ switch (type) {
+ case ECORE_FILTER_VNI:
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ ucast->vlan = conf->inner_vlan;
+ break;
+ case ECORE_FILTER_MAC:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vlan = conf->inner_vlan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ __attribute__((unused)) enum rte_filter_op filter_op,
+ enum ecore_tunn_clss *clss,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_ucast ucast = {0};
+ enum ecore_filter_ucast_type type;
+ uint16_t filter_type = 0;
+ char str[80];
+ int rc;
+
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, clss, str);
+ if (*clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+
+ ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
+
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, add);
+ if (rc == 0 && add) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
+ }
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ enum rte_eth_tunnel_type tunn_type, bool enable)
+{
+ int rc = -EINVAL;
+
+ switch (tunn_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ rc = qede_vxlan_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ rc = qede_geneve_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ rc = qede_ipgre_enable(eth_dev, clss, enable);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+ bool add;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ add = false;
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
+
+ if (IS_VF(edev))
+ return qede_tunn_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ conf->tunnel_type, add);
+
+ rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (add) {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+ qdev->vxlan.num_filters++;
+ qdev->vxlan.filter_type = conf->filter_type;
+ } else { /* GENEVE */
+ qdev->geneve.num_filters++;
+ qdev->geneve.filter_type = conf->filter_type;
+ }
+
+ if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+ !qdev->ipgre.enable)
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ true);
+ } else {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
+ qdev->vxlan.num_filters--;
+ else /*GENEVE*/
+ qdev->geneve.num_filters--;
+
+ /* Disable tunnel offload if VXLAN or GENEVE filter count drops to 0 */
+ if (qdev->vxlan.num_filters == 0 ||
+ qdev->geneve.num_filters == 0)
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ false);
+ }
+
+ return 0;
+}
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_tunnel_filter_conf *filter_conf =
+ (struct rte_eth_tunnel_filter_conf *)arg;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ switch (filter_conf->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ DP_INFO(edev,
+ "Packet steering to the specified Rx queue"
+ " is not supported with UDP tunneling\n");
+ return qede_tunn_filter_config(eth_dev, filter_op,
+ filter_conf);
+ case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_TUNNEL_TYPE_NVGRE:
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ DP_ERR(edev, "Unsupported tunnel type %d\n",
+ filter_conf->tunnel_type);
+ return -EINVAL;
+ case RTE_TUNNEL_TYPE_NONE:
+ default:
+ return 0;
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_MACVLAN:
+ case RTE_ETH_FILTER_ETHERTYPE:
+ case RTE_ETH_FILTER_FLEXIBLE:
+ case RTE_ETH_FILTER_SYN:
+ case RTE_ETH_FILTER_HASH:
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ case RTE_ETH_FILTER_MAX:
+ default:
+ DP_ERR(edev, "Unsupported filter type %d\n",
+ filter_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* RTE_FLOW */
--
1.7.10.3
* [dpdk-dev] [PATCH 11/17] net/qede: fix flow director bug for IPv6 filter
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (9 preceding siblings ...)
2018-09-08 20:30 ` [dpdk-dev] [PATCH 10/17] net/qede: reorganize filter code Rasesh Mody
@ 2018-09-08 20:31 ` Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 12/17] net/qede: refactor fdir code into generic aRFS Rasesh Mody
` (7 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:31 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev, stable
From: Shahed Shaikh <shahed.shaikh@cavium.com>
- PMD does not fill the vtc_flow field of the IPv6 header while
constructing a packet for an IPv6 filter, hence the filter was
not getting applied properly.
- IPv6 source and destination addresses got swapped while copying.
- The same issue existed for the UDP and TCP port IDs.
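Below is a minimal sketch of the corrected IPv6 construction (names as
in the diff that follows; QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW is 0x60000000,
i.e. version 6 in the top nibble of vtc_flow with zero traffic class
and flow label):

    ip6->vtc_flow = rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
    /* src goes to src and dst to dst -- these were swapped before */
    rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
               IPV6_ADDR_LEN);
    rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
               IPV6_ADDR_LEN);
    /* likewise, read ports from the v6 union members, not tcp4_flow */
    tcp->src_port = input->flow.tcp6_flow.src_port;
    tcp->dst_port = input->flow.tcp6_flow.dst_port;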
Fixes: 622075356e8f ("net/qede: support ntuple and flow director filter")
Cc: stable@dpdk.org
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/qede_filter.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index b8460a0..4b709e6 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -121,7 +121,7 @@
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL (64)
-
+#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW (0x60000000)
/* Sum of length of header types of L2, L3, L4.
* L2 : ether_hdr + vlan_hdr + vxlan_hdr
* L3 : ipv6_hdr
@@ -445,24 +445,28 @@ void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
ip6->proto = input->flow.ipv6_flow.proto ?
input->flow.ipv6_flow.proto :
next_proto[input->flow_type];
- rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
+
+ rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
IPV6_ADDR_LEN);
- rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
+ rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
IPV6_ADDR_LEN);
len += sizeof(struct ipv6_hdr);
+ params->ipv6 = true;
raw_pkt = (uint8_t *)buff;
/* UDP */
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
- udp->src_port = input->flow.udp6_flow.dst_port;
- udp->dst_port = input->flow.udp6_flow.src_port;
+ udp->src_port = input->flow.udp6_flow.src_port;
+ udp->dst_port = input->flow.udp6_flow.dst_port;
len += sizeof(struct udp_hdr);
params->udp = true;
} else { /* TCP */
tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp4_flow.src_port;
- tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->src_port = input->flow.tcp6_flow.src_port;
+ tcp->dst_port = input->flow.tcp6_flow.dst_port;
tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
len += sizeof(struct tcp_hdr);
params->tcp = true;
--
1.7.10.3
* [dpdk-dev] [PATCH 12/17] net/qede: refactor fdir code into generic aRFS
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (10 preceding siblings ...)
2018-09-08 20:31 ` [dpdk-dev] [PATCH 11/17] net/qede: fix flow director bug for IPv6 filter Rasesh Mody
@ 2018-09-08 20:31 ` Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 13/17] net/qede: add support for generic flow API Rasesh Mody
` (6 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:31 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev
From: Shahed Shaikh <shahed.shaikh@cavium.com>
- In order to prepare the base for rte_flow (generic flow API) support,
convert the common code used for flow director support
into generic aRFS code.
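
As a rough sketch of the resulting control flow (types and functions as
introduced by the hunks below): the PMD now picks a generic filtering
mode, and ecore maps it to the HSI GFT profile when programming the
searcher:

    struct ecore_arfs_config_params params;

    memset(&params, 0, sizeof(params));
    params.tcp = true;
    params.ipv4 = true;
    params.mode = ECORE_FILTER_CONFIG_MODE_5_TUPLE;
    /* ecore_arfs_mode_configure() translates the mode via
     * ecore_arfs_mode_to_hsi() -- here to GFT_PROFILE_TYPE_4_TUPLE --
     * before calling ecore_gft_config().
     */
    ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, &params);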
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/base/ecore_l2.c | 26 ++-
drivers/net/qede/base/ecore_l2_api.h | 11 +-
drivers/net/qede/qede_ethdev.c | 2 +-
drivers/net/qede/qede_ethdev.h | 35 +++-
drivers/net/qede/qede_filter.c | 291 +++++++++++++++++++++-------------
5 files changed, 240 insertions(+), 125 deletions(-)
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index d71f461..ca4d901 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -2084,6 +2084,24 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
}
}
+static enum gft_profile_type
+ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode)
+{
+ if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE)
+ return GFT_PROFILE_TYPE_4_TUPLE;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST)
+ return GFT_PROFILE_TYPE_IP_DST_ADDR;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_TUNN_TYPE)
+ return GFT_PROFILE_TYPE_TUNNEL_TYPE;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_IP_SRC)
+ return GFT_PROFILE_TYPE_IP_SRC_ADDR;
+
+ return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params)
@@ -2091,13 +2109,13 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
return;
- if (p_cfg_params->arfs_enable) {
+ if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) {
ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
p_cfg_params->tcp,
p_cfg_params->udp,
p_cfg_params->ipv4,
p_cfg_params->ipv6,
- GFT_PROFILE_TYPE_4_TUPLE);
+ ecore_arfs_mode_to_hsi(p_cfg_params->mode));
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
p_cfg_params->tcp ? "Enable" : "Disable",
@@ -2107,8 +2125,8 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
} else {
ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
- p_cfg_params->arfs_enable ? "Enable" : "Disable");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %d\n",
+ (int)p_cfg_params->mode);
}
enum _ecore_status_t
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 575b9e3..85034e6 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -139,12 +139,21 @@ struct ecore_filter_accept_flags {
#define ECORE_ACCEPT_BCAST 0x20
};
+enum ecore_filter_config_mode {
+ ECORE_FILTER_CONFIG_MODE_DISABLE,
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE,
+ ECORE_FILTER_CONFIG_MODE_L4_PORT,
+ ECORE_FILTER_CONFIG_MODE_IP_DEST,
+ ECORE_FILTER_CONFIG_MODE_TUNN_TYPE,
+ ECORE_FILTER_CONFIG_MODE_IP_SRC,
+};
+
struct ecore_arfs_config_params {
bool tcp;
bool udp;
bool ipv4;
bool ipv6;
- bool arfs_enable; /* Enable or disable arfs mode */
+ enum ecore_filter_config_mode mode;
};
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 3a7c466..d5e162c 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -2576,7 +2576,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
- SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+ SLIST_INIT(&adapter->arfs_info.arfs_list_head);
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
SLIST_INIT(&adapter->mc_list_head);
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index d54f19b..59828f8 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -151,18 +151,43 @@ struct qede_ucast_entry {
SLIST_ENTRY(qede_ucast_entry) list;
};
-struct qede_fdir_entry {
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN (16)
+#endif
+
+struct qede_arfs_tuple {
+ union {
+ uint32_t src_ipv4;
+ uint8_t src_ipv6[IPV6_ADDR_LEN];
+ };
+
+ union {
+ uint32_t dst_ipv4;
+ uint8_t dst_ipv6[IPV6_ADDR_LEN];
+ };
+
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint16_t eth_proto;
+ uint8_t ip_proto;
+
+ /* Describe filtering mode needed for this kind of filter */
+ enum ecore_filter_config_mode mode;
+};
+
+struct qede_arfs_entry {
uint32_t soft_id; /* unused for now */
uint16_t pkt_len; /* actual packet length to match */
uint16_t rx_queue; /* queue to be steered to */
const struct rte_memzone *mz; /* mz used to hold L2 frame */
- SLIST_ENTRY(qede_fdir_entry) list;
+ struct qede_arfs_tuple tuple;
+ SLIST_ENTRY(qede_arfs_entry) list;
};
-struct qede_fdir_info {
+struct qede_arfs_info {
struct ecore_arfs_config_params arfs;
uint16_t filter_count;
- SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
+ SLIST_HEAD(arfs_list_head, qede_arfs_entry)arfs_list_head;
};
/* IANA assigned default UDP ports for encapsulation protocols */
@@ -207,7 +232,7 @@ struct qede_dev {
struct qede_tunn_params vxlan;
struct qede_tunn_params geneve;
struct qede_tunn_params ipgre;
- struct qede_fdir_info fdir_info;
+ struct qede_arfs_info arfs_info;
bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
bool vport_started;
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index 4b709e6..bdf2885 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -129,10 +129,6 @@
*/
#define QEDE_MAX_FDIR_PKT_LEN (86)
-#ifndef IPV6_ADDR_LEN
-#define IPV6_ADDR_LEN (16)
-#endif
-
static inline bool qede_valid_flow(uint16_t flow_type)
{
return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
@@ -141,6 +137,12 @@ static inline bool qede_valid_flow(uint16_t flow_type)
(flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
+ void *buff,
+ struct ecore_arfs_config_params *params);
+
/* Note: Flowdir support is only partial.
* For ex: drop_queue, FDIR masks, flex_conf are not supported.
* Parameters like pballoc/status fields are irrelevant here.
@@ -154,17 +156,19 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
/* check FDIR modes */
switch (fdir->mode) {
case RTE_FDIR_MODE_NONE:
- qdev->fdir_info.arfs.arfs_enable = false;
+ qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
DP_INFO(edev, "flowdir is disabled\n");
break;
case RTE_FDIR_MODE_PERFECT:
if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- qdev->fdir_info.arfs.arfs_enable = false;
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_DISABLE;
return -ENOTSUP;
}
- qdev->fdir_info.arfs.arfs_enable = true;
- DP_INFO(edev, "flowdir is enabled\n");
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE;
+ DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
break;
case RTE_FDIR_MODE_PERFECT_TUNNEL:
case RTE_FDIR_MODE_SIGNATURE:
@@ -179,29 +183,96 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct qede_fdir_entry *tmp = NULL;
+ struct qede_arfs_entry *tmp = NULL;
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
if (tmp) {
if (tmp->mz)
rte_memzone_free(tmp->mz);
- SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
- qede_fdir_entry, list);
+ SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+ qede_arfs_entry, list);
rte_free(tmp);
}
}
}
static int
-qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir_filter,
- bool add)
+qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ struct qede_arfs_entry *arfs)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_input *input;
+
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ };
+
+ input = &fdir->input;
+
+ DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+ switch (input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ /* fill the common ip header */
+ arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
+ arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
+ arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
+ arfs->tuple.ip_proto = next_proto[input->flow_type];
+
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
+ arfs->tuple.src_port = input->flow.udp4_flow.src_port;
+ } else { /* TCP */
+ arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
+ arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
+ }
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
+ arfs->tuple.ip_proto = next_proto[input->flow_type];
+ rte_memcpy(arfs->tuple.dst_ipv6,
+ &input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(arfs->tuple.src_ipv6,
+ &input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
+ arfs->tuple.src_port = input->flow.udp6_flow.src_port;
+ } else { /* TCP */
+ arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
+ arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported flow_type %u\n",
+ input->flow_type);
+ return -ENOTSUP;
+ }
+
+ arfs->rx_queue = fdir->action.rx_queue;
+ return 0;
+}
+
+static int
+qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
+ bool add)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
- struct qede_fdir_entry *tmp = NULL;
- struct qede_fdir_entry *fdir = NULL;
+ struct qede_arfs_entry *tmp = NULL;
const struct rte_memzone *mz;
struct ecore_hwfn *p_hwfn;
enum _ecore_status_t rc;
@@ -209,17 +280,12 @@ void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
void *pkt;
if (add) {
- if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+ if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
DP_ERR(edev, "Reached max flowdir filter limit\n");
return -EINVAL;
}
- fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
- RTE_CACHE_LINE_SIZE);
- if (!fdir) {
- DP_ERR(edev, "Did not allocate memory for fdir\n");
- return -ENOMEM;
- }
}
+
/* soft_id could have been used as memzone string, but soft_id is
* not currently used so it has no significance.
*/
@@ -230,98 +296,124 @@ void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
if (!mz) {
DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
rte_strerror(rte_errno));
- rc = -rte_errno;
- goto err1;
+ return -rte_errno;
}
pkt = mz->addr;
memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
- pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
- &qdev->fdir_info.arfs);
+ pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
+ &qdev->arfs_info.arfs);
if (pkt_len == 0) {
rc = -EINVAL;
- goto err2;
+ goto err1;
}
+
DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
if (add) {
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
DP_INFO(edev, "flowdir filter exist\n");
- rc = 0;
- goto err2;
+ rc = -EEXIST;
+ goto err1;
}
}
} else {
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
break;
}
if (!tmp) {
DP_ERR(edev, "flowdir filter does not exist\n");
rc = -EEXIST;
- goto err2;
+ goto err1;
}
}
p_hwfn = ECORE_LEADING_HWFN(edev);
if (add) {
- if (!qdev->fdir_info.arfs.arfs_enable) {
+ if (qdev->arfs_info.arfs.mode ==
+ ECORE_FILTER_CONFIG_MODE_DISABLE) {
/* Force update */
eth_dev->data->dev_conf.fdir_conf.mode =
RTE_FDIR_MODE_PERFECT;
- qdev->fdir_info.arfs.arfs_enable = true;
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE;
DP_INFO(edev, "Force enable flowdir in perfect mode\n");
}
/* Enable ARFS searcher with updated flow_types */
ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
- &qdev->fdir_info.arfs);
+ &qdev->arfs_info.arfs);
}
/* configure filter with ECORE_SPQ_MODE_EBLOCK */
rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
(dma_addr_t)mz->iova,
pkt_len,
- fdir_filter->action.rx_queue,
+ arfs->rx_queue,
0, add);
if (rc == ECORE_SUCCESS) {
if (add) {
- fdir->rx_queue = fdir_filter->action.rx_queue;
- fdir->pkt_len = pkt_len;
- fdir->mz = mz;
- SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
- fdir, list);
- qdev->fdir_info.filter_count++;
+ arfs->pkt_len = pkt_len;
+ arfs->mz = mz;
+ SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
+ arfs, list);
+ qdev->arfs_info.filter_count++;
DP_INFO(edev, "flowdir filter added, count = %d\n",
- qdev->fdir_info.filter_count);
+ qdev->arfs_info.filter_count);
} else {
rte_memzone_free(tmp->mz);
- SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
- qede_fdir_entry, list);
+ SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+ qede_arfs_entry, list);
rte_free(tmp); /* the node deleted */
rte_memzone_free(mz); /* temp node allocated */
- qdev->fdir_info.filter_count--;
+ qdev->arfs_info.filter_count--;
DP_INFO(edev, "Fdir filter deleted, count = %d\n",
- qdev->fdir_info.filter_count);
+ qdev->arfs_info.filter_count);
}
} else {
DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
- rc, qdev->fdir_info.filter_count);
+ rc, qdev->arfs_info.filter_count);
}
/* Disable ARFS searcher if there are no more filters */
- if (qdev->fdir_info.filter_count == 0) {
- memset(&qdev->fdir_info.arfs, 0,
+ if (qdev->arfs_info.filter_count == 0) {
+ memset(&qdev->arfs_info.arfs, 0,
sizeof(struct ecore_arfs_config_params));
DP_INFO(edev, "Disabling flowdir\n");
- qdev->fdir_info.arfs.arfs_enable = false;
+ qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
- &qdev->fdir_info.arfs);
+ &qdev->arfs_info.arfs);
}
return 0;
-err2:
- rte_memzone_free(mz);
err1:
- if (add)
- rte_free(fdir);
+ rte_memzone_free(mz);
+ return rc;
+}
+
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir_filter,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_arfs_entry *arfs = NULL;
+ int rc = 0;
+
+ arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!arfs) {
+ DP_ERR(edev, "Did not allocate memory for arfs\n");
+ return -ENOMEM;
+ }
+
+ rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
+ if (rc < 0) {
+ rte_free(arfs);
+ return rc;
+ }
+
+ rc = qede_config_arfs_filter(eth_dev, arfs, add);
+ if (rc < 0)
+ rte_free(arfs);
+
return rc;
}
@@ -353,9 +445,9 @@ void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
}
/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
-uint16_t
-qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir,
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
void *buff,
struct ecore_arfs_config_params *params)
@@ -364,64 +456,39 @@ void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
uint16_t *ether_type;
uint8_t *raw_pkt;
- struct rte_eth_fdir_input *input;
- static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
struct ipv4_hdr *ip;
struct ipv6_hdr *ip6;
struct udp_hdr *udp;
struct tcp_hdr *tcp;
uint16_t len;
- static const uint8_t next_proto[] = {
- [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
- };
+
raw_pkt = (uint8_t *)buff;
- input = &fdir->input;
- DP_INFO(edev, "flow_type %d\n", input->flow_type);
len = 2 * sizeof(struct ether_addr);
raw_pkt += 2 * sizeof(struct ether_addr);
- if (input->flow_ext.vlan_tci) {
- DP_INFO(edev, "adding VLAN header\n");
- rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
- rte_memcpy(raw_pkt + sizeof(uint16_t),
- &input->flow_ext.vlan_tci,
- sizeof(uint16_t));
- raw_pkt += sizeof(vlan_frame);
- len += sizeof(vlan_frame);
- }
ether_type = (uint16_t *)raw_pkt;
raw_pkt += sizeof(uint16_t);
len += sizeof(uint16_t);
- switch (input->flow_type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- /* fill the common ip header */
+ *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
+ switch (arfs->tuple.eth_proto) {
+ case ETHER_TYPE_IPv4:
ip = (struct ipv4_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
ip->total_length = sizeof(struct ipv4_hdr);
- ip->next_proto_id = input->flow.ip4_flow.proto ?
- input->flow.ip4_flow.proto :
- next_proto[input->flow_type];
- ip->time_to_live = input->flow.ip4_flow.ttl ?
- input->flow.ip4_flow.ttl :
- QEDE_FDIR_IPV4_DEF_TTL;
- ip->type_of_service = input->flow.ip4_flow.tos;
- ip->dst_addr = input->flow.ip4_flow.dst_ip;
- ip->src_addr = input->flow.ip4_flow.src_ip;
+ ip->next_proto_id = arfs->tuple.ip_proto;
+ ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
+ ip->dst_addr = arfs->tuple.dst_ipv4;
+ ip->src_addr = arfs->tuple.src_ipv4;
len += sizeof(struct ipv4_hdr);
params->ipv4 = true;
raw_pkt = (uint8_t *)buff;
/* UDP */
- if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ if (arfs->tuple.ip_proto == IPPROTO_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
- udp->dst_port = input->flow.udp4_flow.dst_port;
- udp->src_port = input->flow.udp4_flow.src_port;
+ udp->dst_port = arfs->tuple.dst_port;
+ udp->src_port = arfs->tuple.src_port;
udp->dgram_len = sizeof(struct udp_hdr);
len += sizeof(struct udp_hdr);
/* adjust ip total_length */
@@ -429,8 +496,8 @@ void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
params->udp = true;
} else { /* TCP */
tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp4_flow.src_port;
- tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->src_port = arfs->tuple.src_port;
+ tcp->dst_port = arfs->tuple.dst_port;
tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
len += sizeof(struct tcp_hdr);
/* adjust ip total_length */
@@ -438,43 +505,39 @@ void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
params->tcp = true;
}
break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case ETHER_TYPE_IPv6:
ip6 = (struct ipv6_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
- ip6->proto = input->flow.ipv6_flow.proto ?
- input->flow.ipv6_flow.proto :
- next_proto[input->flow_type];
+ ip6->proto = arfs->tuple.ip_proto;
ip6->vtc_flow =
rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
- rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
+ rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
IPV6_ADDR_LEN);
- rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
+ rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
IPV6_ADDR_LEN);
len += sizeof(struct ipv6_hdr);
params->ipv6 = true;
raw_pkt = (uint8_t *)buff;
/* UDP */
- if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ if (arfs->tuple.ip_proto == IPPROTO_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
- udp->src_port = input->flow.udp6_flow.src_port;
- udp->dst_port = input->flow.udp6_flow.dst_port;
+ udp->src_port = arfs->tuple.src_port;
+ udp->dst_port = arfs->tuple.dst_port;
len += sizeof(struct udp_hdr);
params->udp = true;
} else { /* TCP */
tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp6_flow.src_port;
- tcp->dst_port = input->flow.tcp6_flow.dst_port;
+ tcp->src_port = arfs->tuple.src_port;
+ tcp->dst_port = arfs->tuple.dst_port;
tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
len += sizeof(struct tcp_hdr);
params->tcp = true;
}
break;
default:
- DP_ERR(edev, "Unsupported flow_type %u\n",
- input->flow_type);
+ DP_ERR(edev, "Unsupported eth_proto %u\n",
+ arfs->tuple.eth_proto);
return 0;
}
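For reference, a minimal sketch of how a caller inside qede_filter.c
could feed the new generic path directly. This is illustrative only
(the addresses, ports and queue index are hypothetical, and
qede_config_arfs_filter() is static to this file):

    struct qede_arfs_entry *arfs;
    int rc;

    /* Steer UDP/IPv4 10.0.0.1:4789 -> 10.0.0.2:4789 to Rx queue 3 */
    arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
                      RTE_CACHE_LINE_SIZE);
    if (arfs == NULL)
        return -ENOMEM;
    memset(arfs, 0, sizeof(struct qede_arfs_entry));

    arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
    arfs->tuple.ip_proto = IPPROTO_UDP;
    arfs->tuple.src_ipv4 = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
    arfs->tuple.dst_ipv4 = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
    arfs->tuple.src_port = rte_cpu_to_be_16(4789);
    arfs->tuple.dst_port = rte_cpu_to_be_16(4789);
    arfs->rx_queue = 3;

    rc = qede_config_arfs_filter(eth_dev, arfs, true);
    if (rc < 0)
        rte_free(arfs);

This mirrors what qede_config_cmn_fdir_filter() does after converting
a legacy fdir request via qede_fdir_to_arfs_filter().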
--
1.7.10.3
* [dpdk-dev] [PATCH 13/17] net/qede: add support for generic flow API
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (11 preceding siblings ...)
2018-09-08 20:31 ` [dpdk-dev] [PATCH 12/17] net/qede: refactor fdir code into generic aRFS Rasesh Mody
@ 2018-09-08 20:31 ` Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 14/17] net/qede: fix Rx buffer size calculation Rasesh Mody
` (5 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:31 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev
From: Shahed Shaikh <shahed.shaikh@cavium.com>
- Add support for the rte_flow_validate(), rte_flow_create() and
rte_flow_destroy() APIs.
- This patch adds limited support for flow items because of the
limited filter profiles supported by HW:
- Only a 4-tuple match - src and dst IP (v4 or v6) addresses and
src and dst TCP or UDP ports.
- Also, only the redirect-to-queue action is supported; a usage
sketch follows below.
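A usage sketch from the application side (illustrative only; port_id
and the addresses, ports and queue index below are hypothetical):

    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_item_ipv4 ip_spec = {
        .hdr = {
            .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
            .dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 2)),
        },
    };
    struct rte_flow_item_tcp tcp_spec = {
        .hdr = {
            .src_port = rte_cpu_to_be_16(4000),
            .dst_port = rte_cpu_to_be_16(5000),
        },
    };
    /* no mask/last entries - the parser below rejects them */
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
        { .type = RTE_FLOW_ITEM_TYPE_TCP,  .spec = &tcp_spec },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 2 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    struct rte_flow *flow = NULL;

    if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
        flow = rte_flow_create(port_id, &attr, pattern, actions, &err);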
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/qede_ethdev.h | 5 +
drivers/net/qede/qede_filter.c | 334 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 338 insertions(+), 1 deletion(-)
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 59828f8..8a9df98 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -184,6 +184,11 @@ struct qede_arfs_entry {
SLIST_ENTRY(qede_arfs_entry) list;
};
+/* Opaque handle for rte flow managed by PMD */
+struct rte_flow {
+ struct qede_arfs_entry entry;
+};
+
struct qede_arfs_info {
struct ecore_arfs_config_params arfs;
uint16_t filter_count;
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index bdf2885..5e6571c 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -8,6 +8,7 @@
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>
+#include <rte_flow_driver.h>
#include "qede_ethdev.h"
@@ -1159,6 +1160,327 @@ static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
return 0;
}
+static int
+qede_flow_validate_attr(__attribute__((unused))struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "NULL attribute");
+ return -rte_errno;
+ }
+
+ if (attr->group != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->transfer != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ bool l3 = false, l4 = false;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "NULL pattern");
+ return -rte_errno;
+ }
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ if (!pattern->spec) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item spec not defined");
+ return -rte_errno;
+ }
+
+ if (pattern->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item last not supported");
+ return -rte_errno;
+ }
+
+ if (pattern->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item mask not supported");
+ return -rte_errno;
+ }
+
+ /* Below validation is only for 4 tuple flow
+ * (GFT_PROFILE_TYPE_4_TUPLE)
+ * - src and dst L3 address (IPv4 or IPv6)
+ * - src and dst L4 port (TCP or UDP)
+ */
+
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = true;
+
+ if (flow) {
+ const struct rte_flow_item_ipv4 *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
+ flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
+ flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = true;
+
+ if (flow) {
+ const struct rte_flow_item_ipv6 *spec;
+
+ spec = pattern->spec;
+ rte_memcpy(flow->entry.tuple.src_ipv6,
+ spec->hdr.src_addr,
+ IPV6_ADDR_LEN);
+ rte_memcpy(flow->entry.tuple.dst_ipv6,
+ spec->hdr.dst_addr,
+ IPV6_ADDR_LEN);
+ flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ l4 = true;
+
+ if (flow) {
+ const struct rte_flow_item_udp *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_port =
+ spec->hdr.src_port;
+ flow->entry.tuple.dst_port =
+ spec->hdr.dst_port;
+ flow->entry.tuple.ip_proto = IPPROTO_UDP;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ l4 = true;
+
+ if (flow) {
+ const struct rte_flow_item_tcp *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_port =
+ spec->hdr.src_port;
+ flow->entry.tuple.dst_port =
+ spec->hdr.dst_port;
+ flow->entry.tuple.ip_proto = IPPROTO_TCP;
+ }
+
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
+ return -rte_errno;
+ }
+ }
+
+ if (!(l3 && l4)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item types need to have both L3 and L4 protocols");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ const struct rte_flow_action_queue *queue;
+
+ if (actions == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ "NULL actions");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+
+ if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Bad QUEUE action");
+ return -rte_errno;
+ }
+
+ if (flow)
+ flow->entry.rx_queue = queue->index;
+
+ break;
+
+ default:
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Action is not supported - only ACTION_TYPE_QUEUE supported");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+
+{
+ int rc = 0;
+
+ rc = qede_flow_validate_attr(dev, attr, error);
+ if (rc)
+ return rc;
+
+ /* Parse and validate the item pattern and actions.
+ * The given item list and actions will be translated into a
+ * qede PMD specific arfs structure.
+ */
+ rc = qede_flow_parse_pattern(dev, patterns, error, flow);
+ if (rc)
+ return rc;
+
+ rc = qede_flow_parse_actions(dev, actions, error, flow);
+
+ return rc;
+}
+
+static int
+qede_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
+}
+
+static struct rte_flow *
+qede_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow = NULL;
+ int rc;
+
+ flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ return NULL;
+ }
+
+ rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
+ if (rc < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ rc = qede_config_arfs_filter(dev, &flow->entry, true);
+ if (rc < 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to configure flow filter");
+ rte_free(flow);
+ return NULL;
+ }
+
+ return flow;
+}
+
+static int
+qede_flow_destroy(struct rte_eth_dev *eth_dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc = 0;
+
+ rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
+ if (rc < 0)
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to delete flow filter");
+
+ /* the flow handle is no longer usable in either case */
+ rte_free(flow);
+
+ return rc;
+}
+
+const struct rte_flow_ops qede_flow_ops = {
+ .validate = qede_flow_validate,
+ .create = qede_flow_create,
+ .destroy = qede_flow_destroy,
+};
+
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
@@ -1195,6 +1517,17 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
return qede_fdir_filter_conf(eth_dev, filter_op, arg);
case RTE_ETH_FILTER_NTUPLE:
return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_GENERIC:
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+
+ *(const void **)arg = &qede_flow_ops;
+ return 0;
case RTE_ETH_FILTER_MACVLAN:
case RTE_ETH_FILTER_ETHERTYPE:
case RTE_ETH_FILTER_FLEXIBLE:
@@ -1211,4 +1544,3 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
return 0;
}
-/* RTE_FLOW */
--
1.7.10.3
* [dpdk-dev] [PATCH 14/17] net/qede: fix Rx buffer size calculation
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (12 preceding siblings ...)
2018-09-08 20:31 ` [dpdk-dev] [PATCH 13/17] net/qede: add support for generic flow API Rasesh Mody
@ 2018-09-08 20:31 ` Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 15/17] net/qede: add support for Rx descriptor status Rasesh Mody
` (4 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:31 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev, stable
From: Shahed Shaikh <shahed.shaikh@cavium.com>
- HW does not include the CRC in the received frame when passing it to
the host, so there is no need to account for the CRC length while
calculating the Rx buffer size.
- In scattered Rx mode, the driver may allocate an Rx buffer larger than
the size of the mbuf, because it adjusts the buffer size to the cache
line size by rounding it up. Fix this by rounding the size down instead.
- Honor the rule imposed by HW regarding the minimum Rx buffer size in
scattered Rx mode -
(MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
A worked example is shown below.
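A worked example under hypothetical values - MTU 9000, a mempool with
2176 bytes of data room and 128 bytes of headroom, 18 bytes of
QEDE_ETH_OVERHEAD and an ETH_RX_MAX_BUFF_PER_PKT of 5:

    mbufsz         = 2176 - 128      = 2048
    max_frame_size = 9000 + 14 + 18  = 9032  (MTU + ETH header + overhead)
    per-BD minimum = (9032 + 18) / 5 = 1810  (scattered Rx rule)
    rx_buf_size    = max(2048, 1810) = 2048  (already cache-line aligned)

Since 5 * 2048 = 10240 >= 9050, the mbuf pool can hold the largest
frame across ETH_RX_MAX_BUFF_PER_PKT buffers and the check passes.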
Fixes: f6033f2497e7 ("net/qede: fix minimum buffer size and scatter Rx check")
Cc: stable@dpdk.org
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/qede_ethdev.c | 28 +++++++++----------
drivers/net/qede/qede_rxtx.c | 59 +++++++++++++++++++++++++++++++++++-----
drivers/net/qede/qede_rxtx.h | 15 ++++++++--
3 files changed, 78 insertions(+), 24 deletions(-)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index d5e162c..259eb45 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1210,7 +1210,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN;
+ ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
@@ -2226,19 +2226,18 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
struct qede_fastpath *fp;
uint32_t max_rx_pkt_len;
uint32_t frame_size;
- uint16_t rx_buf_size;
uint16_t bufsz;
bool restart = false;
- int i;
+ int i, rc;
PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
- max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
+ frame_size = max_rx_pkt_len;
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
- ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
+ QEDE_ETH_OVERHEAD);
return -EINVAL;
}
if (!dev->data->scattered_rx &&
@@ -2266,14 +2265,15 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = frame_size;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+ /* cache align the mbuf size to simplify rx_buf_size
+ * calculation
+ */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+ rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
+ if (rc < 0)
+ return rc;
+
+ fp->rxq->rx_buf_size = rc;
}
}
if (max_rx_pkt_len > ETHER_MAX_LEN)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0f157de..675c0a0 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -35,6 +35,49 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
return 0;
}
+/* Criteria for calculating the Rx buffer size -
+ * 1) rx_buf_size should not exceed the size of the mbuf
+ * 2) In scattered_rx mode - minimum rx_buf_size should be
+ * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
+ * 3) In regular mode - minimum rx_buf_size should be
+ * (MTU + Maximum L2 Header Size + 2)
+ * In the above cases, +2 corresponds to 2 bytes of padding in front
+ * of the L2 header.
+ * 4) rx_buf_size should be cache-line-size aligned. Considering
+ * criterion 1, we round the size down (floor) instead of up (ceil)
+ * so that we do not exceed the mbuf size.
+ */
+int
+qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rx_buf_size;
+
+ if (dev->data->scattered_rx) {
+ /* Per HW limitation, at most ETH_RX_MAX_BUFF_PER_PKT
+ * buffers can be used for a single packet, so we need to make
+ * sure the mbuf size is sufficient for this.
+ */
+ if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
+ (max_frame_size + QEDE_ETH_OVERHEAD)) {
+ DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
+ mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
+ return -EINVAL;
+ }
+
+ rx_buf_size = RTE_MAX(mbufsz,
+ (max_frame_size + QEDE_ETH_OVERHEAD) /
+ ETH_RX_MAX_BUFF_PER_PKT);
+ } else {
+ rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
+ }
+
+ /* Align to cache-line size if needed */
+ return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
+}
+
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
@@ -85,6 +128,8 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ /* cache align the mbuf size to simplify rx_buf_size calculation */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
(max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
@@ -93,13 +138,13 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
}
}
- if (dev->data->scattered_rx)
- rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
- /* Align to cache-line size if needed */
- rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+ rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+ if (rc < 0) {
+ rte_free(rxq);
+ return rc;
+ }
+
+ rxq->rx_buf_size = rc;
DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index e710fba..8bd8d1c 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -61,9 +61,16 @@
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
~(QEDE_FW_RX_ALIGN_END - 1))
-/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
-#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
- + (QEDE_LLC_SNAP_HDR_LEN))
+#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
+ QEDE_FW_RX_ALIGN_END)
+
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional,
+ * +2 is for padding in front of L2 header
+ */
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN) + 2)
+
+#define QEDE_MAX_ETHER_HDR_LEN (ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
ETH_RSS_NONFRAG_IPV4_TCP |\
@@ -267,6 +274,8 @@ uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
int qede_start_queues(struct rte_eth_dev *eth_dev);
void qede_stop_queues(struct rte_eth_dev *eth_dev);
+int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
--
1.7.10.3
* [dpdk-dev] [PATCH 15/17] net/qede: add support for Rx descriptor status
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (13 preceding siblings ...)
2018-09-08 20:31 ` [dpdk-dev] [PATCH 14/17] net/qede: fix Rx buffer size calculation Rasesh Mody
@ 2018-09-08 20:31 ` Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 16/17] net/qede/base: fix MFW FLR flow bug Rasesh Mody
` (3 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:31 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev
From: Shahed Shaikh <shahed.shaikh@cavium.com>
This patch implements the eth_dev_ops->rx_descriptor_status
callback.
Walk through the receive completion ring to calculate the number of
receive descriptors used by the firmware, then report the status of
the given offset accordingly.
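From an application, the callback is reached through the ethdev
wrapper. A minimal polling sketch (port_id and queue_id are
hypothetical):

    uint16_t offset;

    /* count how many of the first 32 ring entries HW has completed */
    for (offset = 0; offset < 32; offset++) {
        if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) !=
            RTE_ETH_RX_DESC_DONE)
            break;
    }
    /* 'offset' now holds the number of completed descriptors */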
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/qede_ethdev.c | 2 +
drivers/net/qede/qede_rxtx.c | 81 ++++++++++++++++++++++++++++++++++++++++
drivers/net/qede/qede_rxtx.h | 2 +
3 files changed, 85 insertions(+)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 259eb45..322400c 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -2300,6 +2300,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
+ .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
@@ -2341,6 +2342,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
+ .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 675c0a0..8a4772f 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -2151,3 +2151,84 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
{
return 0;
}
+
+
+/* This function does a dry walk through the completion queue
+ * to calculate the number of BDs used by HW.
+ * At the end, it restores the state of the completion queue.
+ */
+static uint16_t
+qede_parse_fp_cqe(struct qede_rx_queue *rxq)
+{
+ uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
+ union eth_rx_cqe *cqe, *orig_cqe = NULL;
+
+ hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ orig_cqe = cqe;
+
+ while (sw_comp_cons != hw_comp_cons) {
+ switch (cqe->fast_path_regular.type) {
+ case ETH_RX_CQE_TYPE_REGULAR:
+ bd_count += cqe->fast_path_regular.bd_num;
+ break;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ bd_count += cqe->fast_path_tpa_end.num_of_bds;
+ break;
+ default:
+ break;
+ }
+
+ cqe =
+ (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+ }
+
+ /* revert comp_ring to original state */
+ ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
+
+ return bd_count;
+}
+
+int
+qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
+{
+ uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
+ uint16_t produced, consumed;
+ struct qede_rx_queue *rxq = p_rxq;
+
+ if (offset > rxq->nb_rx_desc)
+ return -EINVAL;
+
+ sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
+ sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+
+ /* find BDs used by HW from completion queue elements */
+ hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
+
+ if (hw_bd_cons < sw_bd_cons)
+ /* wraparound case */
+ consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
+ else
+ consumed = hw_bd_cons - sw_bd_cons;
+
+ if (offset <= consumed)
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (sw_bd_prod < sw_bd_cons)
+ /* wraparound case */
+ produced = (0xffff - sw_bd_cons) + sw_bd_prod;
+ else
+ produced = sw_bd_prod - sw_bd_cons;
+
+ if (offset <= produced)
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 8bd8d1c..d3a41e9 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -276,6 +276,8 @@ uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
void qede_stop_queues(struct rte_eth_dev *eth_dev);
int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
uint16_t max_frame_size);
+int
+qede_rx_descriptor_status(void *rxq, uint16_t offset);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
--
1.7.10.3
* [dpdk-dev] [PATCH 16/17] net/qede/base: fix MFW FLR flow bug
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (14 preceding siblings ...)
2018-09-08 20:31 ` [dpdk-dev] [PATCH 15/17] net/qede: add support for Rx descriptor status Rasesh Mody
@ 2018-09-08 20:31 ` Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 17/17] net/qede: add support for dev reset Rasesh Mody
` (2 subsequent siblings)
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:31 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev, stable
From: Shahed Shaikh <shahed.shaikh@cavium.com>
Management firmware does not properly clean the IGU block in the PF FLR
flow, which may result in undelivered attentions for link events from
the default status block.
Add a workaround in the PMD to execute an extra IGU cleanup right after
the PF FLR is done.
Fixes: 9e2f08a4ad5f ("net/qede/base: add request for PF FLR before load request")
Cc: stable@dpdk.org
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/base/ecore_dev.c | 7 +++++++
drivers/net/qede/base/ecore_int.c | 32 ++++++++++++++++++++++++++++++++
drivers/net/qede/base/ecore_int.h | 1 +
drivers/net/qede/base/mcp_public.h | 2 ++
drivers/net/qede/base/reg_addr.h | 15 +++++++++++++++
5 files changed, 57 insertions(+)
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index fdb62f2..d91fe27 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -4272,6 +4272,13 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+
+ /* Workaround for an MFW issue where PF FLR does not clean up
+ * the IGU block
+ */
+ if (!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
+ ecore_pf_flr_igu_cleanup(p_hwfn);
}
/* Check if mdump logs/data are present and update the epoch value */
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 4c271d3..d41107d 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -2681,3 +2681,35 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+ int i;
+
+ /* Do not reorder the following cleanup sequence */
+ /* Ack all attentions */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff);
+
+ /* Clear driver attention */
+ ecore_wr(p_hwfn, p_dpc_ptt,
+ ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0);
+
+ /* Clear per-PF IGU registers to restore them as if the IGU
+ * was reset for this PF
+ */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+
+ /* Execute IGU cleanup */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1);
+
+ /* Clear Stats */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0);
+
+ for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0);
+}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 041240d..ff2310c 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -256,5 +256,6 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool is_hw_init);
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index e9f3350..2ee8ab5 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1797,6 +1797,8 @@ struct public_drv_mb {
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
/* MFW supports DRV_LOAD Timeout */
#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004
+/* MFW supports complete IGU cleanup upon FLR */
+#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP 0x00000080
/* MFW supports virtual link */
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 7ed26fc..b82ccc1 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -322,6 +322,21 @@
0x180820UL
#define IGU_REG_ATTN_MSG_ADDR_H \
0x180824UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define IGU_REG_ATTENTION_ACK_BITS \
+ 0x180838UL
+#define IGU_REG_PBA_STS_PF \
+ 0x180d20UL
+#define IGU_REG_PF_FUNCTIONAL_CLEANUP \
+ 0x181210UL
+#define IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED \
+ 0x18042cUL
+#define IGU_REG_PBA_STS_PF_SIZE 5
#define MISC_REG_AEU_GENERAL_ATTN_0 \
0x008400UL
#define CAU_REG_SB_ADDR_MEMORY \
--
1.7.10.3
* [dpdk-dev] [PATCH 17/17] net/qede: add support for dev reset
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (15 preceding siblings ...)
2018-09-08 20:31 ` [dpdk-dev] [PATCH 16/17] net/qede/base: fix MFW FLR flow bug Rasesh Mody
@ 2018-09-08 20:31 ` Rasesh Mody
2018-09-10 5:05 ` [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes David Marchand
2018-09-20 16:17 ` Ferruh Yigit
18 siblings, 0 replies; 21+ messages in thread
From: Rasesh Mody @ 2018-09-08 20:31 UTC (permalink / raw)
To: dev; +Cc: Shahed Shaikh, ferruh.yigit, Dept-EngDPDKDev
From: Shahed Shaikh <shahed.shaikh@cavium.com>
Implement the eth_dev_ops->dev_reset callback.
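A typical recovery sequence in an application would look like this
(sketch only; the helper at the end is a hypothetical application
function):

    /* e.g. from an RTE_ETH_EVENT_INTR_RESET handler */
    rte_eth_dev_stop(port_id);
    if (rte_eth_dev_reset(port_id) == 0) {
        /* the port is back in an unconfigured state - set up
         * queues again and restart via the normal init path
         */
        configure_and_start_port(port_id);
    }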
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/qede_ethdev.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 322400c..36a51f6 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -14,6 +14,9 @@
int qede_logtype_driver;
static const struct qed_eth_ops *qed_ops;
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
+
#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
struct rte_qede_xstats_name_off {
@@ -2295,6 +2298,18 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
+static int
+qede_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = qede_eth_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ return qede_eth_dev_init(dev);
+}
+
static const struct eth_dev_ops qede_eth_dev_ops = {
.dev_configure = qede_dev_configure,
.dev_infos_get = qede_dev_info_get,
@@ -2304,6 +2319,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
+ .dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
.dev_set_link_down = qede_dev_set_link_down,
.link_update = qede_link_update,
@@ -2346,6 +2362,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
+ .dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
.dev_set_link_down = qede_dev_set_link_down,
.link_update = qede_link_update,
--
1.7.10.3
* Re: [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (16 preceding siblings ...)
2018-09-08 20:31 ` [dpdk-dev] [PATCH 17/17] net/qede: add support for dev reset Rasesh Mody
@ 2018-09-10 5:05 ` David Marchand
2018-09-20 16:17 ` Ferruh Yigit
18 siblings, 0 replies; 21+ messages in thread
From: David Marchand @ 2018-09-10 5:05 UTC (permalink / raw)
To: Rasesh Mody; +Cc: dev, Ferruh Yigit, Dept-EngDPDKDev
On Sat, Sep 8, 2018 at 10:30 PM, Rasesh Mody <rasesh.mody@cavium.com> wrote:
> This patchset adds enhancements and fixes for QEDE PMD.
>
> Rasesh Mody (8):
> net/qede/base: fix to handle stag update event
> net/qede/base: add support for OneView APIs
> net/qede/base: get pre-negotiated values for stag and bw
> net/qede: fix to program HW regs with ether type
> net/qede/base: limit number of non ethernet queues to 64
> net/qede/base: correct MCP error handler's log verbosity
> net/qede/base: fix logic for sfp get/set
> net/qede/base: use pointer for bytes len read
>
> Shahed Shaikh (9):
> net/qede/base: use trust mode for forced MAC limitations
> net/qede: reorganize filter code
> net/qede: fix flow director bug for IPv6 filter
> net/qede: refactor fdir code into generic aRFS
> net/qede: add support for generic flow API
> net/qede: fix Rx buffer size calculation
> net/qede: add support for Rx descriptor status
> net/qede/base: fix MFW FLR flow bug
> net/qede: add support for dev reset
Tested-by: David Marchand <david.marchand@6wind.com>
--
David Marchand
* Re: [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
` (17 preceding siblings ...)
2018-09-10 5:05 ` [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes David Marchand
@ 2018-09-20 16:17 ` Ferruh Yigit
18 siblings, 0 replies; 21+ messages in thread
From: Ferruh Yigit @ 2018-09-20 16:17 UTC (permalink / raw)
To: Rasesh Mody, dev; +Cc: Dept-EngDPDKDev
On 9/8/2018 9:30 PM, Rasesh Mody wrote:
> This patchset adds enhancements and fixes for QEDE PMD.
>
> Rasesh Mody (8):
> net/qede/base: fix to handle stag update event
> net/qede/base: add support for OneView APIs
> net/qede/base: get pre-negotiated values for stag and bw
> net/qede: fix to program HW regs with ether type
> net/qede/base: limit number of non ethernet queues to 64
> net/qede/base: correct MCP error handler's log verbosity
> net/qede/base: fix logic for sfp get/set
> net/qede/base: use pointer for bytes len read
>
> Shahed Shaikh (9):
> net/qede/base: use trust mode for forced MAC limitations
> net/qede: reorganize filter code
> net/qede: fix flow director bug for IPv6 filter
> net/qede: refactor fdir code into generic aRFS
> net/qede: add support for generic flow API
> net/qede: fix Rx buffer size calculation
> net/qede: add support for Rx descriptor status
> net/qede/base: fix MFW FLR flow bug
> net/qede: add support for dev reset
Series applied to dpdk-next-net/master, thanks.
* Re: [dpdk-dev] [PATCH 10/17] net/qede: reorganize filter code
2018-09-08 20:30 ` [dpdk-dev] [PATCH 10/17] net/qede: reorganize filter code Rasesh Mody
@ 2018-09-20 23:51 ` Ferruh Yigit
0 siblings, 0 replies; 21+ messages in thread
From: Ferruh Yigit @ 2018-09-20 23:51 UTC (permalink / raw)
To: Rasesh Mody, dev; +Cc: Shahed Shaikh, Dept-EngDPDKDev
On 9/8/2018 9:30 PM, Rasesh Mody wrote:
> From: Shahed Shaikh <shahed.shaikh@cavium.com>
>
> - rename qede_fdir.c to qede_filter.c
> - move all filter code to qede_filter.c
>
> Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
> ---
> drivers/net/qede/Makefile | 2 +-
> drivers/net/qede/qede_ethdev.c | 687 +-----------------------
> drivers/net/qede/qede_ethdev.h | 25 +-
> drivers/net/qede/qede_fdir.c | 470 ----------------
> drivers/net/qede/qede_filter.c | 1147 ++++++++++++++++++++++++++++++++++++++++
> 5 files changed, 1172 insertions(+), 1159 deletions(-)
> delete mode 100644 drivers/net/qede/qede_fdir.c
> create mode 100644 drivers/net/qede/qede_filter.c
>
> diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
> index 488ca1d..2ecbd8d 100644
> --- a/drivers/net/qede/Makefile
> +++ b/drivers/net/qede/Makefile
> @@ -105,6 +105,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
> SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
> SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
> SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
> -SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
> +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_filter.c
meson.build file also needs to be updated [1], change applied in tree.
[1]
diff --git a/drivers/net/qede/meson.build b/drivers/net/qede/meson.build
index 6280073a5..12388a680 100644
--- a/drivers/net/qede/meson.build
+++ b/drivers/net/qede/meson.build
@@ -6,7 +6,7 @@ objs = [base_objs]
sources = files(
'qede_ethdev.c',
- 'qede_fdir.c',
+ 'qede_filter.c',
'qede_main.c',
'qede_rxtx.c',
)
Thread overview: 21+ messages
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 01/17] net/qede/base: fix to handle stag update event Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 02/17] net/qede/base: add support for OneView APIs Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 03/17] net/qede/base: get pre-negotiated values for stag and bw Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 04/17] net/qede: fix to program HW regs with ether type Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 05/17] net/qede/base: limit number of non ethernet queues to 64 Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 06/17] net/qede/base: correct MCP error handler's log verbosity Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 07/17] net/qede/base: fix logic for sfp get/set Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 08/17] net/qede/base: use trust mode for forced MAC limitations Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 09/17] net/qede/base: use pointer for bytes len read Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 10/17] net/qede: reorganize filter code Rasesh Mody
2018-09-20 23:51 ` Ferruh Yigit
2018-09-08 20:31 ` [dpdk-dev] [PATCH 11/17] net/qede: fix flow director bug for IPv6 filter Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 12/17] net/qede: refactor fdir code into generic aRFS Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 13/17] net/qede: add support for generic flow API Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 14/17] net/qede: fix Rx buffer size calculation Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 15/17] net/qede: add support for Rx descriptor status Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 16/17] net/qede/base: fix MFW FLR flow bug Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 17/17] net/qede: add support for dev reset Rasesh Mody
2018-09-10 5:05 ` [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes David Marchand
2018-09-20 16:17 ` Ferruh Yigit