* [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config
@ 2019-02-22 11:15 Hemant Agrawal
2019-02-22 11:15 ` [dpdk-dev] [PATCH 2/6] mempool/dpaa2: fix to reduce continuous print on empty pool Hemant Agrawal
` (5 more replies)
0 siblings, 6 replies; 7+ messages in thread
From: Hemant Agrawal @ 2019-02-22 11:15 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Shreyansh Jain
This patch adds support to configure a custom TPID in DPNI,
i.e. a value other than 0x8100 and 0x88A8.
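As a usage sketch (not part of the patch; port initialization and the
rte_ethdev headers are assumed), an application reaches the new callback
through the generic ethdev API:

    /* Hedged example: request a custom outer TPID of 0x9100.
     * The standard values 0x8100/0x88A8 are a no-op in this PMD.
     */
    int ret = rte_eth_dev_set_vlan_ether_type(port_id,
                                              ETH_VLAN_TYPE_OUTER,
                                              0x9100);
    if (ret < 0)
        printf("custom TPID config failed: %d\n", ret);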
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 39 ++++++++++++
drivers/net/dpaa2/mc/dpni.c | 98 +++++++++++++++++++++++++++++
drivers/net/dpaa2/mc/fsl_dpni.h | 20 ++++++
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 18 ++++++
4 files changed, 175 insertions(+)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 2b90f4021..bc3faa8a3 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -161,6 +161,44 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return 0;
}
+static int
+dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type __rte_unused,
+ uint16_t tpid)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ int ret = -ENOTSUP;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* nothing to be done for standard vlan tpids */
+ if (tpid == 0x8100 || tpid == 0x88A8)
+ return 0;
+
+ ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
+ priv->token, tpid);
+ if (ret < 0)
+ DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
+ /* if TPID slots are already configured, remove one and retry */
+ if (ret == -EBUSY) {
+ struct dpni_custom_tpid_cfg tpid_list = {0};
+
+ ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
+ priv->token, &tpid_list);
+ if (ret < 0)
+ goto fail;
+ ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
+ priv->token, tpid_list.tpid1);
+ if (ret < 0)
+ goto fail;
+ ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
+ priv->token, tpid);
+ }
+fail:
+ return ret;
+}
+
static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
char *fw_version,
@@ -1832,6 +1870,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.mtu_set = dpaa2_dev_mtu_set,
.vlan_filter_set = dpaa2_vlan_filter_set,
.vlan_offload_set = dpaa2_vlan_offload_set,
+ .vlan_tpid_set = dpaa2_vlan_tpid_set,
.rx_queue_setup = dpaa2_dev_rx_queue_setup,
.rx_queue_release = dpaa2_dev_rx_queue_release,
.tx_queue_setup = dpaa2_dev_tx_queue_setup,
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 44b5604d3..0907a3699 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -2063,3 +2063,101 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpni_add_custom_tpid() - Configures a distinct Ethertype value
+ * (or TPID value) to indicate a VLAN tag in addition to the common
+ * TPID values 0x8100 and 0x88A8
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tpid: New value for TPID
+ *
+ * Only two custom values are accepted. If the function is called a third
+ * time it will return an error.
+ * To replace an existing value, first use dpni_remove_custom_tpid() to
+ * remove the previous TPID and then call this function again.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid)
+{
+ struct dpni_cmd_add_custom_tpid *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_CUSTOM_TPID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_custom_tpid *)cmd.params;
+ cmd_params->tpid = cpu_to_le16(tpid);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_custom_tpid() - Removes a distinct Ethertype value added
+ * previously with dpni_add_custom_tpid()
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tpid: TPID value to be removed
+ *
+ * Use this function when a TPID value added with dpni_add_custom_tpid() needs
+ * to be replaced.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid)
+{
+ struct dpni_cmd_remove_custom_tpid *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_CUSTOM_TPID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_custom_tpid *)cmd.params;
+ cmd_params->tpid = cpu_to_le16(tpid);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_custom_tpid() - Returns custom TPID (vlan tags) values configured
+ * to detect 802.1q frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tpid: TPID values. Only nonzero members of the structure are valid.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, struct dpni_custom_tpid_cfg *tpid)
+{
+ struct dpni_rsp_get_custom_tpid *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_CUSTOM_TPID,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* read command response */
+ rsp_params = (struct dpni_rsp_get_custom_tpid *)cmd.params;
+ tpid->tpid1 = le16_to_cpu(rsp_params->tpid1);
+ tpid->tpid2 = le16_to_cpu(rsp_params->tpid2);
+
+ return err;
+}
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index de1bcb5bf..0359a2bc7 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -1202,4 +1202,24 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
struct opr_cfg *cfg,
struct opr_qry *qry);
+int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid);
+
+int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid);
+
+/**
+ * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
+ * values used in current dpni object to detect 802.1q frames.
+ * @tpid1: first tag. Not used if zero.
+ * @tpid2: second tag. Not used if zero.
+ */
+struct dpni_custom_tpid_cfg {
+ uint16_t tpid1;
+ uint16_t tpid2;
+};
+
+int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, struct dpni_custom_tpid_cfg *tpid);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 3df5bcf1f..81830ed85 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -91,6 +91,9 @@
#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
+#define DPNI_CMDID_ADD_CUSTOM_TPID DPNI_CMD(0x275)
+#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
+#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -674,5 +677,20 @@ struct dpni_rsp_get_opr {
uint16_t opr_id;
};
+struct dpni_cmd_add_custom_tpid {
+ uint16_t pad;
+ uint16_t tpid;
+};
+
+struct dpni_cmd_remove_custom_tpid {
+ uint16_t pad;
+ uint16_t tpid;
+};
+
+struct dpni_rsp_get_custom_tpid {
+ uint16_t tpid1;
+ uint16_t tpid2;
+};
+
#pragma pack(pop)
#endif /* _FSL_DPNI_CMD_H */
--
2.17.1
* [dpdk-dev] [PATCH 2/6] mempool/dpaa2: fix to reduce continuous print on empty pool
2019-02-22 11:15 [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config Hemant Agrawal
@ 2019-02-22 11:15 ` Hemant Agrawal
2019-02-22 11:16 ` [dpdk-dev] [PATCH 3/6] bus/fslmc: add enqueue response read routines in qbman Hemant Agrawal
` (4 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Hemant Agrawal @ 2019-02-22 11:15 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Shreyansh Jain, stable
Change the print to DP_DEBUG to avoid continuous prints when
a buffer pool runs out of buffers.
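Note that the DP (datapath) log macros are also gated at compile time;
a sketch of the relevant knob in the make-based config (name per the
19.05-era build system; the default level shown is an assumption):

    CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_DEBUG

Without raising this level, DPAA2_MEMPOOL_DP_DEBUG() calls compile out
of the fast path entirely.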
Fixes: 3646ccf0b036 ("mempool/dpaa2: support dynamic logging")
Cc: stable@dpdk.org
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 335eae40e..da66577cc 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -326,8 +326,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
* in pool, qbman_swp_acquire returns 0
*/
if (ret <= 0) {
- DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
- " err code: %d", ret);
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Buffer acquire failed with err code: %d", ret);
/* The API expect the exact number of requested bufs */
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
--
2.17.1
* [dpdk-dev] [PATCH 3/6] bus/fslmc: add enqueue response read routines in qbman
2019-02-22 11:15 [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config Hemant Agrawal
2019-02-22 11:15 ` [dpdk-dev] [PATCH 2/6] mempool/dpaa2: fix to reduce continuous print on empty pool Hemant Agrawal
@ 2019-02-22 11:16 ` Hemant Agrawal
2019-02-22 11:16 ` [dpdk-dev] [PATCH 4/6] net/dpaa2: add support for 16 Rx Queues per traffic class Hemant Agrawal
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Hemant Agrawal @ 2019-02-22 11:16 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Shreyansh Jain, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
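A minimal polling sketch of the new helpers (assumptions: 'resp' was
registered with qbman_eq_desc_set_response()/qbman_eq_desc_set_token()
at enqueue time, and eth_fd_to_mbuf() is the driver-internal FD-to-mbuf
converter), mirroring the cleanup loop this patch adds in
dpaa2_free_eq_descriptors():

    /* Poll one response slot; reclaim the frame if the enqueue
     * failed, then mark the slot reusable.
     */
    if (qbman_result_eqresp_rspid(resp)) {          /* QBMAN wrote back */
            if (qbman_result_eqresp_rc(resp)) {     /* nonzero: failed */
                    struct qbman_fd *fd = qbman_result_eqresp_fd(resp);

                    rte_pktmbuf_free(eth_fd_to_mbuf(fd));
            }
            qbman_result_eqresp_set_rspid(resp, 0); /* slot free again */
    }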
---
drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 47 ++++
drivers/bus/fslmc/portal/dpaa2_hw_dpio.h | 4 +
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 24 ++
.../fslmc/qbman/include/fsl_qbman_portal.h | 56 +++-
drivers/bus/fslmc/qbman/qbman_portal.c | 26 ++
drivers/bus/fslmc/rte_bus_fslmc_version.map | 16 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 42 +++
drivers/net/dpaa2/dpaa2_ethdev.h | 13 +-
drivers/net/dpaa2/dpaa2_rxtx.c | 255 ++++++++++++++++++
9 files changed, 480 insertions(+), 3 deletions(-)
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index f377f24ae..7bcbde840 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -526,6 +526,18 @@ dpaa2_create_dpio_device(int vdev_fd,
goto err;
}
+ dpio_dev->eqresp = rte_zmalloc(NULL, MAX_EQ_RESP_ENTRIES *
+ (sizeof(struct qbman_result) +
+ sizeof(struct eqresp_metadata)),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpio_dev->eqresp) {
+ DPAA2_BUS_ERR("Memory allocation failed for eqresp");
+ goto err;
+ }
+ dpio_dev->eqresp_meta = (struct eqresp_metadata *)(dpio_dev->eqresp +
+ MAX_EQ_RESP_ENTRIES);
+
TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
return 0;
@@ -588,6 +600,41 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
return -1;
}
+uint32_t
+dpaa2_free_eq_descriptors(void)
+{
+ struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+ struct qbman_result *eqresp;
+ struct eqresp_metadata *eqresp_meta;
+ struct dpaa2_queue *txq;
+
+ while (dpio_dev->eqresp_ci != dpio_dev->eqresp_pi) {
+ eqresp = &dpio_dev->eqresp[dpio_dev->eqresp_ci];
+ eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_ci];
+
+ if (!qbman_result_eqresp_rspid(eqresp))
+ break;
+
+ if (qbman_result_eqresp_rc(eqresp)) {
+ txq = eqresp_meta->dpaa2_q;
+ txq->cb_eqresp_free(dpio_dev->eqresp_ci);
+ }
+ qbman_result_eqresp_set_rspid(eqresp, 0);
+
+ dpio_dev->eqresp_ci + 1 < MAX_EQ_RESP_ENTRIES ?
+ dpio_dev->eqresp_ci++ : (dpio_dev->eqresp_ci = 0);
+ }
+
+ /* Return one entry less so that PI and CI are never the same in the
+ * case where all the EQ responses are in use.
+ */
+ if (dpio_dev->eqresp_ci > dpio_dev->eqresp_pi)
+ return dpio_dev->eqresp_ci - dpio_dev->eqresp_pi - 1;
+ else
+ return dpio_dev->eqresp_ci - dpio_dev->eqresp_pi +
+ MAX_EQ_RESP_ENTRIES - 1;
+}
+
static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
.dev_type = DPAA2_IO,
.create = dpaa2_create_dpio_device,
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index 4354c76de..17e7e4fad 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -51,4 +51,8 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage);
void
dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage);
+/* free the enqueue response descriptors */
+uint32_t
+dpaa2_free_eq_descriptors(void);
+
#endif /* _DPAA2_HW_DPIO_H_ */
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 626fcbbca..4679e9340 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -34,6 +34,7 @@
/* Maximum number of slots available in TX ring */
#define MAX_TX_RING_SLOTS 32
+#define MAX_EQ_RESP_ENTRIES (MAX_TX_RING_SLOTS + 1)
/* Maximum number of slots available in RX ring */
#define DPAA2_EQCR_RING_SIZE 8
@@ -50,6 +51,15 @@
/* EQCR shift to get EQCR size for LX2 (2 >> 5) = 32 for LX2 */
#define DPAA2_LX2_EQCR_SHIFT 5
+/* Flag to determine an ordered queue mbuf */
+#define DPAA2_ENQUEUE_FLAG_ORP (1ULL << 30)
+/* ORP ID shift and mask */
+#define DPAA2_EQCR_OPRID_SHIFT 16
+#define DPAA2_EQCR_OPRID_MASK 0x3FFF0000
+/* Sequence number shift and mask */
+#define DPAA2_EQCR_SEQNUM_SHIFT 0
+#define DPAA2_EQCR_SEQNUM_MASK 0x0000FFFF
+
#define DPAA2_SWP_CENA_REGION 0
#define DPAA2_SWP_CINH_REGION 1
#define DPAA2_SWP_CENA_MEM_REGION 2
@@ -77,12 +87,23 @@
#define DPAA2_DPCI_MAX_QUEUES 2
+struct dpaa2_queue;
+
+struct eqresp_metadata {
+ struct dpaa2_queue *dpaa2_q;
+ struct rte_mempool *mp;
+};
+
struct dpaa2_dpio_dev {
TAILQ_ENTRY(dpaa2_dpio_dev) next;
/**< Pointer to Next device instance */
uint16_t index; /**< Index of a instance in the list */
rte_atomic16_t ref_count;
/**< How many thread contexts are sharing this.*/
+ uint16_t eqresp_ci;
+ uint16_t eqresp_pi;
+ struct qbman_result *eqresp;
+ struct eqresp_metadata *eqresp_meta;
struct fsl_mc_io *dpio; /** handle to DPIO portal object */
uint16_t token;
struct qbman_swp *sw_portal; /** SW portal object */
@@ -125,6 +146,8 @@ typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
struct dpaa2_queue *rxq,
struct rte_event *ev);
+typedef void (dpaa2_queue_cb_eqresp_free_t)(uint16_t eqresp_ci);
+
struct dpaa2_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
union {
@@ -144,6 +167,7 @@ struct dpaa2_queue {
};
struct rte_event ev;
dpaa2_queue_cb_dqrr_t *cb;
+ dpaa2_queue_cb_eqresp_free_t *cb_eqresp_free;
struct dpaa2_bp_info *bp_array;
};
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 10c72e048..a9192d3cb 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -212,6 +212,23 @@ struct qbman_result {
__le32 rid_tok;
__le64 ctx;
} scn;
+ struct eq_resp {
+ uint8_t verb;
+ uint8_t dca;
+ __le16 seqnum;
+ __le16 oprid;
+ uint8_t reserved;
+ uint8_t rc;
+ __le32 tgtid;
+ __le32 tag;
+ uint16_t qdbin;
+ uint8_t qpri;
+ uint8_t reserved1;
+ __le32 fqid:24;
+ __le32 rspid:8;
+ __le64 rsp_addr;
+ uint8_t fd[32];
+ } eq_resp;
};
};
@@ -788,7 +805,6 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
/************/
/* Enqueues */
/************/
-
/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
union {
@@ -956,6 +972,44 @@ void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
uint8_t dqrr_idx, int park);
+/**
+ * qbman_result_eqresp_fd() - Get fd from enqueue response.
+ * @eqresp: enqueue response.
+ *
+ * Return the fd pointer.
+ */
+struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp);
+
+/**
+ * qbman_result_eqresp_set_rspid() - Set the response id in enqueue response.
+ * @eqresp: enqueue response.
+ * @val: values to set into the response id.
+ *
+ * This value is set into the response id before the enqueue command, and
+ * gets overwritten by QBMAN once the enqueue command is complete.
+ */
+void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val);
+
+/**
+ * qbman_result_eqresp_rspid() - Get the response id.
+ * @eqresp: enqueue response.
+ *
+ * Return the response id.
+ *
+ * At enqueue time the user provides the response id. The response id gets
+ * copied into the enqueue response to determine whether the command has
+ * completed and the response has been updated.
+ */
+uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
+
+/**
+ * qbman_result_eqresp_rc() - Determines if the enqueue command was successful.
+ * @eqresp: enqueue response.
+ *
+ * Return 0 when the command is successful.
+ */
+uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
+
/**
* qbman_swp_enqueue() - Issue an enqueue command.
* @s: the software portal used for enqueue.
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 14f4b0344..f49b18097 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -1569,6 +1569,32 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
return qbman_result_SCN_ctx(scn);
}
+/********************/
+/* Parsing EQ RESP */
+/********************/
+struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
+{
+ return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
+}
+
+void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
+{
+ eqresp->eq_resp.rspid = val;
+}
+
+uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
+{
+ return eqresp->eq_resp.rspid;
+}
+
+uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
+{
+ if (eqresp->eq_resp.rc == 0xE)
+ return 0;
+ else
+ return -1;
+}
+
/******************/
/* Buffer release */
/******************/
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index dcc4e082e..811a2e7b9 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -120,7 +120,6 @@ DPDK_18.05 {
DPDK_18.11 {
global:
-
dpaa2_dqrr_size;
dpaa2_eqcr_size;
dpci_get_link_state;
@@ -129,3 +128,18 @@ DPDK_18.11 {
dpci_set_opr;
} DPDK_18.05;
+
+DPDK_19.05 {
+ global:
+ dpaa2_free_eq_descriptors;
+
+ qbman_eq_desc_set_orp;
+ qbman_eq_desc_set_token;
+ qbman_result_DQ_odpid;
+ qbman_result_DQ_seqnum;
+ qbman_result_eqresp_fd;
+ qbman_result_eqresp_rc;
+ qbman_result_eqresp_rspid;
+ qbman_result_eqresp_set_rspid;
+} DPDK_18.11;
+
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index bc3faa8a3..0ab43cadf 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -665,6 +665,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -ret;
}
}
+ dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
dev->data->tx_queues[tx_queue_id] = dpaa2_q;
return 0;
}
@@ -894,6 +895,10 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
dpaa2_eth_setup_irqs(dev, 1);
}
+ /* Change the tx burst function if ordered queues are used */
+ if (priv->en_ordered)
+ dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
+
return 0;
}
@@ -1793,6 +1798,8 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
+ else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
+ dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
else
return -EINVAL;
@@ -1807,6 +1814,41 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
cfg.destination.hold_active = 1;
}
+ if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
+ !eth_priv->en_ordered) {
+ struct opr_cfg ocfg;
+
+ /* Restoration window size = 256 frames */
+ ocfg.oprrws = 3;
+ /* Restoration window size = 512 frames for LX2 */
+ if (dpaa2_svr_family == SVR_LX2160A)
+ ocfg.oprrws = 4;
+ /* Auto advance NESN window enabled */
+ ocfg.oa = 1;
+ /* Late arrival window size disabled */
+ ocfg.olws = 0;
+ /* ORL resource exhaustion advance NESN disabled */
+ ocfg.oeane = 0;
+ /* Loose ordering enabled */
+ ocfg.oloe = 1;
+ eth_priv->en_loose_ordered = 1;
+ /* Strict ordering enabled if explicitly set */
+ if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
+ ocfg.oloe = 0;
+ eth_priv->en_loose_ordered = 0;
+ }
+
+ ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
+ dpaa2_ethq->tc_index, flow_id,
+ OPR_OPT_CREATE, &ocfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
+ return ret;
+ }
+
+ eth_priv->en_ordered = 1;
+ }
+
options |= DPNI_QUEUE_OPT_USER_CTX;
cfg.user_context = (size_t)(dpaa2_ethq);
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 420ad6446..313cbe4bf 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -96,15 +96,17 @@ struct dpaa2_dev_priv {
uint16_t token;
uint8_t nb_tx_queues;
uint8_t nb_rx_queues;
+ uint32_t options;
void *rx_vq[MAX_RX_QUEUES];
void *tx_vq[MAX_TX_QUEUES];
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
- uint32_t options;
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_rx_tc;
uint8_t flags; /*dpaa2 config flags */
+ uint8_t en_ordered;
+ uint8_t en_loose_ordered;
};
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
@@ -135,6 +137,15 @@ void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
const struct qbman_result *dq,
struct dpaa2_queue *rxq,
struct rte_event *ev);
+void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
+
#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 2d4b9ef14..1aa184730 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -699,6 +699,33 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
+void __attribute__((hot))
+dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
+ DPAA2_FD_PTA_SIZE + 16));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ ev->mbuf = eth_fd_to_mbuf(fd);
+
+ ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
+ ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
+ ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
/*
* Callback to handle sending packets through WRIOP based interface
*/
@@ -864,6 +891,234 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return num_tx;
}
+void
+dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
+{
+ struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+ struct qbman_fd *fd;
+ struct rte_mbuf *m;
+
+ fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
+ m = eth_fd_to_mbuf(fd);
+ rte_pktmbuf_free(m);
+}
+
+static void
+dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
+ struct rte_mbuf *m,
+ struct qbman_eq_desc *eqdesc)
+{
+ struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+ struct dpaa2_dev_priv *priv = eth_data->dev_private;
+ struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+ struct eqresp_metadata *eqresp_meta;
+ uint16_t orpid, seqnum;
+ uint8_t dq_idx;
+
+ qbman_eq_desc_set_qd(eqdesc, priv->qdid, dpaa2_q->flow_id,
+ dpaa2_q->tc_index);
+
+ if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
+ orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
+ DPAA2_EQCR_OPRID_SHIFT;
+ seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
+ DPAA2_EQCR_SEQNUM_SHIFT;
+
+ if (!priv->en_loose_ordered) {
+ qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
+ qbman_eq_desc_set_response(eqdesc, (uint64_t)
+ DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
+ dpio_dev->eqresp_pi]), 1);
+ qbman_eq_desc_set_token(eqdesc, 1);
+
+ eqresp_meta = &dpio_dev->eqresp_meta[
+ dpio_dev->eqresp_pi];
+ eqresp_meta->dpaa2_q = dpaa2_q;
+ eqresp_meta->mp = m->pool;
+
+ dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
+ dpio_dev->eqresp_pi++ :
+ (dpio_dev->eqresp_pi = 0);
+ } else {
+ qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
+ }
+ } else {
+ dq_idx = m->seqn - 1;
+ qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
+ }
+ m->seqn = DPAA2_INVALID_MBUF_SEQN;
+}
+
+/* Callback to handle sending ordered packets through WRIOP based interface */
+uint16_t
+dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function to transmit the frames to given device and VQ*/
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+ struct dpaa2_dev_priv *priv = eth_data->dev_private;
+ struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ struct rte_mbuf *mi;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ struct qbman_swp *swp;
+ uint32_t frames_to_send, num_free_eq_desc;
+ uint32_t loop, retry_count;
+ int32_t ret;
+ uint16_t num_tx = 0;
+ uint16_t bpid;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
+ eth_data, dpaa2_q->fqid);
+
+ /* This would also handle normal and atomic queues as any type
+ * of packet can be enqueued when ordered queues are being used.
+ */
+ while (nb_pkts) {
+ /*Check if the queue is congested*/
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto skip_tx;
+ }
+
+ frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_pkts;
+
+ if (!priv->en_loose_ordered) {
+ if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
+ num_free_eq_desc = dpaa2_free_eq_descriptors();
+ if (num_free_eq_desc < frames_to_send)
+ frames_to_send = num_free_eq_desc;
+ }
+ }
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ /*Prepare enqueue descriptor*/
+ qbman_eq_desc_clear(&eqdesc[loop]);
+
+ if ((*bufs)->seqn) {
+ /* Use only queue 0 for Tx in case of atomic/
+ * ordered packets, as packets can get reordered
+ * when being transmitted out from the interface
+ */
+ dpaa2_set_enqueue_descriptor(order_sendq,
+ (*bufs),
+ &eqdesc[loop]);
+ } else {
+ qbman_eq_desc_set_no_orp(&eqdesc[loop],
+ DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_qd(&eqdesc[loop], priv->qdid,
+ dpaa2_q->flow_id,
+ dpaa2_q->tc_index);
+ }
+
+ if (likely(RTE_MBUF_DIRECT(*bufs))) {
+ mp = (*bufs)->pool;
+ /* Check the basic scenario and set
+ * the FD appropriately here itself.
+ */
+ if (likely(mp && mp->ops_index ==
+ priv->bp_list->dpaa2_ops_index &&
+ (*bufs)->nb_segs == 1 &&
+ rte_mbuf_refcnt_read((*bufs)) == 1)) {
+ if (unlikely((*bufs)->ol_flags
+ & PKT_TX_VLAN_PKT)) {
+ ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_n_return;
+ }
+ DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+ &fd_arr[loop],
+ mempool_to_bpid(mp));
+ bufs++;
+ continue;
+ }
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
+ /* Not a hw_pkt pool allocated frame */
+ if (unlikely(!mp || !priv->bp_list)) {
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
+ goto send_n_return;
+ }
+
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ bpid = priv->bp_list->buf_pool.bpid;
+
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ DPAA2_PMD_ERR(
+ "S/G not supp for non hw offload buffer");
+ goto send_n_return;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ goto send_n_return;
+ }
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
+ } else {
+ bpid = mempool_to_bpid(mp);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop],
+ bpid))
+ goto send_n_return;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
+ }
+ bufs++;
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_enqueue_multiple_desc(swp,
+ &eqdesc[loop], &fd_arr[loop],
+ frames_to_send - loop);
+ }
+
+ num_tx += frames_to_send;
+ nb_pkts -= frames_to_send;
+ }
+ dpaa2_q->tx_pkts += num_tx;
+ return num_tx;
+
+send_n_return:
+ /* send any already prepared fd */
+ if (loop) {
+ unsigned int i = 0;
+
+ while (i < loop) {
+ i += qbman_swp_enqueue_multiple_desc(swp, &eqdesc[i],
+ &fd_arr[i], loop - i);
+ }
+ num_tx += loop;
+ }
+skip_tx:
+ dpaa2_q->tx_pkts += num_tx;
+ return num_tx;
+}
+
/**
* Dummy DPDK callback for TX.
*
--
2.17.1
* [dpdk-dev] [PATCH 4/6] net/dpaa2: add support for 16 Rx Queues per traffic class
2019-02-22 11:15 [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config Hemant Agrawal
2019-02-22 11:15 ` [dpdk-dev] [PATCH 2/6] mempool/dpaa2: fix to reduce continuous print on empty pool Hemant Agrawal
2019-02-22 11:16 ` [dpdk-dev] [PATCH 3/6] bus/fslmc: add enqueue response read routines in qbman Hemant Agrawal
@ 2019-02-22 11:16 ` Hemant Agrawal
2019-02-22 11:16 ` [dpdk-dev] [PATCH 5/6] net/dpaa2: support low level loopback tester Hemant Agrawal
` (2 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Hemant Agrawal @ 2019-02-22 11:16 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Shreyansh Jain, Ashish Jain
From: Ashish Jain <ashish.jain@nxp.com>
Add support for 16 queues per TC per DPNI port,
which is required for the LX2 platform.
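For example (illustrative numbers), with num_rxqueue_per_tc = 16 a
distribution index of 17 maps to tc_index = 17 / 16 = 1 and
flow_id = 17 % 16 = 1, i.e. the second flow of the second traffic class.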
Signed-off-by: Ashish Jain <ashish.jain@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 6 ++++--
drivers/net/dpaa2/dpaa2_ethdev.h | 2 +-
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 0ab43cadf..f8c2983b9 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -266,6 +266,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
struct dpaa2_dev_priv *priv = dev->data->dev_private;
uint16_t dist_idx;
uint32_t vq_id;
+ uint8_t num_rxqueue_per_tc;
struct dpaa2_queue *mc_q, *mcq;
uint32_t tot_queues;
int i;
@@ -273,6 +274,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
RTE_CACHE_LINE_SIZE);
@@ -311,8 +313,8 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
vq_id = 0;
for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
- mcq->tc_index = DPAA2_DEF_TC;
- mcq->flow_id = dist_idx;
+ mcq->tc_index = dist_idx / num_rxqueue_per_tc;
+ mcq->flow_id = dist_idx % num_rxqueue_per_tc;
vq_id++;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 313cbe4bf..13259be7d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -20,7 +20,7 @@
#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/
#define MAX_TCS DPNI_MAX_TC
-#define MAX_RX_QUEUES 16
+#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 16
/*default tc to be used for ,congestion, distribution etc configuration. */
--
2.17.1
* [dpdk-dev] [PATCH 5/6] net/dpaa2: support low level loopback tester
2019-02-22 11:15 [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config Hemant Agrawal
` (2 preceding siblings ...)
2019-02-22 11:16 ` [dpdk-dev] [PATCH 4/6] net/dpaa2: add support for 16 Rx Queues per traffic class Hemant Agrawal
@ 2019-02-22 11:16 ` Hemant Agrawal
2019-02-22 11:16 ` [dpdk-dev] [PATCH 6/6] net/dpaa2: add basic support for generic flow Hemant Agrawal
2019-02-27 9:30 ` [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config Ferruh Yigit
5 siblings, 0 replies; 7+ messages in thread
From: Hemant Agrawal @ 2019-02-22 11:16 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Shreyansh Jain
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
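A hedged invocation sketch (the DPNI object name and the device
whitelist syntax are assumptions for illustration; consult the dpaa2
guide for the exact form on a given platform):

    ./testpmd -c 0xf -n 1 -w fslmc:dpni.1,drv_loopback=1 -- -i

Every packet received on dpni.1 is then transmitted back out of the
same port by the Rx burst function itself.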
---
doc/guides/nics/dpaa2.rst | 5 +
.../fslmc/qbman/include/fsl_qbman_portal.h | 18 ++
drivers/bus/fslmc/qbman/qbman_portal.c | 161 ++++++++++++++++++
drivers/bus/fslmc/rte_bus_fslmc_version.map | 1 +
drivers/net/dpaa2/dpaa2_ethdev.c | 57 ++++++-
drivers/net/dpaa2/dpaa2_ethdev.h | 3 +
drivers/net/dpaa2/dpaa2_rxtx.c | 161 ++++++++++++++++++
7 files changed, 402 insertions(+), 4 deletions(-)
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index 769dc4e12..392ab0580 100644
--- a/doc/guides/nics/dpaa2.rst
+++ b/doc/guides/nics/dpaa2.rst
@@ -499,6 +499,11 @@ for details.
Done
testpmd>
+
+* Use the dev arg option ``drv_loopback=1`` to loop back packets at the
+ driver level. Any packet received will be reflected back by the
+ driver on the same port.
+
Enabling logs
-------------
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index a9192d3cb..07b8a4372 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -1039,6 +1039,24 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
const struct qbman_fd *fd,
uint32_t *flags,
int num_frames);
+
+/**
+ * qbman_swp_enqueue_multiple_fd() - Enqueue multiple frames with the same
+ * eq descriptor
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ struct qbman_fd **fd,
+ uint32_t *flags,
+ int num_frames);
+
/**
* qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
* individual eq descriptor.
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index f49b18097..20da8b921 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -93,6 +93,20 @@ qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
uint32_t *flags,
int num_frames);
+static int
+qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ struct qbman_fd **fd,
+ uint32_t *flags,
+ int num_frames);
+
+static int
+qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ struct qbman_fd **fd,
+ uint32_t *flags,
+ int num_frames);
+
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
@@ -139,6 +153,13 @@ static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
int num_frames)
= qbman_swp_enqueue_multiple_direct;
+static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ struct qbman_fd **fd,
+ uint32_t *flags,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_fd_direct;
+
static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
@@ -243,6 +264,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
qbman_swp_enqueue_ring_mode_mem_back;
qbman_swp_enqueue_multiple_ptr =
qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_fd_ptr =
+ qbman_swp_enqueue_multiple_fd_mem_back;
qbman_swp_enqueue_multiple_desc_ptr =
qbman_swp_enqueue_multiple_desc_mem_back;
qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
@@ -862,6 +885,144 @@ inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
+static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ struct qbman_fd **fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
+ eqcr_pi++;
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cacheline without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (size_t)s->sys.addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dcbf(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ eqcr_pi++;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
+static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ struct qbman_fd **fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ return num_enqueued;
+}
+
+inline int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ struct qbman_fd **fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags, num_frames);
+}
+
static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index 811a2e7b9..aa844dd80 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -141,5 +141,6 @@ DPDK_19.05 {
qbman_result_eqresp_rc;
qbman_result_eqresp_rspid;
qbman_result_eqresp_set_rspid;
+ qbman_swp_enqueue_multiple_fd;
} DPDK_18.11;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index f8c2983b9..08a95a14a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -27,6 +27,8 @@
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>
+#define DRIVER_LOOPBACK_MODE "drv_loopback"
+
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
DEV_RX_OFFLOAD_VLAN_STRIP |
@@ -732,7 +734,8 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
+ if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
+ dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
return ptypes;
return NULL;
}
@@ -1997,6 +2000,43 @@ populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
return -1;
}
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
@@ -2016,7 +2056,10 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
* plugged.
*/
eth_dev->dev_ops = &dpaa2_ethdev_ops;
- eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
+ if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
+ eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
+ else
+ eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
return 0;
}
@@ -2133,7 +2176,12 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &dpaa2_ethdev_ops;
- eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
+ if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
+ eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
+ DPAA2_PMD_INFO("Loopback mode");
+ } else {
+ eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
+ }
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
@@ -2251,7 +2299,8 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
};
RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
-
+RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
+ DRIVER_LOOPBACK_MODE "=<int>");
RTE_INIT(dpaa2_pmd_init_log)
{
dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 13259be7d..7148104ec 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -125,6 +125,9 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id);
+uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts);
+
uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts);
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 1aa184730..c6e50123c 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1143,3 +1143,164 @@ dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
(void)nb_pkts;
return 0;
}
+
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#elif defined(RTE_TOOLCHAIN_CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#endif
+
+/* This function loopbacks all the received packets.*/
+uint16_t
+dpaa2_dev_loopback_rx(void *queue,
+ struct rte_mbuf **bufs __rte_unused,
+ uint16_t nb_pkts)
+{
+ /* Function receive frames for a given device and VQ*/
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_result *dq_storage, *dq_storage1 = NULL;
+ uint32_t fqid = dpaa2_q->fqid;
+ int ret, num_rx = 0, num_tx = 0, pull_size;
+ uint8_t pending, status;
+ struct qbman_swp *swp;
+ struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
+ struct qbman_pull_desc pulldesc;
+ struct qbman_eq_desc eqdesc;
+ struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+ struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+ struct dpaa2_dev_priv *priv = eth_data->dev_private;
+ struct dpaa2_queue *tx_q = priv->tx_vq[0];
+ /* TODO: currently we use only the 1st TX queue for loopback */
+
+ if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
+ ret = dpaa2_affine_qbman_ethrx_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
+ pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
+ if (unlikely(!q_storage->active_dqs)) {
+ q_storage->toggle = 0;
+ dq_storage = q_storage->dq_storage[q_storage->toggle];
+ q_storage->last_num_pkts = pull_size;
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ q_storage->last_num_pkts);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
+ while (!qbman_check_command_complete(
+ get_swp_active_dqs(
+ DPAA2_PER_LCORE_ETHRX_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
+ }
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_PMD_DP_DEBUG(
+ "VDQ command not issued.QBMAN busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
+ dq_storage);
+ }
+
+ dq_storage = q_storage->active_dqs;
+ rte_prefetch0((void *)(size_t)(dq_storage));
+ rte_prefetch0((void *)(size_t)(dq_storage + 1));
+
+ /* Prepare next pull descriptor. This will give space for the
+ * prefetching done on DQRR entries
+ */
+ q_storage->toggle ^= 1;
+ dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc, pull_size);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
+ (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+
+ /*Prepare enqueue descriptor*/
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
+
+ /* Check if the previously issued command has completed.
+ * Also seems like the SWP is shared between the Ethernet Driver
+ * and the SEC driver.
+ */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
+ clear_swp_active_dqs(q_storage->active_dpio_id);
+
+ pending = 1;
+
+ do {
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+ /* Check whether Last Pull command is Expired and
+ * setting Condition for Loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
+
+ dq_storage++;
+ num_rx++;
+ } while (pending);
+
+ while (num_tx < num_rx) {
+ num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
+ &fd[num_tx], 0, num_rx - num_tx);
+ }
+
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
+ while (!qbman_check_command_complete(
+ get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
+ }
+ /* issue a volatile dequeue command for next pull */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
+ "QBMAN is busy (2)\n");
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage1;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
+
+ dpaa2_q->rx_pkts += num_rx;
+ dpaa2_q->tx_pkts += num_tx;
+
+ return 0;
+}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC diagnostic pop
+#elif defined(RTE_TOOLCHAIN_CLANG)
+#pragma clang diagnostic pop
+#endif
--
2.17.1
* [dpdk-dev] [PATCH 6/6] net/dpaa2: add basic support for generic flow
2019-02-22 11:15 [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config Hemant Agrawal
` (3 preceding siblings ...)
2019-02-22 11:16 ` [dpdk-dev] [PATCH 5/6] net/dpaa2: support low level loopback tester Hemant Agrawal
@ 2019-02-22 11:16 ` Hemant Agrawal
2019-02-27 9:30 ` [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config Ferruh Yigit
5 siblings, 0 replies; 7+ messages in thread
From: Hemant Agrawal @ 2019-02-22 11:16 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Shreyansh Jain, Sunil Kumar Kori
From: Sunil Kumar Kori <sunil.kori@nxp.com>
Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
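As a sketch of the kind of rule this patch enables (Ethernet + IPv4
match steered to a queue via the generic rte_flow API; values are
illustrative and error handling is trimmed):

    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_item_ipv4 ip_spec = {
            .hdr.dst_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
    };
    struct rte_flow_item_ipv4 ip_mask = {
            .hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF), /* exact match */
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ip_spec, .mask = &ip_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
                                            actions, &err);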
---
drivers/net/dpaa2/Makefile | 1 +
drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 4 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 90 +-
drivers/net/dpaa2/dpaa2_ethdev.h | 22 +
drivers/net/dpaa2/dpaa2_flow.c | 1972 ++++++++++++++++++++++++
drivers/net/dpaa2/mc/dpni.c | 312 ++++
drivers/net/dpaa2/mc/fsl_dpni.h | 154 ++
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 88 ++
drivers/net/dpaa2/meson.build | 1 +
9 files changed, 2635 insertions(+), 9 deletions(-)
create mode 100644 drivers/net/dpaa2/dpaa2_flow.c
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index 562551175..8bd269bfa 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -33,6 +33,7 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_mux.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpkg.c
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 11f14931e..56e2e56a3 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -23,7 +23,7 @@
#include "../dpaa2_ethdev.h"
-static int
+int
dpaa2_distset_to_dpkg_profile_cfg(
uint64_t req_dist_set,
struct dpkg_profile_cfg *kg_cfg);
@@ -170,7 +170,7 @@ int dpaa2_remove_flow_dist(
return ret;
}
-static int
+int
dpaa2_distset_to_dpkg_profile_cfg(
uint64_t req_dist_set,
struct dpkg_profile_cfg *kg_cfg)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 08a95a14a..a8f0e3002 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -17,6 +17,7 @@
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>
+#include <rte_flow_driver.h>
#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
@@ -83,6 +84,14 @@ static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
{"egress_confirmed_frames", 2, 4},
};
+static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
+ RTE_ETH_FILTER_ADD,
+ RTE_ETH_FILTER_DELETE,
+ RTE_ETH_FILTER_UPDATE,
+ RTE_ETH_FILTER_FLUSH,
+ RTE_ETH_FILTER_GET
+};
+
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
@@ -1892,6 +1901,47 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
return ret;
}
+static inline int
+dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
+ if (dpaa2_supported_filter_ops[i] == filter_op)
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static int
+dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ if (!dev)
+ return -ENODEV;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
+ ret = -ENOTSUP;
+ break;
+ }
+ *(const void **)arg = &dpaa2_flow_ops;
+ dpaa2_filter_type |= filter_type;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
+ filter_type);
+ ret = -ENOTSUP;
+ break;
+ }
+ return ret;
+}
+
static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_configure = dpaa2_eth_dev_configure,
.dev_start = dpaa2_dev_start,
@@ -1930,6 +1980,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.mac_addr_set = dpaa2_dev_set_mac_addr,
.rss_hash_update = dpaa2_dev_rss_hash_update,
.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
+ .filter_ctrl = dpaa2_dev_flow_ctrl,
};
/* Populate the mac address from physically available (u-boot/firmware) and/or
@@ -2046,7 +2097,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
struct dpni_attr attr;
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
struct dpni_buffer_layout layout;
- int ret, hw_id;
+ int ret, hw_id, i;
PMD_INIT_FUNC_TRACE();
@@ -2102,11 +2153,8 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
priv->num_rx_tc = attr.num_rx_tcs;
- /* Resetting the "num_rx_queues" to equal number of queues in first TC
- * as only one TC is supported on Rx Side. Once Multiple TCs will be
- * in use for Rx processing then this will be changed or removed.
- */
- priv->nb_rx_queues = attr.num_queues;
+ for (i = 0; i < attr.num_rx_tcs; i++)
+ priv->nb_rx_queues += attr.num_queues;
/* Using number of TX queues as number of TX TCs */
priv->nb_tx_queues = attr.num_tx_tcs;
@@ -2184,6 +2232,26 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
}
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
+ /* Init fields w.r.t. classification */
+ memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
+ priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
+ if (!priv->extract.qos_extract_param) {
+ DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
+ " classificaiton ", ret);
+ goto init_err;
+ }
+ for (i = 0; i < MAX_TCS; i++) {
+ memset(&priv->extract.fs_key_cfg[i], 0,
+ sizeof(struct dpkg_profile_cfg));
+ priv->extract.fs_extract_param[i] =
+ (size_t)rte_malloc(NULL, 256, 64);
+ if (!priv->extract.fs_extract_param[i]) {
+ DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
+ ret);
+ goto init_err;
+ }
+ }
+
RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
return 0;
init_err:
@@ -2196,7 +2264,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- int ret;
+ int i, ret;
PMD_INIT_FUNC_TRACE();
@@ -2224,6 +2292,14 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
priv->hw = NULL;
rte_free(dpni);
+ for (i = 0; i < MAX_TCS; i++) {
+ if (priv->extract.fs_extract_param[i])
+ rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
+ }
+
+ if (priv->extract.qos_extract_param)
+ rte_free((void *)(size_t)priv->extract.qos_extract_param);
+
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 7148104ec..0ef1bf368 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -89,6 +89,13 @@
/* enable timestamp in mbuf*/
extern enum pmd_dpaa2_ts dpaa2_enable_ts;
+#define DPAA2_QOS_TABLE_RECONFIGURE 1
+#define DPAA2_FS_TABLE_RECONFIGURE 2
+
+/* Externally defined */
+extern const struct rte_flow_ops dpaa2_flow_ops;
+extern enum rte_filter_type dpaa2_filter_type;
+
struct dpaa2_dev_priv {
void *hw;
int32_t hw_id;
@@ -107,8 +114,23 @@ struct dpaa2_dev_priv {
uint8_t flags; /*dpaa2 config flags */
uint8_t en_ordered;
uint8_t en_loose_ordered;
+
+ struct pattern_s {
+ uint8_t item_count;
+ uint8_t pattern_type[DPKG_MAX_NUM_OF_EXTRACTS];
+ } pattern[MAX_TCS + 1];
+
+ struct extract_s {
+ struct dpkg_profile_cfg qos_key_cfg;
+ struct dpkg_profile_cfg fs_key_cfg[MAX_TCS];
+ uint64_t qos_extract_param;
+ uint64_t fs_extract_param[MAX_TCS];
+ } extract;
};
+int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
+ struct dpkg_profile_cfg *kg_cfg);
+
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
uint64_t req_dist_set);
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
new file mode 100644
index 000000000..20de3da53
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -0,0 +1,1972 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_eth_ctrl.h>
+#include <rte_malloc.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include <fsl_dpni.h>
+#include <fsl_dpkg.h>
+
+#include <dpaa2_ethdev.h>
+#include <dpaa2_pmd_logs.h>
+
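+/* One rte_flow maps to one classification entry: 'rule' holds the
+ * DMA-able key/mask pair, 'tc_id' selects the traffic class (FS table),
+ * 'index' is the entry position derived from the rule priority and
+ * 'flow_id' is the destination queue for QUEUE actions.
+ */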
+struct rte_flow {
+ struct dpni_rule_cfg rule;
+ uint8_t key_size;
+ uint8_t tc_id;
+ uint8_t flow_type;
+ uint8_t index;
+ enum rte_flow_action_type action;
+ uint16_t flow_id;
+};
+
+/* Layout for rule compositions for supported patterns */
+/* TODO: The current design only supports Ethernet + IPv4 based */
+/* classification, so only the corresponding offset macros are valid. */
+/* The rest are placeholders for now; once support for other network */
+/* headers is added, these macros will be updated with correct values. */
+#define DPAA2_CLS_RULE_OFFSET_ETH 0 /*Start of buffer*/
+#define DPAA2_CLS_RULE_OFFSET_VLAN 14 /* DPAA2_CLS_RULE_OFFSET_ETH */
+ /* + Sizeof Eth fields */
+#define DPAA2_CLS_RULE_OFFSET_IPV4 14 /* DPAA2_CLS_RULE_OFFSET_VLAN */
+ /* + Sizeof VLAN fields */
+#define DPAA2_CLS_RULE_OFFSET_IPV6 25 /* DPAA2_CLS_RULE_OFFSET_IPV4 */
+ /* + Sizeof IPV4 fields */
+#define DPAA2_CLS_RULE_OFFSET_ICMP 58 /* DPAA2_CLS_RULE_OFFSET_IPV6 */
+ /* + Sizeof IPV6 fields */
+#define DPAA2_CLS_RULE_OFFSET_UDP 60 /* DPAA2_CLS_RULE_OFFSET_ICMP */
+ /* + Sizeof ICMP fields */
+#define DPAA2_CLS_RULE_OFFSET_TCP 64 /* DPAA2_CLS_RULE_OFFSET_UDP */
+ /* + Sizeof UDP fields */
+#define DPAA2_CLS_RULE_OFFSET_SCTP 68 /* DPAA2_CLS_RULE_OFFSET_TCP */
+ /* + Sizeof TCP fields */
+#define DPAA2_CLS_RULE_OFFSET_GRE 72 /* DPAA2_CLS_RULE_OFFSET_SCTP */
+ /* + Sizeof SCTP fields */
+
+static const
+enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
+ RTE_FLOW_ITEM_TYPE_END,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ICMP,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_GRE,
+};
+
+static const
+enum rte_flow_action_type dpaa2_supported_action_type[] = {
+ RTE_FLOW_ACTION_TYPE_END,
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_RSS
+};
+
+enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
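+/* Default item mask used when a flow item supplies a range ('last')
+ * without an explicit mask; refreshed per item by
+ * dpaa2_dev_update_default_mask().
+ */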
+static const void *default_mask;
+
+static int
+dpaa2_configure_flow_eth(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_eth *spec, *mask;
+
+ /* TODO: Currently upper bound of range parameter is not implemented */
+ const struct rte_flow_item_eth *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ /* TODO: pattern[] has MAX_TCS + 1 entries: index 8 tracks the QoS */
+ /* table and indices 0-7 track the per-TC FS tables. The literal 8 */
+ /* could be replaced with a macro. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
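+ /* Check if this pattern type already contributes to the QoS (index 8)
+ * key composition; if not, record it and flag the QoS table for
+ * reconfiguration. The same check follows for the per-TC FS table.
+ */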
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
+ index++;
+
+ priv->extract.qos_key_cfg.num_extracts = index;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
+ index++;
+
+ priv->extract.fs_key_cfg[group].num_extracts = index;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_eth *)pattern->spec;
+ last = (const struct rte_flow_item_eth *)pattern->last;
+ mask = (const struct rte_flow_item_eth *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ /* Key rule */
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH;
+ memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
+ sizeof(struct ether_addr));
+ key_iova += sizeof(struct ether_addr);
+ memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
+ sizeof(struct ether_addr));
+ key_iova += sizeof(struct ether_addr);
+ memcpy((void *)key_iova, (const void *)(&spec->type),
+ sizeof(rte_be16_t));
+
+ /* Key mask */
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH;
+ memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
+ sizeof(struct ether_addr));
+ mask_iova += sizeof(struct ether_addr);
+ memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
+ sizeof(struct ether_addr));
+ mask_iova += sizeof(struct ether_addr);
+ memcpy((void *)mask_iova, (const void *)(&mask->type),
+ sizeof(rte_be16_t));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH +
+ ((2 * sizeof(struct ether_addr)) +
+ sizeof(rte_be16_t)));
+ return device_configured;
+}
+
+static int
+dpaa2_configure_flow_vlan(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_vlan *spec, *mask;
+
+ const struct rte_flow_item_vlan *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+ priv->extract.qos_key_cfg.num_extracts++;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+ priv->extract.fs_key_cfg[group].num_extracts++;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_vlan *)pattern->spec;
+ last = (const struct rte_flow_item_vlan *)pattern->last;
+ mask = (const struct rte_flow_item_vlan *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
+ memcpy((void *)key_iova, (const void *)(&spec->tci),
+ sizeof(rte_be16_t));
+
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
+ memcpy((void *)mask_iova, (const void *)(&mask->tci),
+ sizeof(rte_be16_t));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t));
+ return device_configured;
+}
+
+static int
+dpaa2_configure_flow_ipv4(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_ipv4 *spec, *mask;
+
+ const struct rte_flow_item_ipv4 *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+ index++;
+
+ priv->extract.qos_key_cfg.num_extracts = index;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+ index++;
+
+ priv->extract.fs_key_cfg[group].num_extracts = index;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_ipv4 *)pattern->spec;
+ last = (const struct rte_flow_item_ipv4 *)pattern->last;
+ mask = (const struct rte_flow_item_ipv4 *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
+ memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
+ sizeof(uint32_t));
+ key_iova += sizeof(uint32_t);
+ memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
+ sizeof(uint32_t));
+ key_iova += sizeof(uint32_t);
+ memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
+ sizeof(uint8_t));
+
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
+ memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
+ sizeof(uint32_t));
+ mask_iova += sizeof(uint32_t);
+ memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
+ sizeof(uint32_t));
+ mask_iova += sizeof(uint32_t);
+ memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
+ sizeof(uint8_t));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 +
+ (2 * sizeof(uint32_t)) + sizeof(uint8_t));
+
+ return device_configured;
+}
+
+static int
+dpaa2_configure_flow_ipv6(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_ipv6 *spec, *mask;
+
+ const struct rte_flow_item_ipv6 *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
+ index++;
+
+ priv->extract.qos_key_cfg.num_extracts = index;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
+ index++;
+
+ priv->extract.fs_key_cfg[group].num_extracts = index;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_ipv6 *)pattern->spec;
+ last = (const struct rte_flow_item_ipv6 *)pattern->last;
+ mask = (const struct rte_flow_item_ipv6 *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
+ memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
+ sizeof(spec->hdr.src_addr));
+ key_iova += sizeof(spec->hdr.src_addr);
+ memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
+ sizeof(spec->hdr.dst_addr));
+
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
+ memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
+ sizeof(mask->hdr.src_addr));
+ mask_iova += sizeof(mask->hdr.src_addr);
+ memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
+ sizeof(mask->hdr.dst_addr));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 +
+ sizeof(spec->hdr.src_addr) +
+ sizeof(spec->hdr.dst_addr));
+ return device_configured;
+}
+
+static int
+dpaa2_configure_flow_icmp(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_icmp *spec, *mask;
+
+ const struct rte_flow_item_icmp *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
+ index++;
+
+ priv->extract.qos_key_cfg.num_extracts = index;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
+ index++;
+
+ priv->extract.fs_key_cfg[group].num_extracts = index;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_icmp *)pattern->spec;
+ last = (const struct rte_flow_item_icmp *)pattern->last;
+ mask = (const struct rte_flow_item_icmp *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
+ memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
+ sizeof(uint8_t));
+ key_iova += sizeof(uint8_t);
+ memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
+ sizeof(uint8_t));
+
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
+ memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
+ sizeof(uint8_t));
+ mask_iova += sizeof(uint8_t);
+ memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
+ sizeof(uint8_t));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP +
+ (2 * sizeof(uint8_t)));
+
+ return device_configured;
+}
+
+static int
+dpaa2_configure_flow_udp(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_udp *spec, *mask;
+
+ const struct rte_flow_item_udp *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
+ index++;
+
+ priv->extract.qos_key_cfg.num_extracts = index;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
+ index++;
+
+ priv->extract.fs_key_cfg[group].num_extracts = index;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_udp *)pattern->spec;
+ last = (const struct rte_flow_item_udp *)pattern->last;
+ mask = (const struct rte_flow_item_udp *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
+ (2 * sizeof(uint32_t));
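+ /* Pin the IPv4 protocol field in the key to UDP (0x11) */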
+ memset((void *)key_iova, 0x11, sizeof(uint8_t));
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP;
+ memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
+ sizeof(uint16_t));
+ key_iova += sizeof(uint16_t);
+ memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
+ sizeof(uint16_t));
+
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP;
+ memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
+ sizeof(uint16_t));
+ mask_iova += sizeof(uint16_t);
+ memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
+ sizeof(uint16_t));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP +
+ (2 * sizeof(uint16_t)));
+
+ return device_configured;
+}
+
+static int
+dpaa2_configure_flow_tcp(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_tcp *spec, *mask;
+
+ const struct rte_flow_item_tcp *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
+ index++;
+
+ priv->extract.qos_key_cfg.num_extracts = index;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
+ index++;
+
+ priv->extract.fs_key_cfg[group].num_extracts = index;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_tcp *)pattern->spec;
+ last = (const struct rte_flow_item_tcp *)pattern->last;
+ mask = (const struct rte_flow_item_tcp *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
+ (2 * sizeof(uint32_t));
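+ /* Pin the IPv4 protocol field in the key to TCP (0x06) */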
+ memset((void *)key_iova, 0x06, sizeof(uint8_t));
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP;
+ memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
+ sizeof(uint16_t));
+ key_iova += sizeof(uint16_t);
+ memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
+ sizeof(uint16_t));
+
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP;
+ memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
+ sizeof(uint16_t));
+ mask_iova += sizeof(uint16_t);
+ memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
+ sizeof(uint16_t));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP +
+ (2 * sizeof(uint16_t)));
+
+ return device_configured;
+}
+
+static int
+dpaa2_configure_flow_sctp(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_sctp *spec, *mask;
+
+ const struct rte_flow_item_sctp *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
+ index++;
+
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
+ index++;
+
+ priv->extract.qos_key_cfg.num_extracts = index;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
+ index++;
+
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
+ index++;
+
+ priv->extract.fs_key_cfg[group].num_extracts = index;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_sctp *)pattern->spec;
+ last = (const struct rte_flow_item_sctp *)pattern->last;
+ mask = (const struct rte_flow_item_sctp *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
+ (2 * sizeof(uint32_t));
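+ /* Pin the IPv4 protocol field in the key to SCTP (0x84) */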
+ memset((void *)key_iova, 0x84, sizeof(uint8_t));
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
+ memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
+ sizeof(uint16_t));
+ key_iova += sizeof(uint16_t);
+ memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
+ sizeof(uint16_t));
+
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
+ memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
+ sizeof(uint16_t));
+ mask_iova += sizeof(uint16_t);
+ memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
+ sizeof(uint16_t));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP +
+ (2 * sizeof(uint16_t)));
+ return device_configured;
+}
+
+static int
+dpaa2_configure_flow_gre(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ int index, j = 0;
+ size_t key_iova;
+ size_t mask_iova;
+ int device_configured = 0, entry_found = 0;
+ uint32_t group;
+ const struct rte_flow_item_gre *spec, *mask;
+
+ const struct rte_flow_item_gre *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* The DPAA2 platform cannot extract more than */
+ /* DPKG_MAX_NUM_OF_EXTRACTS fields per key; verify that limit too. */
+ if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ return -ENOTSUP;
+ }
+
+ for (j = 0; j < priv->pattern[8].item_count; j++) {
+ if (priv->pattern[8].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[8].pattern_type[j] = pattern->type;
+ priv->pattern[8].item_count++;
+ device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ entry_found = 0;
+ for (j = 0; j < priv->pattern[group].item_count; j++) {
+ if (priv->pattern[group].pattern_type[j] == pattern->type) {
+ entry_found = 1;
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ priv->pattern[group].pattern_type[j] = pattern->type;
+ priv->pattern[group].item_count++;
+ device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->index = attr->priority;
+
+ if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ index = priv->extract.qos_key_cfg.num_extracts;
+ priv->extract.qos_key_cfg.extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
+ priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
+ index++;
+
+ priv->extract.qos_key_cfg.num_extracts = index;
+ }
+
+ if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ index = priv->extract.fs_key_cfg[group].num_extracts;
+ priv->extract.fs_key_cfg[group].extracts[index].type =
+ DPKG_EXTRACT_FROM_HDR;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
+ priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
+ index++;
+
+ priv->extract.fs_key_cfg[group].num_extracts = index;
+ }
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_gre *)pattern->spec;
+ last = (const struct rte_flow_item_gre *)pattern->last;
+ mask = (const struct rte_flow_item_gre *)
+ (pattern->mask ? pattern->mask : default_mask);
+
+ key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE;
+ memcpy((void *)key_iova, (const void *)(&spec->protocol),
+ sizeof(rte_be16_t));
+
+ mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE;
+ memcpy((void *)mask_iova, (const void *)(&mask->protocol),
+ sizeof(rte_be16_t));
+
+ flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t));
+
+ return device_configured;
+}
+
+static int
+dpaa2_generic_flow_set(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_queue *dest_queue;
+ const struct rte_flow_action_rss *rss_conf;
+ uint16_t index;
+ int is_keycfg_configured = 0, end_of_list = 0;
+ int ret = 0, i = 0, j = 0;
+ struct dpni_attr nic_attr;
+ struct dpni_rx_tc_dist_cfg tc_cfg;
+ struct dpni_qos_tbl_cfg qos_cfg;
+ struct dpkg_profile_cfg key_cfg;
+ struct dpni_fs_action_cfg action;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ size_t param;
+
+ /* Parse pattern list to get the matching parameters */
+ while (!end_of_list) {
+ switch (pattern[i].type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ is_keycfg_configured = dpaa2_configure_flow_eth(flow,
+ dev,
+ attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
+ dev,
+ attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
+ dev,
+ attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
+ dev,
+ attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_ICMP:
+ is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
+ dev,
+ attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ is_keycfg_configured = dpaa2_configure_flow_udp(flow,
+ dev,
+ attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
+ dev,
+ attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
+ dev, attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ is_keycfg_configured = dpaa2_configure_flow_gre(flow,
+ dev,
+ attr,
+ &pattern[i],
+ actions,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_END:
+ end_of_list = 1;
+ break; /*End of List*/
+ default:
+ DPAA2_PMD_ERR("Invalid action type");
+ ret = -ENOTSUP;
+ break;
+ }
+ i++;
+ }
+
+ /* Let's parse action on matching traffic */
+ end_of_list = 0;
+ while (!end_of_list) {
+ switch (actions[j].type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
+ flow->flow_id = dest_queue->index;
+ flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
+ memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
+ action.flow_id = flow->flow_id;
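+ /* If a pattern handler flagged a key-composition change, the
+ * QoS and/or FS tables must be reprogrammed before adding the
+ * new entry.
+ */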
+ if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+ if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
+ (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
+ DPAA2_PMD_ERR(
+ "Unable to prepare extract parameters");
+ return -1;
+ }
+
+ memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
+ qos_cfg.discard_on_miss = true;
+ qos_cfg.keep_entries = true;
+ qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
+ ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
+ priv->token, &qos_cfg);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Distribution cannot be configured.(%d)"
+ , ret);
+ return -1;
+ }
+ }
+ if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
+ (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
+ DPAA2_PMD_ERR(
+ "Unable to prepare extract parameters");
+ return -1;
+ }
+
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+ tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
+ tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
+ tc_cfg.key_cfg_iova =
+ (uint64_t)priv->extract.fs_extract_param[flow->tc_id];
+ tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
+ tc_cfg.fs_cfg.keep_entries = true;
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
+ priv->token,
+ flow->tc_id, &tc_cfg);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Distribution cannot be configured.(%d)"
+ , ret);
+ return -1;
+ }
+ }
+ /* Configure QoS table first */
+ memset(&nic_attr, 0, sizeof(struct dpni_attr));
+ ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
+ priv->token, &nic_attr);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Failure to get attribute. dpni@%p err code(%d)\n",
+ dpni, ret);
+ return ret;
+ }
+
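+ /* Fold the flow id into the valid TC range and derive the QoS
+ * entry index from the rule priority and TC id.
+ */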
+ action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
+ index = flow->index + (flow->tc_id * nic_attr.fs_entries);
+ ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
+ priv->token, &flow->rule,
+ flow->tc_id, index);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Error in addnig entry to QoS table(%d)", ret);
+ return ret;
+ }
+
+ /* Then Configure FS table */
+ ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
+ flow->tc_id, flow->index,
+ &flow->rule, &action);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Error in adding entry to FS table(%d)", ret);
+ return ret;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
+ priv->token, &nic_attr);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Failure to get attribute. dpni@%p err code(%d)\n",
+ dpni, ret);
+ return ret;
+ }
+ rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
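+ /* All queues of an RSS action must belong to the traffic
+ * class selected by attr->group.
+ */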
+ for (i = 0; i < (int)rss_conf->queue_num; i++) {
+ if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
+ rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
+ DPAA2_PMD_ERR(
+ "Queue/Group combination are not supported\n");
+ return -ENOTSUP;
+ }
+ }
+
+ flow->action = RTE_FLOW_ACTION_TYPE_RSS;
+ ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
+ &key_cfg);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "unable to set flow distribution.please check queue config\n");
+ return ret;
+ }
+
+ /* Allocate DMA'ble memory to write the rules */
+ param = (size_t)rte_malloc(NULL, 256, 64);
+ if (!param) {
+ DPAA2_PMD_ERR("Memory allocation failure\n");
+ return -1;
+ }
+
+ if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
+ DPAA2_PMD_ERR(
+ "Unable to prepare extract parameters");
+ rte_free((void *)param);
+ return -1;
+ }
+
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+ tc_cfg.dist_size = rss_conf->queue_num;
+ tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+ tc_cfg.key_cfg_iova = (size_t)param;
+ tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
+
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
+ priv->token, flow->tc_id,
+ &tc_cfg);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Distribution cannot be configured: %d\n", ret);
+ rte_free((void *)param);
+ return -1;
+ }
+
+ rte_free((void *)param);
+ if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+ if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
+ (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
+ DPAA2_PMD_ERR(
+ "Unable to prepare extract parameters");
+ return -1;
+ }
+ memset(&qos_cfg, 0,
+ sizeof(struct dpni_qos_tbl_cfg));
+ qos_cfg.discard_on_miss = true;
+ qos_cfg.keep_entries = true;
+ qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
+ ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
+ priv->token, &qos_cfg);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Distribution can not be configured(%d)\n",
+ ret);
+ return -1;
+ }
+ }
+
+ /* Add Rule into QoS table */
+ index = flow->index + (flow->tc_id * nic_attr.fs_entries);
+ ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
+ &flow->rule, flow->tc_id,
+ index);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Error in entry addition in QoS table(%d)",
+ ret);
+ return ret;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ end_of_list = 1;
+ break;
+ default:
+ DPAA2_PMD_ERR("Invalid action type");
+ ret = -ENOTSUP;
+ break;
+ }
+ j++;
+ }
+
+ return ret;
+}
+
+static inline int
+dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
+ const struct rte_flow_attr *attr)
+{
+ int ret = 0;
+
+ if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
+ DPAA2_PMD_ERR("Priority group is out of range\n");
+ ret = -ENOTSUP;
+ }
+ if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
+ DPAA2_PMD_ERR("Priority within the group is out of range\n");
+ ret = -ENOTSUP;
+ }
+ if (unlikely(attr->egress)) {
+ DPAA2_PMD_ERR(
+ "Flow configuration is not supported on egress side\n");
+ ret = -ENOTSUP;
+ }
+ if (unlikely(!attr->ingress)) {
+ DPAA2_PMD_ERR("Ingress flag must be configured\n");
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
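+/* Select the generic rte_flow default mask for the given item type */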
+static inline void
+dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
+{
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ default_mask = (const void *)&rte_flow_item_eth_mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ default_mask = (const void *)&rte_flow_item_vlan_mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ default_mask = (const void *)&rte_flow_item_ipv4_mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ default_mask = (const void *)&rte_flow_item_ipv6_mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ICMP:
+ default_mask = (const void *)&rte_flow_item_icmp_mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ default_mask = (const void *)&rte_flow_item_udp_mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ default_mask = (const void *)&rte_flow_item_tcp_mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ default_mask = (const void *)&rte_flow_item_sctp_mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ default_mask = (const void *)&rte_flow_item_gre_mask;
+ break;
+ default:
+ DPAA2_PMD_ERR("Invalid pattern type");
+ }
+}
+
+static inline int
+dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
+ const struct rte_flow_item pattern[])
+{
+ unsigned int i, j, k, is_found = 0;
+ int ret = 0;
+
+ for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
+ is_found = 0;
+ for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
+ if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
+ is_found = 1;
+ break;
+ }
+ }
+ if (!is_found) {
+ ret = -ENOTSUP;
+ break;
+ }
+ }
+ /* Let's verify other combinations of the given pattern rules */
+ for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
+ if (!pattern[j].spec) {
+ ret = -EINVAL;
+ break;
+ }
+ if ((pattern[j].last) && (!pattern[j].mask))
+ dpaa2_dev_update_default_mask(&pattern[j]);
+ }
+
+ /* DPAA2 platform has a limitation that extract parameter cannot be
+ * more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.
+ */
+ for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
+ for (j = 0; j < MAX_TCS + 1; j++) {
+ for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) {
+ if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type)
+ break;
+ }
+ if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
+ ret = -ENOTSUP;
+ }
+ }
+ return ret;
+}
+
+static inline int
+dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
+{
+ unsigned int i, j, is_found = 0;
+ int ret = 0;
+
+ for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
+ is_found = 0;
+ for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
+ if (dpaa2_supported_action_type[i] == actions[j].type) {
+ is_found = 1;
+ break;
+ }
+ }
+ if (!is_found) {
+ ret = -ENOTSUP;
+ break;
+ }
+ }
+ for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
+ if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static
+int dpaa2_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error __rte_unused)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpni_attr dpni_attr;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ uint16_t token = priv->token;
+ int ret = 0;
+
+ memset(&dpni_attr, 0, sizeof(struct dpni_attr));
+ ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Failure to get dpni@%p attribute, err code %d\n",
+ dpni, ret);
+ return ret;
+ }
+
+ /* Verify input attributes */
+ ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Invalid attributes are given\n");
+ goto not_valid_params;
+ }
+ /* Verify input pattern list */
+ ret = dpaa2_dev_verify_patterns(priv, pattern);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Invalid pattern list is given\n");
+ goto not_valid_params;
+ }
+ /* Verify input action list */
+ ret = dpaa2_dev_verify_actions(actions);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Invalid action list is given\n");
+ goto not_valid_params;
+ }
+not_valid_params:
+ return ret;
+}
+
+static
+struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow = NULL;
+ size_t key_iova = 0, mask_iova = 0;
+ int ret;
+
+ flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
+ if (!flow) {
+ DPAA2_PMD_ERR("Failure to allocate memory for flow");
+ return NULL;
+ }
+ /* Allocate DMA'ble memory to write the rules */
+ key_iova = (size_t)rte_malloc(NULL, 256, 64);
+ if (!key_iova) {
+ DPAA2_PMD_ERR(
+ "Memory allocation failure for rule configration\n");
+ goto creation_error;
+ }
+ mask_iova = (size_t)rte_malloc(NULL, 256, 64);
+ if (!mask_iova) {
+ DPAA2_PMD_ERR(
+ "Memory allocation failure for rule configration\n");
+ goto creation_error;
+ }
+
+ flow->rule.key_iova = key_iova;
+ flow->rule.mask_iova = mask_iova;
+ flow->rule.key_size = 0;
+
+ switch (dpaa2_filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
+ actions, error);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Failure to create flow, return code (%d)", ret);
+ goto creation_error;
+ }
+ break;
+ default:
+ DPAA2_PMD_ERR("Filter type (%d) not supported",
+ dpaa2_filter_type);
+ break;
+ }
+
+ return flow;
+
+creation_error:
+ if (flow)
+ rte_free((void *)flow);
+ if (key_iova)
+ rte_free((void *)key_iova);
+ if (mask_iova)
+ rte_free((void *)mask_iova);
+ return NULL;
+}
+
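Putting the two entry points together, an application would typically validate and then create. The sketch below assumes attr, pattern and actions are filled as in the earlier fragments and that port_id is a valid ethdev port; note that dpaa2_dev_verify_patterns() above rejects any item without a spec, so every pattern entry must carry one:

	struct rte_flow_error flow_err = { 0 };
	struct rte_flow *f = NULL;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &flow_err) == 0)
		f = rte_flow_create(port_id, &attr, pattern, actions, &flow_err);
	if (f == NULL)
		printf("flow not created: %s\n",
		       flow_err.message ? flow_err.message : "unknown");
	/* ... traffic runs ... */
	if (f)
		rte_flow_destroy(port_id, f, &flow_err);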
+static
+int dpaa2_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error __rte_unused)
+{
+ int ret = 0;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ switch (flow->action) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ /* Remove entry from QoS table first */
+ ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
+ &flow->rule);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Error in adding entry to QoS table(%d)", ret);
+ goto error;
+ }
+
+ /* Then remove entry from FS table */
+ ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
+ flow->tc_id, &flow->rule);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Error in entry addition in FS table(%d)", ret);
+ goto error;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
+ &flow->rule);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(
+ "Error in entry addition in QoS table(%d)", ret);
+ goto error;
+ }
+ break;
+ default:
+ DPAA2_PMD_ERR(
+ "Action type (%d) is not supported", flow->action);
+ ret = -ENOTSUP;
+ break;
+ }
+
+ /* Now free the flow */
+ rte_free(flow);
+
+error:
+ return ret;
+}
+
+static int
+dpaa2_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error __rte_unused)
+{
+ int ret = 0, tc_id;
+ struct dpni_rx_tc_dist_cfg tc_cfg;
+ struct dpni_qos_tbl_cfg qos_cfg;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ /* Reset QoS table */
+ qos_cfg.default_tc = 0;
+ qos_cfg.discard_on_miss = false;
+ qos_cfg.keep_entries = false;
+ qos_cfg.key_cfg_iova = priv->extract.qos_extract_param;
+ ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, priv->token, &qos_cfg);
+ if (ret < 0)
+ DPAA2_PMD_ERR(
+ "QoS table is not reset to default: %d\n", ret);
+
+ for (tc_id = 0; tc_id < priv->num_rx_tc; tc_id++) {
+ /* Reset FS table */
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token,
+ tc_id, &tc_cfg);
+ if (ret < 0)
+ DPAA2_PMD_ERR(
+ "Error (%d) in flushing entries for TC (%d)",
+ ret, tc_id);
+ }
+ return ret;
+}
+
+static int
+dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const struct rte_flow_action *actions __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ return 0;
+}
+
+const struct rte_flow_ops dpaa2_flow_ops = {
+ .create = dpaa2_flow_create,
+ .validate = dpaa2_flow_validate,
+ .destroy = dpaa2_flow_destroy,
+ .flush = dpaa2_flow_flush,
+ .query = dpaa2_flow_query,
+};
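This ops table is only reachable once the ethdev layer can fetch it, which in this series happens through the generic filter_ctrl callback. That glue is not part of this hunk, so the following is only a speculative sketch of the usual pattern (the function name is hypothetical):

	static int
	dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev __rte_unused,
			    enum rte_filter_type filter_type,
			    enum rte_filter_op filter_op, void *arg)
	{
		if (filter_type != RTE_ETH_FILTER_GENERIC)
			return -ENOTSUP;
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		/* remember the type so dpaa2_flow_create() takes the generic path */
		dpaa2_filter_type |= filter_type;
		*(const void **)arg = &dpaa2_flow_ops;
		return 0;
	}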
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 0907a3699..6c12a0ae1 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -1528,6 +1528,248 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * Warning: Before calling this function, call dpkg_prepare_key_cfg() to
+ * prepare the key_cfg_iova parameter.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_qos_tbl_cfg *cfg)
+{
+ struct dpni_cmd_set_qos_table *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+ cmd_params->default_tc = cfg->default_tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+ dpni_set_field(cmd_params->discard_on_miss,
+ ENABLE,
+ cfg->discard_on_miss);
+ dpni_set_field(cmd_params->discard_on_miss,
+ KEEP_QOS_ENTRIES,
+ cfg->keep_entries);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
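A hedged call sequence for the warning above: the key composition must first be serialized into DMA-able memory with dpkg_prepare_key_cfg() before the MC command is issued. The buffer size, the extract contents, and the dpni/priv handles are assumptions taken from the caller's context:

	struct dpkg_profile_cfg kg_cfg = { 0 };
	struct dpni_qos_tbl_cfg qos_cfg = { 0 };
	uint8_t *key_params;
	int err;

	/* 256 B of DMA-able memory for the serialized key composition */
	key_params = rte_malloc(NULL, 256, 64);
	if (key_params) {
		/* ... fill kg_cfg.num_extracts / kg_cfg.extracts[] ... */
		err = dpkg_prepare_key_cfg(&kg_cfg, key_params);
		if (!err) {
			qos_cfg.key_cfg_iova = (uint64_t)(size_t)key_params;
			qos_cfg.discard_on_miss = 0;
			qos_cfg.default_tc = 0;
			err = dpni_set_qos_table(dpni, CMD_PRI_LOW,
						 priv->token, &qos_cfg);
		}
	}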
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to add
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the QoS table where to insert the entry.
+ * Only relevant if MASKING is enabled for QoS classification on
+ * this DPNI, it is ignored for exact match.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_rule_cfg *cfg,
+ uint8_t tc_id,
+ uint16_t index)
+{
+ struct dpni_cmd_add_qos_entry *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_qos_entry *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_qos_table() - Clear all QoS mapping entries
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Following this function call, all frames are directed to
+ * the default traffic class (0)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ * (to select a flow ID)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the FS table where to insert the entry.
+ * Only relevant if MASKING is enabled for FS classification
+ * on this DPNI, it is ignored for exact match.
+ * @cfg: Flow steering rule to add
+ * @action: Action to be taken as result of a classification hit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ uint16_t index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action)
+{
+ struct dpni_cmd_add_fs_entry *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+ cmd_params->options = cpu_to_le16(action->options);
+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
+ cmd_params->flc = cpu_to_le64(action->flc);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
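For illustration, a caller builds the rule and action roughly as in this minimal sketch; key_buf, mask_buf, key_len, tc_id and entry_index are assumed to come from the caller, with the key and mask already serialized into DMA-able buffers:

	struct dpni_rule_cfg rule = {
		.key_iova = (uint64_t)(size_t)key_buf,	 /* serialized key */
		.mask_iova = (uint64_t)(size_t)mask_buf, /* per-bit mask   */
		.key_size = key_len,
	};
	struct dpni_fs_action_cfg fs_action = {
		.flow_id = 3,	/* deliver hits to Rx queue 3 of the TC */
		.options = 0,
	};

	err = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
				tc_id, entry_index, &rule, &fs_action);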
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ * traffic class
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Flow steering rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_fs_entry *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific
+ * traffic class
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id)
+{
+ struct dpni_cmd_clear_fs_entries *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_fs_entries *)cmd.params;
+ cmd_params->tc_id = tc_id;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpni_set_congestion_notification() - Set traffic class congestion
* notification configuration
@@ -2064,6 +2306,76 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
return 0;
}
+/**
+ * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ * If the FS is already enabled with a previous call, the classification
+ * key will be changed but all the table rules are kept. If the
+ * existing rules do not match the key, the results will not be
+ * predictable. It is the user's responsibility to keep key integrity.
+ * If cfg.enable is set to 1, the command will create a flow steering table
+ * and will classify packets according to this table. The packets
+ * that miss all the table rules will be classified according to
+ * settings made in dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0, the command will clear the flow steering table.
+ * The packets will be classified according to settings made in
+ * dpni_set_rx_hash_dist().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_fs_dist *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ * If cfg.enable is set to 1, the packets will be classified using a hash
+ * function based on the key received in the cfg.key_cfg_iova parameter.
+ * If cfg.enable is set to 0, the packets will be sent to the queue configured
+ * in the dpni_set_rx_dist_default_queue() call.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_hash_dist *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+ cmd_params->tc_id = cfg->tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpni_add_custom_tpid() - Configures a distinct Ethertype value
* (or TPID value) to indicate VLAN tag in addition to the common
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 0359a2bc7..aecdc8d1f 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -1071,6 +1071,123 @@ int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
uint16_t token,
enum dpni_confirmation_mode *mode);
+/**
+ * struct dpni_qos_tbl_cfg - Structure representing QoS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * key extractions to be used as the QoS criteria by calling
+ * dpkg_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ * '0' to use the 'default_tc' in such cases
+ * @keep_entries: if set to one, will not delete existing table entries. This
+ * option will work properly only for dpni objects created with
+ * DPNI_OPT_HAS_KEY_MASKING option. All previous QoS entries must
+ * be compatible with new key composition rule.
+ * It is the caller's job to delete incompatible entries before
+ * executing this function.
+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
+ */
+struct dpni_qos_tbl_cfg {
+ uint64_t key_cfg_iova;
+ int discard_on_miss;
+ int keep_entries;
+ uint8_t default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_qos_tbl_cfg *cfg);
+
+/**
+ * struct dpni_rule_cfg - Rule configuration for table lookup
+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
+ * @key_size: key and mask size (in bytes)
+ */
+struct dpni_rule_cfg {
+ uint64_t key_iova;
+ uint64_t mask_iova;
+ uint8_t key_size;
+};
+
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_rule_cfg *cfg,
+ uint8_t tc_id,
+ uint16_t index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD 0x1
+
+/**
+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC 0x2
+
+/**
+ * Indicates whether the 6 lowest significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits in value are interpreted as
+ * follows:
+ * - bits 0-1: indicates the number of 64 byte units of context that are
+ * stashed. FLC value is interpreted as a memory address in this case,
+ * excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
+ * to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc: FLC value for traffic matching this rule. Please check the Frame
+ * Descriptor section in the hardware documentation for more information.
+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
+ * values are in range 0 to num_queues-1.
+ * @options: Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+ uint64_t flc;
+ uint16_t flow_id;
+ uint16_t options;
+};
+
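A speculative encoding example for the stash-control layout described above; ctx_iova is a hypothetical 64-byte-aligned context address, so the 6 LS bits are free to carry the stash counts:

	struct dpni_fs_action_cfg action = { 0 };
	uint64_t flc = ctx_iova & ~0x3FULL; /* 64 B-aligned context address */

	flc |= 1ULL << 0; /* bits 0-1: stash 1 x 64 B of context          */
	flc |= 1ULL << 2; /* bits 2-3: stash 1 x 64 B of frame annotation */
	flc |= 2ULL << 4; /* bits 4-5: stash 2 x 64 B of frame data       */

	action.flc = flc;
	action.options = DPNI_FS_OPT_SET_FLC | DPNI_FS_OPT_SET_STASH_CONTROL;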
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ uint16_t index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id);
+
int dpni_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
@@ -1202,6 +1319,43 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
struct opr_cfg *cfg,
struct opr_qry *qry);
+/**
+ * When used as queue_idx in the dpni_set_rx_dist_default_queue() function,
+ * this value signals the DPNI to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - distribution configuration
+ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
+ * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
+ * 512,768,896,1024
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpkg_prepare_key_cfg(); relevant only when enable != 0, otherwise
+ * it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when a packet misses all rules of the flow steering table
+ * and hash is disabled, it will be put into this queue id; use
+ * DPNI_FS_MISS_DROP to drop frames. The value of this field is
+ * used only when flow steering distribution is enabled and hash
+ * distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+ uint16_t dist_size;
+ uint64_t key_cfg_iova;
+ uint8_t enable;
+ uint8_t tc;
+ uint16_t fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
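To tie the structure and prototypes together, a hedged configuration sketch enabling hash distribution over 8 queues on TC 0 follows; key_params is assumed to have been prepared with dpkg_prepare_key_cfg() as shown earlier, and dpni/priv come from the caller's context:

	struct dpni_rx_dist_cfg dist_cfg = {
		.dist_size = 8,	/* must be one of the supported sizes above */
		.key_cfg_iova = (uint64_t)(size_t)key_params,
		.enable = 1,
		.tc = 0,
		.fs_miss_flow_id = DPNI_FS_MISS_DROP, /* only used by FS dist */
	};

	err = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &dist_cfg);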
int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, uint16_t tpid);
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 81830ed85..9116e417e 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -69,6 +69,14 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V3(0x235)
+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD_V2(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
+#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
+#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243)
+#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
+#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
+#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
+
#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
@@ -91,6 +99,8 @@
#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
+#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
#define DPNI_CMDID_ADD_CUSTOM_TPID DPNI_CMD(0x275)
#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
@@ -495,6 +505,63 @@ struct dpni_cmd_set_queue {
uint64_t user_context;
};
+#define DPNI_DISCARD_ON_MISS_SHIFT 0
+#define DPNI_DISCARD_ON_MISS_SIZE 1
+#define DPNI_KEEP_QOS_ENTRIES_SHIFT 1
+#define DPNI_KEEP_QOS_ENTRIES_SIZE 1
+
+struct dpni_cmd_set_qos_table {
+ uint32_t pad;
+ uint8_t default_tc;
+ /* only the LSB */
+ uint8_t discard_on_miss;
+ uint16_t pad1[21];
+ uint64_t key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
+ uint16_t pad;
+ uint8_t tc_id;
+ uint8_t key_size;
+ uint16_t index;
+ uint16_t pad2;
+ uint64_t key_iova;
+ uint64_t mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+ uint8_t pad1[3];
+ uint8_t key_size;
+ uint32_t pad2;
+ uint64_t key_iova;
+ uint64_t mask_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+ uint16_t options;
+ uint8_t tc_id;
+ uint8_t key_size;
+ uint16_t index;
+ uint16_t flow_id;
+ uint64_t key_iova;
+ uint64_t mask_iova;
+ uint64_t flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+ uint16_t pad1;
+ uint8_t tc_id;
+ uint8_t key_size;
+ uint32_t pad2;
+ uint64_t key_iova;
+ uint64_t mask_iova;
+};
+
+struct dpni_cmd_clear_fs_entries {
+ uint16_t pad;
+ uint8_t tc_id;
+};
+
#define DPNI_DROP_ENABLE_SHIFT 0
#define DPNI_DROP_ENABLE_SIZE 1
#define DPNI_DROP_UNITS_SHIFT 2
@@ -692,5 +759,26 @@ struct dpni_rsp_get_custom_tpid {
uint16_t tpid2;
};
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_fs_dist {
+ uint16_t dist_size;
+ uint8_t enable;
+ uint8_t tc;
+ uint16_t miss_flow_id;
+ uint16_t pad1;
+ uint64_t key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_hash_dist {
+ uint16_t dist_size;
+ uint8_t enable;
+ uint8_t tc_id;
+ uint32_t pad;
+ uint64_t key_cfg_iova;
+};
+
#pragma pack(pop)
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build
index 801cbf5d7..53e1d8189 100644
--- a/drivers/net/dpaa2/meson.build
+++ b/drivers/net/dpaa2/meson.build
@@ -11,6 +11,7 @@ deps += ['mempool_dpaa2']
sources = files('base/dpaa2_hw_dpni.c',
'dpaa2_mux.c',
'dpaa2_ethdev.c',
+ 'dpaa2_flow.c',
'dpaa2_rxtx.c',
'mc/dpkg.c',
'mc/dpdmux.c',
--
2.17.1
* Re: [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config
2019-02-22 11:15 [dpdk-dev] [PATCH 1/6] net/dpaa2: add support for VLAN tpid config Hemant Agrawal
` (4 preceding siblings ...)
2019-02-22 11:16 ` [dpdk-dev] [PATCH 6/6] net/dpaa2: add basic support for generic flow Hemant Agrawal
@ 2019-02-27 9:30 ` Ferruh Yigit
5 siblings, 0 replies; 7+ messages in thread
From: Ferruh Yigit @ 2019-02-27 9:30 UTC (permalink / raw)
To: Hemant Agrawal, dev; +Cc: Shreyansh Jain
On 2/22/2019 11:15 AM, Hemant Agrawal wrote:
> This patch add support to config custom tpid in dpni.
> i.e. value other than 0x8100 and 0x88A8
>
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Series applied to dpdk-next-net/master, thanks.
Hi Hemant,
What do you think about adding a release notes update for these changes? At least
the rte_flow support looks to me like something to mention in the release notes.
Thanks,
ferruh