automatic DPDK test reports
* [dpdk-test-report] |WARNING| pw100363-100364 [PATCH] [2/2] raw/cnxk_bphy: use roc calls for max irq
@ 2021-10-04 23:44 dpdklab
From: dpdklab @ 2021-10-04 23:44 UTC
  To: test-report; +Cc: dpdk-test-reports

Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/100363

_apply patch failure_

Submitter: Tomasz Duszynski <tduszynski@marvell.com>
Date: Friday, October 01 2021 20:19:41 
Applied on: CommitID:086d426406bd3f6fac96a15bbd871c7fe714bc2d
Apply patch set 100363-100364 failed:

Checking patch drivers/raw/cnxk_bphy/cnxk_bphy.c...
Hunk #1 succeeded at 46 (offset 1 line).
error: while searching for:
		goto err_desc;
	}

	ret = rte_pmd_bphy_npa_pf_func_get(dev_id);
	if (ret == 0)
		plt_warn("NPA pf_func is invalid");

	ret = rte_pmd_bphy_sso_pf_func_get(dev_id);
	if (ret == 0)
		plt_warn("SSO pf_func is invalid");

	ret = rte_pmd_bphy_intr_init(dev_id);

error: patch failed: drivers/raw/cnxk_bphy/cnxk_bphy.c:68
Hunk #3 succeeded at 161 (offset -9 lines).
Hunk #4 succeeded at 178 (offset -9 lines).
error: while searching for:
		ret = cnxk_bphy_intr_register(dev->dev_id, info->irq_num,
					      info->handler, info->data,
					      info->cpu);
		break;
	case CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER:
		info = (struct cnxk_bphy_irq_info *)msg->data;
		cnxk_bphy_intr_unregister(dev->dev_id, info->irq_num);
		break;
	case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
		bphy_dev->queues[queue].rsp = &bphy_dev->mem;
		break;
	case CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC:
		bphy_dev->queues[queue].rsp =
			(void *)(size_t)roc_bphy_npa_pf_func_get();
		break;
	case CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC:
		bphy_dev->queues[queue].rsp =
			(void *)(size_t)roc_bphy_sso_pf_func_get();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int

error: patch failed: drivers/raw/cnxk_bphy/cnxk_bphy.c:191
Hunk #6 succeeded at 211 (offset -39 lines).
Hunk #7 succeeded at 219 (offset -39 lines).
Checking patch drivers/raw/cnxk_bphy/cnxk_bphy_cgx_test.c...
Checking patch drivers/raw/cnxk_bphy/rte_pmd_bphy.h...
Hunk #2 succeeded at 138 (offset -2 lines).
Hunk #3 succeeded at 208 (offset -2 lines).
error: while searching for:
		.type = CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER,
		.data = &info
	};
	struct rte_rawdev_buf *bufs[1];
	struct rte_rawdev_buf buf;

	buf.buf_addr = &msg;
	bufs[0] = &buf;

	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, 0);
}

static __rte_always_inline struct cnxk_bphy_mem *
rte_pmd_bphy_intr_mem_get(uint16_t dev_id)
{
	struct cnxk_bphy_irq_msg msg = {
		.type = CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
	};
	struct rte_rawdev_buf *bufs[1];
	struct rte_rawdev_buf buf;
	int ret;

	buf.buf_addr = &msg;
	bufs[0] = &buf;

	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
	if (ret)
		return NULL;

	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
	if (ret)
		return NULL;

	return buf.buf_addr;
}

static __rte_always_inline uint16_t
rte_pmd_bphy_npa_pf_func_get(uint16_t dev_id)
{
	struct cnxk_bphy_irq_msg msg = {
		.type = CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
	};
	struct rte_rawdev_buf *bufs[1];
	struct rte_rawdev_buf buf;
	int ret;

	buf.buf_addr = &msg;
	bufs[0] = &buf;

	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
	if (ret)
		return 0;

	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
	if (ret)
		return 0;

	return (uint16_t)(size_t)buf.buf_addr;
}

static __rte_always_inline uint16_t
rte_pmd_bphy_sso_pf_func_get(uint16_t dev_id)
{
	struct cnxk_bphy_irq_msg msg = {
		.type = CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
	};
	struct rte_rawdev_buf *bufs[1];
	struct rte_rawdev_buf buf;
	int ret;

	buf.buf_addr = &msg;
	bufs[0] = &buf;

	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
	if (ret)
		return 0;

	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
	if (ret)
		return 0;

	return (uint16_t)(size_t)buf.buf_addr;
}

#endif /* _CNXK_BPHY_H_ */

error: patch failed: drivers/raw/cnxk_bphy/rte_pmd_bphy.h:201
Applying patch drivers/raw/cnxk_bphy/cnxk_bphy.c with 2 rejects...
Hunk #1 applied cleanly.
Rejected hunk #2.
Hunk #3 applied cleanly.
Hunk #4 applied cleanly.
Rejected hunk #5.
Hunk #6 applied cleanly.
Hunk #7 applied cleanly.
Applied patch drivers/raw/cnxk_bphy/cnxk_bphy_cgx_test.c cleanly.
Applying patch drivers/raw/cnxk_bphy/rte_pmd_bphy.h with 1 reject...
Hunk #1 applied cleanly.
Hunk #2 applied cleanly.
Hunk #3 applied cleanly.
Rejected hunk #4.
diff a/drivers/raw/cnxk_bphy/cnxk_bphy.c b/drivers/raw/cnxk_bphy/cnxk_bphy.c	(rejected hunks)
@@ -68,12 +69,12 @@ bphy_rawdev_selftest(uint16_t dev_id)
 		goto err_desc;
 	}
 
-	ret = rte_pmd_bphy_npa_pf_func_get(dev_id);
-	if (ret == 0)
+	ret = rte_pmd_bphy_npa_pf_func_get(dev_id, &pf_func);
+	if (ret || pf_func == 0)
 		plt_warn("NPA pf_func is invalid");
 
-	ret = rte_pmd_bphy_sso_pf_func_get(dev_id);
-	if (ret == 0)
+	ret = rte_pmd_bphy_sso_pf_func_get(dev_id, &pf_func);
+	if (ret || pf_func == 0)
 		plt_warn("SSO pf_func is invalid");
 
 	ret = rte_pmd_bphy_intr_init(dev_id);
@@ -191,27 +198,49 @@ cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
 		ret = cnxk_bphy_intr_register(dev->dev_id, info->irq_num,
 					      info->handler, info->data,
 					      info->cpu);
+		if (ret)
+			return ret;
 		break;
 	case CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER:
 		info = (struct cnxk_bphy_irq_info *)msg->data;
 		cnxk_bphy_intr_unregister(dev->dev_id, info->irq_num);
 		break;
 	case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
-		bphy_dev->queues[queue].rsp = &bphy_dev->mem;
+		mem = rte_zmalloc(NULL, sizeof(*mem), 0);
+		if (!mem)
+			return -ENOMEM;
+
+		*mem = bphy_dev->mem;
+		rsp = mem;
 		break;
 	case CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC:
-		bphy_dev->queues[queue].rsp =
-			(void *)(size_t)roc_bphy_npa_pf_func_get();
+		pf_func = rte_malloc(NULL, sizeof(*pf_func), 0);
+		if (!pf_func)
+			return -ENOMEM;
+
+		*pf_func = roc_bphy_npa_pf_func_get();
+		rsp = pf_func;
 		break;
 	case CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC:
-		bphy_dev->queues[queue].rsp =
-			(void *)(size_t)roc_bphy_sso_pf_func_get();
+		pf_func = rte_malloc(NULL, sizeof(*pf_func), 0);
+		if (!pf_func)
+			return -ENOMEM;
+
+		*pf_func = roc_bphy_sso_pf_func_get();
+		rsp = pf_func;
 		break;
 	default:
-		ret = -EINVAL;
+		return -EINVAL;
 	}
 
-	return ret;
+	/* get rid of last response if any */
+	if (qp->rsp) {
+		RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
+		rte_free(qp->rsp);
+	}
+	qp->rsp = rsp;
+
+	return 1;
 }
 
 static int
diff a/drivers/raw/cnxk_bphy/rte_pmd_bphy.h b/drivers/raw/cnxk_bphy/rte_pmd_bphy.h	(rejected hunks)
@@ -201,85 +225,162 @@ rte_pmd_bphy_intr_unregister(uint16_t dev_id, int irq_num)
 		.type = CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER,
 		.data = &info
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
 
-	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, 0);
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      NULL, 0);
 }
 
-static __rte_always_inline struct cnxk_bphy_mem *
-rte_pmd_bphy_intr_mem_get(uint16_t dev_id)
+static __rte_always_inline int
+rte_pmd_bphy_intr_mem_get(uint16_t dev_id, struct cnxk_bphy_mem *mem)
 {
 	struct cnxk_bphy_irq_msg msg = {
 		.type = CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-	int ret;
 
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
-
-	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return NULL;
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      mem, sizeof(*mem));
+}
 
-	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return NULL;
+static __rte_always_inline int
+rte_pmd_bphy_npa_pf_func_get(uint16_t dev_id, uint16_t *pf_func)
+{
+	struct cnxk_bphy_irq_msg msg = {
+		.type = CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
+	};
 
-	return buf.buf_addr;
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      pf_func, sizeof(*pf_func));
 }
 
-static __rte_always_inline uint16_t
-rte_pmd_bphy_npa_pf_func_get(uint16_t dev_id)
+static __rte_always_inline int
+rte_pmd_bphy_sso_pf_func_get(uint16_t dev_id, uint16_t *pf_func)
 {
 	struct cnxk_bphy_irq_msg msg = {
-		.type = CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
+		.type = CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-	int ret;
 
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      pf_func, sizeof(*pf_func));
+}
 
-	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return 0;
+static __rte_always_inline int
+rte_pmd_bphy_cgx_get_link_info(uint16_t dev_id, uint16_t lmac,
+			       struct cnxk_bphy_cgx_msg_link_info *info)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO,
+	};
 
-	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return 0;
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, info, sizeof(*info));
+}
 
-	return (uint16_t)(size_t)buf.buf_addr;
+static __rte_always_inline int
+rte_pmd_bphy_cgx_intlbk_disable(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
 }
 
-static __rte_always_inline uint16_t
-rte_pmd_bphy_sso_pf_func_get(uint16_t dev_id)
+static __rte_always_inline int
+rte_pmd_bphy_cgx_intlbk_enable(uint16_t dev_id, uint16_t lmac)
 {
-	struct cnxk_bphy_irq_msg msg = {
-		.type = CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE,
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-	int ret;
 
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
 
-	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return 0;
+static __rte_always_inline int
+rte_pmd_bphy_cgx_ptp_rx_disable(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE,
+	};
 
-	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return 0;
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_ptp_rx_enable(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_set_link_mode(uint16_t dev_id, uint16_t lmac,
+			       struct cnxk_bphy_cgx_msg_link_mode *mode)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE,
+		.data = mode,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_set_link_state(uint16_t dev_id, uint16_t lmac, bool up)
+{
+	struct cnxk_bphy_cgx_msg_set_link_state state = {
+		.state = up,
+	};
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE,
+		.data = &state,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_start_rxtx(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_START_RXTX,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_stop_rxtx(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_get_supported_fec(uint16_t dev_id, uint16_t lmac,
+				   enum cnxk_bphy_cgx_eth_link_fec *fec)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, fec, sizeof(*fec));
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_set_fec(uint16_t dev_id, uint16_t lmac,
+			 enum cnxk_bphy_cgx_eth_link_fec fec)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_SET_FEC,
+		.data = &fec,
+	};
 
-	return (uint16_t)(size_t)buf.buf_addr;
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
 }
 
 #endif /* _CNXK_BPHY_H_ */
Checking patch drivers/common/cnxk/roc_bphy_irq.c...
error: while searching for:
	return irq_chip->avail_irq_bmask & BIT(irq_num);
}

int
roc_bphy_intr_clear(struct roc_bphy_irq_chip *chip, int irq_num)
{

error: patch failed: drivers/common/cnxk/roc_bphy_irq.c:318
Checking patch drivers/common/cnxk/roc_bphy_irq.h...
error: while searching for:
__roc_api bool roc_bphy_intr_available(struct roc_bphy_irq_chip *irq_chip,
				       int irq_num);
__roc_api int roc_bphy_intr_clear(struct roc_bphy_irq_chip *chip, int irq_num);
__roc_api int roc_bphy_intr_register(struct roc_bphy_irq_chip *irq_chip,
				     struct roc_bphy_intr *intr);


error: patch failed: drivers/common/cnxk/roc_bphy_irq.h:36
Checking patch drivers/common/cnxk/version.map...
error: while searching for:
	roc_bphy_intr_fini;
	roc_bphy_intr_handler;
	roc_bphy_intr_init;
	roc_bphy_intr_register;
	roc_bphy_npa_pf_func_get;
	roc_bphy_sso_pf_func_get;

error: patch failed: drivers/common/cnxk/version.map:42
Checking patch drivers/raw/cnxk_bphy/cnxk_bphy_irq.c...
Hunk #1 succeeded at 33 (offset 1 line).
Applying patch drivers/common/cnxk/roc_bphy_irq.c with 1 reject...
Rejected hunk #1.
Applying patch drivers/common/cnxk/roc_bphy_irq.h with 1 reject...
Rejected hunk #1.
Applying patch drivers/common/cnxk/version.map with 1 reject...
Rejected hunk #1.
Applied patch drivers/raw/cnxk_bphy/cnxk_bphy_irq.c cleanly.
diff a/drivers/common/cnxk/roc_bphy_irq.c b/drivers/common/cnxk/roc_bphy_irq.c	(rejected hunks)
@@ -318,6 +318,12 @@ roc_bphy_intr_available(struct roc_bphy_irq_chip *irq_chip, int irq_num)
 	return irq_chip->avail_irq_bmask & BIT(irq_num);
 }
 
+uint64_t
+roc_bphy_intr_max_get(struct roc_bphy_irq_chip *irq_chip)
+{
+	return irq_chip->max_irq;
+}
+
 int
 roc_bphy_intr_clear(struct roc_bphy_irq_chip *chip, int irq_num)
 {
diff a/drivers/common/cnxk/roc_bphy_irq.h b/drivers/common/cnxk/roc_bphy_irq.h	(rejected hunks)
@@ -36,6 +36,7 @@ __roc_api void roc_bphy_intr_handler(unsigned int irq_num);
 __roc_api bool roc_bphy_intr_available(struct roc_bphy_irq_chip *irq_chip,
 				       int irq_num);
 __roc_api int roc_bphy_intr_clear(struct roc_bphy_irq_chip *chip, int irq_num);
+__roc_api uint64_t roc_bphy_intr_max_get(struct roc_bphy_irq_chip *irq_chip);
 __roc_api int roc_bphy_intr_register(struct roc_bphy_irq_chip *irq_chip,
 				     struct roc_bphy_intr *intr);
 
diff a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map	(rejected hunks)
@@ -42,6 +42,7 @@ INTERNAL {
 	roc_bphy_intr_fini;
 	roc_bphy_intr_handler;
 	roc_bphy_intr_init;
+	roc_bphy_intr_max_get;
 	roc_bphy_intr_register;
 	roc_bphy_npa_pf_func_get;
 	roc_bphy_sso_pf_func_get;

https://lab.dpdk.org/results/dashboard/patchsets/19123/

UNH-IOL DPDK Community Lab
