* |WARNING| pw113627 [PATCH v3] vhost: prefix logs with context
From: checkpatch @ 2022-07-01 13:22 UTC (permalink / raw)
To: test-report; +Cc: David Marchand
Test-Label: checkpatch
Test-Status: WARNING
http://dpdk.org/patch/113627
_coding style issues_
ERROR:COMPLEX_MACRO: Macros with complex values should be enclosed in parentheses
#969: FILE: lib/vhost/vhost.h:632:
+#define VHOST_LOG_DATA(prefix, level, fmt, args...) \
 	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \
 		rte_log(RTE_LOG_ ## level, vhost_data_log_level, \
+			"VHOST_DATA: (%s) " fmt, prefix, ##args) : \
 		0)
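For reference, checkpatch's COMPLEX_MACRO error is conventionally resolved by enclosing the whole macro value in one outer pair of parentheses. A minimal sketch of that shape (illustrative only, not necessarily the fix the submitter will choose for a respin):

#define VHOST_LOG_DATA(prefix, level, fmt, args...) \
	((void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \
		rte_log(RTE_LOG_ ## level, vhost_data_log_level, \
			"VHOST_DATA: (%s) " fmt, prefix, ##args) : \
		0))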
WARNING:TYPO_SPELLING: 'master' may be misspelled - perhaps 'primary'?
#1023: FILE: lib/vhost/vhost_user.c:98:
+ expected_fds, vhost_message_handlers[ctx->msg.request.master].description,
WARNING:TYPO_SPELLING: 'master' may be misspelled - perhaps 'primary'?
#1492: FILE: lib/vhost/vhost_user.c:1157:
+ ack_ctx.msg.request.master);
WARNING:TYPO_SPELLING: 'slave' may be misspelled - perhaps 'secondary'?
#2063: FILE: lib/vhost/vhost_user.c:2409:
+ "invalid file descriptor for slave channel (%d)
", fd);
WARNING:TYPO_SPELLING: 'slave' may be misspelled - perhaps 'secondary'?
#2404: FILE: lib/vhost/vhost_user.c:3226:
+ "vhost read slave message reply failed
");
WARNING:TYPO_SPELLING: 'slave' may be misspelled - perhaps 'secondary'?
#2418: FILE: lib/vhost/vhost_user.c:3237:
+ msg_reply.msg.request.slave, ctx->msg.request.slave);
WARNING:TYPO_SPELLING: 'slave' may be misspelled - perhaps 'secondary'?
#2418: FILE: lib/vhost/vhost_user.c:3237:
+ msg_reply.msg.request.slave, ctx->msg.request.slave);
total: 1 errors, 6 warnings, 2748 lines checked
* |WARNING| pw113627 [PATCH] [v3] vhost: prefix logs with context
From: dpdklab @ 2022-07-01 13:31 UTC (permalink / raw)
To: test-report; +Cc: dpdk-test-reports
Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/113627
_apply patch failure_
Submitter: David Marchand <david.marchand@redhat.com>
Date: Friday, July 01 2022 13:20:56
Applied on: CommitID:7cac53f205ebd04d8ebd3ee6a9dd84f698d4ada3
Apply patch set 113627 failed:
Checking patch lib/vhost/iotlb.c...
Checking patch lib/vhost/socket.c...
error: while searching for:
	ret = pthread_mutex_init(&reconn_list.mutex, NULL);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR, "%s: failed to initialize mutex\n", __func__);
		return ret;
	}
	TAILQ_INIT(&reconn_list.head);
error: patch failed: lib/vhost/socket.c:499
error: while searching for:
	ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL,
			     vhost_user_client_reconnect, NULL);
	if (ret != 0) {
		VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread\n");
		if (pthread_mutex_destroy(&reconn_list.mutex))
			VHOST_LOG_CONFIG(ERR, "%s: failed to destroy reconnect mutex\n", __func__);
	}

	return ret;
error: patch failed: lib/vhost/socket.c:507
Hunk #14 succeeded at 530 (offset -2 lines).
Hunk #15 succeeded at 630 (offset -2 lines).
Hunk #16 succeeded at 644 (offset -2 lines).
Hunk #17 succeeded at 735 (offset -2 lines).
Hunk #18 succeeded at 747 (offset -2 lines).
Hunk #19 succeeded at 785 (offset -2 lines).
Hunk #20 succeeded at 798 (offset -2 lines).
Hunk #21 succeeded at 822 (offset -2 lines).
Hunk #22 succeeded at 834 (offset -2 lines).
Hunk #23 succeeded at 877 (offset -2 lines).
Hunk #24 succeeded at 887 (offset -2 lines).
Hunk #25 succeeded at 907 (offset -2 lines).
Hunk #26 succeeded at 931 (offset -2 lines).
Hunk #27 succeeded at 945 (offset -2 lines).
Hunk #28 succeeded at 961 (offset -2 lines).
Hunk #29 succeeded at 988 (offset -2 lines).
Hunk #30 succeeded at 1076 (offset -2 lines).
Hunk #31 succeeded at 1152 (offset -2 lines).
error: while searching for:
"vhost-events", NULL, fdset_event_dispatch,
&vhost_user.fdset);
if (ret != 0) {
VHOST_LOG_CONFIG(ERR, "(%s) failed to create fdset handling thread\n",
path);
fdset_pipe_uninit(&vhost_user.fdset);
return -1;
}
error: patch failed: lib/vhost/socket.c:1170
Checking patch lib/vhost/vdpa.c...
Checking patch lib/vhost/vhost.c...
error: while searching for:
	uint16_t max_desc;

	if (!rte_dma_is_valid(dma_id)) {
		VHOST_LOG_CONFIG(ERR, "DMA %d is not found.\n", dma_id);
		return -1;
	}

	if (rte_dma_info_get(dma_id, &info) != 0) {
		VHOST_LOG_CONFIG(ERR, "Fail to get DMA %d information.\n", dma_id);
		return -1;
	}

	if (vchan_id >= info.max_vchans) {
		VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
		return -1;
	}
error: patch failed: lib/vhost/vhost.c:1864
Hunk #20 succeeded at 1887 (offset -4 lines).
Hunk #21 succeeded at 1897 (offset -4 lines).
Hunk #22 succeeded at 1908 (offset -4 lines).
Hunk #23 succeeded at 1946 (offset -4 lines).
Hunk #24 succeeded at 1978 (offset -4 lines).
Checking patch lib/vhost/vhost.h...
error: while searching for:
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DATA(DEBUG, "(%s) %s", device->ifname, packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
error: patch failed: lib/vhost/vhost.h:652
error: while searching for:
		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DATA(DEBUG, "(%s) %s: used_event_idx=%d, old=%d, new=%d\n",
			dev->ifname, __func__,
			vhost_used_event(vq),
			old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
error: patch failed: lib/vhost/vhost.h:866
Checking patch lib/vhost/vhost_user.c...
error: while searching for:
			return;

		/* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
		VHOST_LOG_CONFIG(ERR, "(%s) DMA engine map failed\n",
				dev->ifname);
	}
}
error: patch failed: lib/vhost/vhost_user.c:144
error: while searching for:
			if (rte_errno == EINVAL)
				return;

			VHOST_LOG_CONFIG(ERR, "(%s) DMA engine unmap failed\n",
					dev->ifname);
		}
	}
}
error: patch failed: lib/vhost/vhost_user.c:161
error: while searching for:
			dev->max_guest_pages * sizeof(*page),
			RTE_CACHE_LINE_SIZE);
		if (dev->guest_pages == NULL) {
			VHOST_LOG_CONFIG(ERR, "(%s) cannot realloc guest_pages\n",
				dev->ifname);
			rte_free(old_pages);
			return -1;
		}
error: patch failed: lib/vhost/vhost_user.c:947
Hunk #88 succeeded at 2982 (offset -1 lines).
Hunk #89 succeeded at 2991 (offset -1 lines).
Hunk #90 succeeded at 3006 (offset -1 lines).
Hunk #91 succeeded at 3086 (offset -1 lines).
Hunk #92 succeeded at 3128 (offset -1 lines).
Hunk #93 succeeded at 3146 (offset -1 lines).
error: while searching for:
	if (vdpa_dev->ops->get_dev_type) {
		ret = vdpa_dev->ops->get_dev_type(vdpa_dev, &vdpa_type);
		if (ret) {
			VHOST_LOG_CONFIG(ERR, "failed to get vdpa dev type.\n");
			ret = -1;
			goto out;
		}
error: patch failed: lib/vhost/vhost_user.c:3159
Hunk #95 succeeded at 3186 (offset -15 lines).
Hunk #96 succeeded at 3207 (offset -15 lines).
Hunk #97 succeeded at 3250 (offset -15 lines).
Hunk #98 succeeded at 3276 (offset -15 lines).
Hunk #99 succeeded at 3323 (offset -15 lines).
Checking patch lib/vhost/virtio_net.c...
Hunk #21 succeeded at 2773 (offset -8 lines).
Hunk #22 succeeded at 2840 (offset -8 lines).
Hunk #23 succeeded at 2894 (offset -8 lines).
Hunk #24 succeeded at 2927 (offset -8 lines).
Hunk #25 succeeded at 2941 (offset -8 lines).
Hunk #26 succeeded at 3123 (offset -8 lines).
Hunk #27 succeeded at 3135 (offset -8 lines).
Hunk #28 succeeded at 3250 (offset -8 lines).
Hunk #29 succeeded at 3304 (offset -8 lines).
Hunk #30 succeeded at 3423 (offset -8 lines).
Hunk #31 succeeded at 3453 (offset -8 lines).
Hunk #32 succeeded at 3467 (offset -8 lines).
Hunk #33 succeeded at 3497 (offset -8 lines).
Hunk #34 succeeded at 3587 (offset -8 lines).
Hunk #35 succeeded at 3600 (offset -8 lines).
Hunk #36 succeeded at 3626 (offset -8 lines).
Hunk #37 succeeded at 3731 (offset -8 lines).
Hunk #38 succeeded at 3766 (offset -8 lines).
Hunk #39 succeeded at 3803 (offset -8 lines).
Applied patch lib/vhost/iotlb.c cleanly.
Applying patch lib/vhost/socket.c with 3 rejects...
Hunk #1 applied cleanly.
Hunk #2 applied cleanly.
Hunk #3 applied cleanly.
Hunk #4 applied cleanly.
Hunk #5 applied cleanly.
Hunk #6 applied cleanly.
Hunk #7 applied cleanly.
Hunk #8 applied cleanly.
Hunk #9 applied cleanly.
Hunk #10 applied cleanly.
Hunk #11 applied cleanly.
Rejected hunk #12.
Rejected hunk #13.
Hunk #14 applied cleanly.
Hunk #15 applied cleanly.
Hunk #16 applied cleanly.
Hunk #17 applied cleanly.
Hunk #18 applied cleanly.
Hunk #19 applied cleanly.
Hunk #20 applied cleanly.
Hunk #21 applied cleanly.
Hunk #22 applied cleanly.
Hunk #23 applied cleanly.
Hunk #24 applied cleanly.
Hunk #25 applied cleanly.
Hunk #26 applied cleanly.
Hunk #27 applied cleanly.
Hunk #28 applied cleanly.
Hunk #29 applied cleanly.
Hunk #30 applied cleanly.
Hunk #31 applied cleanly.
Rejected hunk #32.
Applied patch lib/vhost/vdpa.c cleanly.
Applying patch lib/vhost/vhost.c with 1 reject...
Hunk #1 applied cleanly.
Hunk #2 applied cleanly.
Hunk #3 applied cleanly.
Hunk #4 applied cleanly.
Hunk #5 applied cleanly.
Hunk #6 applied cleanly.
Hunk #7 applied cleanly.
Hunk #8 applied cleanly.
Hunk #9 applied cleanly.
Hunk #10 applied cleanly.
Hunk #11 applied cleanly.
Hunk #12 applied cleanly.
Hunk #13 applied cleanly.
Hunk #14 applied cleanly.
Hunk #15 applied cleanly.
Hunk #16 applied cleanly.
Hunk #17 applied cleanly.
Hunk #18 applied cleanly.
Rejected hunk #19.
Hunk #20 applied cleanly.
Hunk #21 applied cleanly.
Hunk #22 applied cleanly.
Hunk #23 applied cleanly.
Hunk #24 applied cleanly.
Applying patch lib/vhost/vhost.h with 2 rejects...
Hunk #1 applied cleanly.
Rejected hunk #2.
Hunk #3 applied cleanly.
Rejected hunk #4.
Applying patch lib/vhost/vhost_user.c with 4 rejects...
Hunk #1 applied cleanly.
Rejected hunk #2.
Rejected hunk #3.
Hunk #4 applied cleanly.
Hunk #5 applied cleanly.
Hunk #6 applied cleanly.
Hunk #7 applied cleanly.
Hunk #8 applied cleanly.
Hunk #9 applied cleanly.
Hunk #10 applied cleanly.
Hunk #11 applied cleanly.
Hunk #12 applied cleanly.
Hunk #13 applied cleanly.
Hunk #14 applied cleanly.
Hunk #15 applied cleanly.
Hunk #16 applied cleanly.
Hunk #17 applied cleanly.
Hunk #18 applied cleanly.
Hunk #19 applied cleanly.
Hunk #20 applied cleanly.
Hunk #21 applied cleanly.
Hunk #22 applied cleanly.
Hunk #23 applied cleanly.
Hunk #24 applied cleanly.
Hunk #25 applied cleanly.
Hunk #26 applied cleanly.
Hunk #27 applied cleanly.
Hunk #28 applied cleanly.
Hunk #29 applied cleanly.
Hunk #30 applied cleanly.
Rejected hunk #31.
Hunk #32 applied cleanly.
Hunk #33 applied cleanly.
Hunk #34 applied cleanly.
Hunk #35 applied cleanly.
Hunk #36 applied cleanly.
Hunk #37 applied cleanly.
Hunk #38 applied cleanly.
Hunk #39 applied cleanly.
Hunk #40 applied cleanly.
Hunk #41 applied cleanly.
Hunk #42 applied cleanly.
Hunk #43 applied cleanly.
Hunk #44 applied cleanly.
Hunk #45 applied cleanly.
Hunk #46 applied cleanly.
Hunk #47 applied cleanly.
Hunk #48 applied cleanly.
Hunk #49 applied cleanly.
Hunk #50 applied cleanly.
Hunk #51 applied cleanly.
Hunk #52 applied cleanly.
Hunk #53 applied cleanly.
Hunk #54 applied cleanly.
Hunk #55 applied cleanly.
Hunk #56 applied cleanly.
Hunk #57 applied cleanly.
Hunk #58 applied cleanly.
Hunk #59 applied cleanly.
Hunk #60 applied cleanly.
Hunk #61 applied cleanly.
Hunk #62 applied cleanly.
Hunk #63 applied cleanly.
Hunk #64 applied cleanly.
Hunk #65 applied cleanly.
Hunk #66 applied cleanly.
Hunk #67 applied cleanly.
Hunk #68 applied cleanly.
Hunk #69 applied cleanly.
Hunk #70 applied cleanly.
Hunk #71 applied cleanly.
Hunk #72 applied cleanly.
Hunk #73 applied cleanly.
Hunk #74 applied cleanly.
Hunk #75 applied cleanly.
Hunk #76 applied cleanly.
Hunk #77 applied cleanly.
Hunk #78 applied cleanly.
Hunk #79 applied cleanly.
Hunk #80 applied cleanly.
Hunk #81 applied cleanly.
Hunk #82 applied cleanly.
Hunk #83 applied cleanly.
Hunk #84 applied cleanly.
Hunk #85 applied cleanly.
Hunk #86 applied cleanly.
Hunk #87 applied cleanly.
Hunk #88 applied cleanly.
Hunk #89 applied cleanly.
Hunk #90 applied cleanly.
Hunk #91 applied cleanly.
Hunk #92 applied cleanly.
Hunk #93 applied cleanly.
Rejected hunk #94.
Hunk #95 applied cleanly.
Hunk #96 applied cleanly.
Hunk #97 applied cleanly.
Hunk #98 applied cleanly.
Hunk #99 applied cleanly.
Applied patch lib/vhost/virtio_net.c cleanly.
diff a/lib/vhost/socket.c b/lib/vhost/socket.c (rejected hunks)
@@ -499,7 +499,7 @@ vhost_user_reconnect_init(void)
 	ret = pthread_mutex_init(&reconn_list.mutex, NULL);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(ERR, "%s: failed to initialize mutex\n", __func__);
+		VHOST_LOG_CONFIG("thread", ERR, "%s: failed to initialize mutex\n", __func__);
 		return ret;
 	}
 	TAILQ_INIT(&reconn_list.head);
@@ -507,9 +507,11 @@ vhost_user_reconnect_init(void)
 	ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL,
 			     vhost_user_client_reconnect, NULL);
 	if (ret != 0) {
-		VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread\n");
+		VHOST_LOG_CONFIG("thread", ERR, "failed to create reconnect thread\n");
 		if (pthread_mutex_destroy(&reconn_list.mutex))
-			VHOST_LOG_CONFIG(ERR, "%s: failed to destroy reconnect mutex\n", __func__);
+			VHOST_LOG_CONFIG("thread", ERR,
+				"%s: failed to destroy reconnect mutex\n",
+				__func__);
 	}

 	return ret;
@@ -1170,8 +1162,7 @@ rte_vhost_driver_start(const char *path)
"vhost-events", NULL, fdset_event_dispatch,
&vhost_user.fdset);
if (ret != 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to create fdset handling thread\n",
- path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to create fdset handling thread\n");
fdset_pipe_uninit(&vhost_user.fdset);
return -1;
}
diff a/lib/vhost/vhost.c b/lib/vhost/vhost.c (rejected hunks)
@@ -1864,17 +1871,17 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 	uint16_t max_desc;

 	if (!rte_dma_is_valid(dma_id)) {
-		VHOST_LOG_CONFIG(ERR, "DMA %d is not found.\n", dma_id);
+		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
 		return -1;
 	}

 	if (rte_dma_info_get(dma_id, &info) != 0) {
-		VHOST_LOG_CONFIG(ERR, "Fail to get DMA %d information.\n", dma_id);
+		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
 		return -1;
 	}

 	if (vchan_id >= info.max_vchans) {
-		VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
+		VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
 		return -1;
 	}
diff a/lib/vhost/vhost.h b/lib/vhost/vhost.h (rejected hunks)
@@ -652,7 +652,7 @@ extern int vhost_data_log_level;
 	} \
 	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
 	\
-	VHOST_LOG_DATA(DEBUG, "(%s) %s", device->ifname, packet); \
+	VHOST_LOG_DATA(device->ifname, DEBUG, "%s", packet); \
 } while (0)
 #else
 #define PRINT_PACKET(device, addr, size, header) do {} while (0)
@@ -866,10 +865,9 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		vq->signalled_used = new;
 		vq->signalled_used_valid = true;

-		VHOST_LOG_DATA(DEBUG, "(%s) %s: used_event_idx=%d, old=%d, new=%d\n",
-			dev->ifname, __func__,
-			vhost_used_event(vq),
-			old, new);
+		VHOST_LOG_DATA(dev->ifname, DEBUG,
+			"%s: used_event_idx=%d, old=%d, new=%d\n",
+			__func__, vhost_used_event(vq), old, new);

 		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
 					(vq->callfd >= 0)) ||
diff a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c (rejected hunks)
@@ -144,8 +144,7 @@ async_dma_map(struct virtio_net *dev, bool do_map)
 			return;

 		/* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
-		VHOST_LOG_CONFIG(ERR, "(%s) DMA engine map failed\n",
-			dev->ifname);
+		VHOST_LOG_CONFIG(dev->ifname, ERR, "DMA engine map failed\n");
 	}
 }
@@ -161,8 +160,7 @@ async_dma_map(struct virtio_net *dev, bool do_map)
 			if (rte_errno == EINVAL)
 				return;

-			VHOST_LOG_CONFIG(ERR, "(%s) DMA engine unmap failed\n",
-				dev->ifname);
+			VHOST_LOG_CONFIG(dev->ifname, ERR, "DMA engine unmap failed\n");
 		}
 	}
 }
@@ -947,8 +949,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
 			dev->max_guest_pages * sizeof(*page),
 			RTE_CACHE_LINE_SIZE);
 		if (dev->guest_pages == NULL) {
-			VHOST_LOG_CONFIG(ERR, "(%s) cannot realloc guest_pages\n",
-				dev->ifname);
+			VHOST_LOG_CONFIG(dev->ifname, ERR, "cannot realloc guest_pages\n");
 			rte_free(old_pages);
 			return -1;
 		}
@@ -3159,7 +3188,7 @@ vhost_user_msg_handler(int vid, int fd)
 	if (vdpa_dev->ops->get_dev_type) {
 		ret = vdpa_dev->ops->get_dev_type(vdpa_dev, &vdpa_type);
 		if (ret) {
-			VHOST_LOG_CONFIG(ERR, "failed to get vdpa dev type.\n");
+			VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to get vdpa dev type.\n");
 			ret = -1;
 			goto out;
 		}
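For context, the rejected hunks above all revolve around the same mechanical change: VHOST_LOG_CONFIG() and VHOST_LOG_DATA() gain a leading context-prefix argument (a socket path or device name), and the base commit used here has drifted from the tree the series was generated against. A self-contained sketch of the logging pattern being applied, using a plain printf stand-in for rte_log (the DEMO_* names are illustrative, not the DPDK implementation):

#include <stdio.h>

/* Hypothetical stand-in for the prefixed config-log macro: every
 * message is tagged with a caller-supplied context string. */
#define DEMO_LOG_CONFIG(prefix, level, fmt, args...) \
	printf("VHOST_CONFIG: (%s) " level ": " fmt, prefix, ##args)

int main(void)
{
	/* Call sites pass the socket path (or device name) first,
	 * so every log line carries its context. */
	DEMO_LOG_CONFIG("/tmp/vhost.sock", "ERR",
			"failed to create fdset handling thread\n");
	return 0;
}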
https://lab.dpdk.org/results/dashboard/patchsets/22886/
UNH-IOL DPDK Community Lab