From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Cc: Stephen Hemminger <stephen@networkplumber.org>,
Abdullah Sevincer <abdullah.sevincer@intel.com>,
Timothy McDaniel <timothy.mcdaniel@intel.com>,
Gage Eads <gage.eads@intel.com>
Subject: [PATCH v5 13/18] event/dlb2: use dedicated logtype
Date: Thu, 21 Dec 2023 08:46:30 -0800 [thread overview]
Message-ID: <20231221164841.125006-14-stephen@networkplumber.org> (raw)
In-Reply-To: <20231221164841.125006-1-stephen@networkplumber.org>
The driver was logging with the legacy RTE_LOGTYPE_PMD even though it already registers its own dedicated logtype.
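For context, the general pattern behind a dedicated logtype looks roughly
like the sketch below. The macro bodies here are illustrative only (the
driver's real definitions live in its log header), and it is assumed the
wrapper appends the newline itself, which is why the call sites in this
diff drop their trailing "\n":

  #include <rte_log.h>

  /* Register a dynamically allocated logtype owned by this PMD
   * (hypothetical name; the driver uses its own identifier).
   */
  RTE_LOG_REGISTER_DEFAULT(dlb2_logtype_sketch, NOTICE);

  /* Before: messages were tagged with the shared, legacy PMD logtype. */
  #define DLB2_LOG_ERR_OLD(fmt, ...) \
          RTE_LOG(ERR, PMD, fmt "\n", ##__VA_ARGS__)

  /* After: messages go to the driver's own logtype, and the macro,
   * not the call site, supplies the terminating newline.
   */
  #define DLB2_LOG_ERR_NEW(fmt, ...) \
          rte_log(RTE_LOG_ERR, dlb2_logtype_sketch, "%s() " fmt "\n", \
                  __func__, ##__VA_ARGS__)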
Fixes: 5433956d5185 ("event/dlb2: add eventdev probe")
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/dlb2/dlb2.c | 275 +++++++++++++++++++-------------------
1 file changed, 137 insertions(+), 138 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 050ace0904b4..419876490780 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -169,7 +169,7 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
ret = dlb2_iface_get_num_resources(handle,
&dlb2->hw_rsrc_query_results);
if (ret) {
- DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
+ DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d", ret);
return ret;
}
@@ -259,7 +259,7 @@ set_producer_coremask(const char *key __rte_unused,
const char **mask_str = opaque;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -293,7 +293,7 @@ set_max_cq_depth(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -304,7 +304,7 @@ set_max_cq_depth(const char *key __rte_unused,
if (*max_cq_depth < DLB2_MIN_CQ_DEPTH_OVERRIDE ||
*max_cq_depth > DLB2_MAX_CQ_DEPTH_OVERRIDE ||
!rte_is_power_of_2(*max_cq_depth)) {
- DLB2_LOG_ERR("dlb2: max_cq_depth %d and %d and a power of 2\n",
+ DLB2_LOG_ERR("dlb2: max_cq_depth %d and %d and a power of 2",
DLB2_MIN_CQ_DEPTH_OVERRIDE,
DLB2_MAX_CQ_DEPTH_OVERRIDE);
return -EINVAL;
@@ -322,7 +322,7 @@ set_max_enq_depth(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -333,7 +333,7 @@ set_max_enq_depth(const char *key __rte_unused,
if (*max_enq_depth < DLB2_MIN_ENQ_DEPTH_OVERRIDE ||
*max_enq_depth > DLB2_MAX_ENQ_DEPTH_OVERRIDE ||
!rte_is_power_of_2(*max_enq_depth)) {
- DLB2_LOG_ERR("dlb2: max_enq_depth %d and %d and a power of 2\n",
+ DLB2_LOG_ERR("dlb2: max_enq_depth %d and %d and a power of 2",
DLB2_MIN_ENQ_DEPTH_OVERRIDE,
DLB2_MAX_ENQ_DEPTH_OVERRIDE);
return -EINVAL;
@@ -351,7 +351,7 @@ set_max_num_events(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -361,7 +361,7 @@ set_max_num_events(const char *key __rte_unused,
if (*max_num_events < 0 || *max_num_events >
DLB2_MAX_NUM_LDB_CREDITS) {
- DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
+ DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d",
DLB2_MAX_NUM_LDB_CREDITS);
return -EINVAL;
}
@@ -378,7 +378,7 @@ set_num_dir_credits(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -388,7 +388,7 @@ set_num_dir_credits(const char *key __rte_unused,
if (*num_dir_credits < 0 ||
*num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) {
- DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
+ DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d",
DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2));
return -EINVAL;
}
@@ -405,7 +405,7 @@ set_dev_id(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -425,7 +425,7 @@ set_poll_interval(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -445,7 +445,7 @@ set_port_cos(const char *key __rte_unused,
int first, last, cos_id, i;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -458,18 +458,18 @@ set_port_cos(const char *key __rte_unused,
} else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) {
last = first;
} else {
- DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be port-port:val, or port:val\n");
+ DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be port-port:val, or port:val");
return -EINVAL;
}
if (first > last || first < 0 ||
last >= DLB2_MAX_NUM_LDB_PORTS) {
- DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value\n");
+ DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value");
return -EINVAL;
}
if (cos_id < DLB2_COS_0 || cos_id > DLB2_COS_3) {
- DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4\n");
+ DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4");
return -EINVAL;
}
@@ -487,7 +487,7 @@ set_cos_bw(const char *key __rte_unused,
struct dlb2_cos_bw *cos_bw = opaque;
if (opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -495,11 +495,11 @@ set_cos_bw(const char *key __rte_unused,
if (sscanf(value, "%d:%d:%d:%d", &cos_bw->val[0], &cos_bw->val[1],
&cos_bw->val[2], &cos_bw->val[3]) != 4) {
- DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100\n");
+ DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100");
return -EINVAL;
}
if (cos_bw->val[0] + cos_bw->val[1] + cos_bw->val[2] + cos_bw->val[3] > 100) {
- DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100\n");
+ DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100");
return -EINVAL;
}
@@ -515,7 +515,7 @@ set_sw_credit_quanta(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -524,7 +524,7 @@ set_sw_credit_quanta(const char *key __rte_unused,
return ret;
if (*sw_credit_quanta <= 0) {
- DLB2_LOG_ERR("sw_credit_quanta must be > 0\n");
+ DLB2_LOG_ERR("sw_credit_quanta must be > 0");
return -EINVAL;
}
@@ -540,7 +540,7 @@ set_hw_credit_quanta(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -560,7 +560,7 @@ set_default_depth_thresh(const char *key __rte_unused,
int ret;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -579,7 +579,7 @@ set_vector_opts_enab(const char *key __rte_unused,
bool *dlb2_vector_opts_enabled = opaque;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -599,7 +599,7 @@ set_default_ldb_port_allocation(const char *key __rte_unused,
bool *default_ldb_port_allocation = opaque;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -619,7 +619,7 @@ set_enable_cq_weight(const char *key __rte_unused,
bool *enable_cq_weight = opaque;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -640,7 +640,7 @@ set_qid_depth_thresh(const char *key __rte_unused,
int first, last, thresh, i;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -657,18 +657,18 @@ set_qid_depth_thresh(const char *key __rte_unused,
} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
last = first;
} else {
- DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
+ DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val");
return -EINVAL;
}
if (first > last || first < 0 ||
last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
- DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
+ DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value");
return -EINVAL;
}
if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
- DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
+ DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d",
DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
return -EINVAL;
}
@@ -688,7 +688,7 @@ set_qid_depth_thresh_v2_5(const char *key __rte_unused,
int first, last, thresh, i;
if (value == NULL || opaque == NULL) {
- DLB2_LOG_ERR("NULL pointer\n");
+ DLB2_LOG_ERR("NULL pointer");
return -EINVAL;
}
@@ -705,18 +705,18 @@ set_qid_depth_thresh_v2_5(const char *key __rte_unused,
} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
last = first;
} else {
- DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
+ DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val");
return -EINVAL;
}
if (first > last || first < 0 ||
last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) {
- DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
+ DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value");
return -EINVAL;
}
if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
- DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
+ DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d",
DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
return -EINVAL;
}
@@ -738,7 +738,7 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev,
if (ret) {
const struct rte_eventdev_data *data = dev->data;
- DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
+ DLB2_LOG_ERR("get resources err=%d, devid=%d",
ret, data->dev_id);
/* fn is void, so fall through and return values set up in
* probe
@@ -781,7 +781,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
struct dlb2_create_sched_domain_args *cfg;
if (resources_asked == NULL) {
- DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
+ DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter");
ret = EINVAL;
goto error_exit;
}
@@ -809,7 +809,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
if (cos_ports > resources_asked->num_ldb_ports ||
(cos_ports && dlb2->max_cos_port >= resources_asked->num_ldb_ports)) {
- DLB2_LOG_ERR("dlb2: num_ldb_ports < cos_ports\n");
+ DLB2_LOG_ERR("dlb2: num_ldb_ports < cos_ports");
ret = EINVAL;
goto error_exit;
}
@@ -832,7 +832,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
evdev_dlb2_default_info.max_event_port_dequeue_depth;
if (device_version == DLB2_HW_V2_5) {
- DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d\n",
+ DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d",
cfg->num_ldb_queues,
resources_asked->num_ldb_ports,
cfg->num_dir_ports,
@@ -840,7 +840,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
cfg->num_hist_list_entries,
cfg->num_credits);
} else {
- DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
+ DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d",
cfg->num_ldb_queues,
resources_asked->num_ldb_ports,
cfg->num_dir_ports,
@@ -854,7 +854,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
ret = dlb2_iface_sched_domain_create(handle, cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
+ DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s",
ret,
dlb2_error_strings[cfg->response.status]);
@@ -930,27 +930,27 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
dlb2_hw_reset_sched_domain(dev, true);
ret = dlb2_hw_query_resources(dlb2);
if (ret) {
- DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
+ DLB2_LOG_ERR("get resources err=%d, devid=%d",
ret, data->dev_id);
return ret;
}
}
if (config->nb_event_queues > rsrcs->num_queues) {
- DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
+ DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).",
config->nb_event_queues,
rsrcs->num_queues);
return -EINVAL;
}
if (config->nb_event_ports > (rsrcs->num_ldb_ports
+ rsrcs->num_dir_ports)) {
- DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
+ DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).",
config->nb_event_ports,
(rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
return -EINVAL;
}
if (config->nb_events_limit > rsrcs->nb_events_limit) {
- DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
+ DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).",
config->nb_events_limit,
rsrcs->nb_events_limit);
return -EINVAL;
@@ -1000,7 +1000,7 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
if (dlb2_hw_create_sched_domain(dlb2, handle, rsrcs,
dlb2->version) < 0) {
- DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
+ DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed");
return -ENODEV;
}
@@ -1068,7 +1068,7 @@ dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
ret = dlb2_iface_get_sn_allocation(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
}
@@ -1088,7 +1088,7 @@ dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
ret = dlb2_iface_set_sn_allocation(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
}
@@ -1107,7 +1107,7 @@ dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
}
@@ -1161,7 +1161,7 @@ dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
}
if (i == DLB2_NUM_SN_GROUPS) {
- DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
+ DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots",
__func__, sequence_numbers);
return;
}
@@ -1236,7 +1236,7 @@ dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
ret = dlb2_iface_ldb_queue_create(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return -EINVAL;
}
@@ -1250,7 +1250,7 @@ dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
queue->sched_type = sched_type;
queue->config_state = DLB2_CONFIGURED;
- DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
+ DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d",
qm_qid,
cfg.num_atomic_inflights,
cfg.num_sequence_numbers,
@@ -1272,7 +1272,7 @@ dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
if (qm_qid < 0) {
- DLB2_LOG_ERR("Failed to create the load-balanced queue\n");
+ DLB2_LOG_ERR("Failed to create the load-balanced queue");
return qm_qid;
}
@@ -1380,7 +1380,7 @@ dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
RTE_CACHE_LINE_SIZE);
if (qe == NULL) {
- DLB2_LOG_ERR("dlb2: no memory for consume_qe\n");
+ DLB2_LOG_ERR("dlb2: no memory for consume_qe");
return -ENOMEM;
}
qm_port->consume_qe = qe;
@@ -1412,7 +1412,7 @@ dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
RTE_CACHE_LINE_SIZE);
if (qe == NULL) {
- DLB2_LOG_ERR("dlb2: no memory for complete_qe\n");
+ DLB2_LOG_ERR("dlb2: no memory for complete_qe");
return -ENOMEM;
}
qm_port->int_arm_qe = qe;
@@ -1440,20 +1440,20 @@ dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
if (qm_port->qe4 == NULL) {
- DLB2_LOG_ERR("dlb2: no qe4 memory\n");
+ DLB2_LOG_ERR("dlb2: no qe4 memory");
ret = -ENOMEM;
goto error_exit;
}
ret = dlb2_init_int_arm_qe(qm_port, mz_name);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret);
+ DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d", ret);
goto error_exit;
}
ret = dlb2_init_consume_qe(qm_port, mz_name);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret);
+ DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d", ret);
goto error_exit;
}
@@ -1536,14 +1536,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
return -EINVAL;
if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
- DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d\n",
+ DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d",
DLB2_MIN_CQ_DEPTH);
return -EINVAL;
}
if (dlb2->version == DLB2_HW_V2 && ev_port->cq_weight != 0 &&
ev_port->cq_weight > dequeue_depth) {
- DLB2_LOG_ERR("dlb2: invalid cq dequeue depth %d, must be >= cq weight %d\n",
+ DLB2_LOG_ERR("dlb2: invalid cq dequeue depth %d, must be >= cq weight %d",
dequeue_depth, ev_port->cq_weight);
return -EINVAL;
}
@@ -1579,14 +1579,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
ret = dlb2_iface_ldb_port_create(handle, &cfg, dlb2->poll_mode);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
goto error_exit;
}
qm_port_id = cfg.response.id;
- DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<\n",
+ DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<",
ev_port->id, qm_port_id);
qm_port = &ev_port->qm_port;
@@ -1602,7 +1602,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
ret = dlb2_init_qe_mem(qm_port, mz_name);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
+ DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d", ret);
goto error_exit;
}
@@ -1615,7 +1615,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)",
ret,
dlb2_error_strings[cfg.response. status]);
goto error_exit;
@@ -1680,7 +1680,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
- DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
+ DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d",
qm_port_id,
dequeue_depth,
qm_port->ldb_credits,
@@ -1689,7 +1689,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
qm_port->credits = credit_high_watermark;
qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
- DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, credits=%d\n",
+ DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, credits=%d",
qm_port_id,
dequeue_depth,
qm_port->credits);
@@ -1717,7 +1717,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
rte_spinlock_unlock(&handle->resource_lock);
- DLB2_LOG_ERR("dlb2: create ldb port failed!\n");
+ DLB2_LOG_ERR("dlb2: create ldb port failed!");
return ret;
}
@@ -1761,13 +1761,13 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
return -EINVAL;
if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
- DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n",
+ DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d",
DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
return -EINVAL;
}
if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
- DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
+ DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d",
DLB2_MIN_ENQUEUE_DEPTH);
return -EINVAL;
}
@@ -1802,14 +1802,14 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
ret = dlb2_iface_dir_port_create(handle, &cfg, dlb2->poll_mode);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
goto error_exit;
}
qm_port_id = cfg.response.id;
- DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<\n",
+ DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<",
ev_port->id, qm_port_id);
qm_port = &ev_port->qm_port;
@@ -1827,7 +1827,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
ret = dlb2_init_qe_mem(qm_port, mz_name);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
+ DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d", ret);
goto error_exit;
}
@@ -1881,7 +1881,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
- DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
+ DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d",
qm_port_id,
dequeue_depth,
dir_credit_high_watermark,
@@ -1890,7 +1890,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
qm_port->credits = credit_high_watermark;
qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
- DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d\n",
+ DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d",
qm_port_id,
dequeue_depth,
credit_high_watermark);
@@ -1916,7 +1916,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
rte_spinlock_unlock(&handle->resource_lock);
- DLB2_LOG_ERR("dlb2: create dir port failed!\n");
+ DLB2_LOG_ERR("dlb2: create dir port failed!");
return ret;
}
@@ -1932,7 +1932,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
int ret;
if (dev == NULL || port_conf == NULL) {
- DLB2_LOG_ERR("Null parameter\n");
+ DLB2_LOG_ERR("Null parameter");
return -EINVAL;
}
@@ -1950,7 +1950,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
ev_port = &dlb2->ev_ports[ev_port_id];
/* configured? */
if (ev_port->setup_done) {
- DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id);
+ DLB2_LOG_ERR("evport %d is already configured", ev_port_id);
return -EINVAL;
}
@@ -1982,7 +1982,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
if (port_conf->enqueue_depth > sw_credit_quanta ||
port_conf->enqueue_depth > hw_credit_quanta) {
- DLB2_LOG_ERR("Invalid port config. Enqueue depth %d must be <= credit quanta %d and batch size %d\n",
+ DLB2_LOG_ERR("Invalid port config. Enqueue depth %d must be <= credit quanta %d and batch size %d",
port_conf->enqueue_depth,
sw_credit_quanta,
hw_credit_quanta);
@@ -2004,7 +2004,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
port_conf->dequeue_depth,
port_conf->enqueue_depth);
if (ret < 0) {
- DLB2_LOG_ERR("Failed to create the lB port ve portId=%d\n",
+ DLB2_LOG_ERR("Failed to create the lB port ve portId=%d",
ev_port_id);
return ret;
@@ -2015,7 +2015,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
port_conf->dequeue_depth,
port_conf->enqueue_depth);
if (ret < 0) {
- DLB2_LOG_ERR("Failed to create the DIR port\n");
+ DLB2_LOG_ERR("Failed to create the DIR port");
return ret;
}
}
@@ -2082,14 +2082,14 @@ dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
ret = dlb2_iface_map_qid(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
- DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
+ DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d",
handle->domain_id, cfg.port_id,
cfg.qid,
cfg.priority);
} else {
- DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n",
+ DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d",
qm_qid, qm_port_id);
}
@@ -2117,7 +2117,7 @@ dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
first_avail = i;
}
if (first_avail == -1) {
- DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n",
+ DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.",
ev_port->qm_port.id);
return -EINVAL;
}
@@ -2154,7 +2154,7 @@ dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
ret = dlb2_iface_dir_queue_create(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return -EINVAL;
}
@@ -2172,7 +2172,7 @@ dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);
if (qm_qid < 0) {
- DLB2_LOG_ERR("Failed to create the DIR queue\n");
+ DLB2_LOG_ERR("Failed to create the DIR queue");
return qm_qid;
}
@@ -2202,7 +2202,7 @@ dlb2_do_port_link(struct rte_eventdev *dev,
err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);
if (err) {
- DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
+ DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d",
ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
ev_queue->id, ev_port->id);
@@ -2240,7 +2240,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
queue_is_dir = ev_queue->qm_queue.is_directed;
if (port_is_dir != queue_is_dir) {
- DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n",
+ DLB2_LOG_ERR("%s queue %u can't link to %s port %u",
queue_is_dir ? "DIR" : "LDB", ev_queue->id,
port_is_dir ? "DIR" : "LDB", ev_port->id);
@@ -2250,7 +2250,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
/* Check if there is space for the requested link */
if (!link_exists && index == -1) {
- DLB2_LOG_ERR("no space for new link\n");
+ DLB2_LOG_ERR("no space for new link");
rte_errno = -ENOSPC;
return -1;
}
@@ -2258,7 +2258,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
/* Check if the directed port is already linked */
if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
!link_exists) {
- DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n",
+ DLB2_LOG_ERR("Can't link DIR port %d to >1 queues",
ev_port->id);
rte_errno = -EINVAL;
return -1;
@@ -2267,7 +2267,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
/* Check if the directed queue is already linked */
if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
!link_exists) {
- DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
+ DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports",
ev_queue->id);
rte_errno = -EINVAL;
return -1;
@@ -2289,14 +2289,14 @@ dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
RTE_SET_USED(dev);
if (ev_port == NULL) {
- DLB2_LOG_ERR("dlb2: evport not setup\n");
+ DLB2_LOG_ERR("dlb2: evport not setup");
rte_errno = -EINVAL;
return 0;
}
if (!ev_port->setup_done &&
ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
- DLB2_LOG_ERR("dlb2: evport not setup\n");
+ DLB2_LOG_ERR("dlb2: evport not setup");
rte_errno = -EINVAL;
return 0;
}
@@ -2305,13 +2305,13 @@ dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
* queues pointer.
*/
if (nb_links == 0) {
- DLB2_LOG_DBG("dlb2: nb_links is 0\n");
+ DLB2_LOG_DBG("dlb2: nb_links is 0");
return 0; /* Ignore and return success */
}
dlb2 = ev_port->dlb2;
- DLB2_LOG_DBG("Linking %u queues to %s port %d\n",
+ DLB2_LOG_DBG("Linking %u queues to %s port %d",
nb_links,
ev_port->qm_port.is_directed ? "DIR" : "LDB",
ev_port->id);
@@ -2381,7 +2381,7 @@ dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
ret = dlb2_iface_unmap_qid(handle, &cfg);
if (ret < 0)
- DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
@@ -2408,7 +2408,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
* It blindly attempts to unmap all queues.
*/
if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
- DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
+ DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.",
ev_queue->qm_queue.id,
ev_port->qm_port.id);
return 0;
@@ -2434,19 +2434,19 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
RTE_SET_USED(dev);
if (!ev_port->setup_done) {
- DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
+ DLB2_LOG_ERR("dlb2: evport %d is not configured",
ev_port->id);
rte_errno = -EINVAL;
return 0;
}
if (queues == NULL || nb_unlinks == 0) {
- DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
+ DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0");
return 0; /* Ignore and return success */
}
if (ev_port->qm_port.is_directed) {
- DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
+ DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d",
ev_port->id);
rte_errno = 0;
return nb_unlinks; /* as if success */
@@ -2459,7 +2459,7 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
int ret, j;
if (queues[i] >= dlb2->num_queues) {
- DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
+ DLB2_LOG_ERR("dlb2: invalid queue id %d", queues[i]);
rte_errno = -EINVAL;
return i; /* return index of offending queue */
}
@@ -2477,7 +2477,7 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
if (ret) {
- DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
+ DLB2_LOG_ERR("unlink err=%d for port %d queue %d",
ret, ev_port->id, queues[i]);
rte_errno = -ENOENT;
return i; /* return index of offending queue */
@@ -2504,7 +2504,7 @@ dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
RTE_SET_USED(dev);
if (!ev_port->setup_done) {
- DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
+ DLB2_LOG_ERR("dlb2: evport %d is not configured",
ev_port->id);
rte_errno = -EINVAL;
return 0;
@@ -2516,7 +2516,7 @@ dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
}
@@ -2609,7 +2609,7 @@ dlb2_eventdev_start(struct rte_eventdev *dev)
rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
- DLB2_LOG_ERR("bad state %d for dev_start\n",
+ DLB2_LOG_ERR("bad state %d for dev_start",
(int)dlb2->run_state);
rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
return -EINVAL;
@@ -2645,13 +2645,13 @@ dlb2_eventdev_start(struct rte_eventdev *dev)
ret = dlb2_iface_sched_domain_start(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
}
dlb2->run_state = DLB2_RUN_STATE_STARTED;
- DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");
+ DLB2_LOG_DBG("dlb2: sched_domain_start completed OK");
return 0;
}
@@ -2746,7 +2746,7 @@ dlb2_check_enqueue_hw_ldb_credits(struct dlb2_port *qm_port)
DLB2_INC_STAT(
qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
1);
- DLB2_LOG_DBG("ldb credits exhausted\n");
+ DLB2_LOG_DBG("ldb credits exhausted");
return 1; /* credits exhausted */
}
}
@@ -2765,7 +2765,7 @@ dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
DLB2_INC_STAT(
qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
1);
- DLB2_LOG_DBG("dir credits exhausted\n");
+ DLB2_LOG_DBG("dir credits exhausted");
return 1; /* credits exhausted */
}
}
@@ -2783,7 +2783,7 @@ dlb2_check_enqueue_hw_credits(struct dlb2_port *qm_port)
if (unlikely(qm_port->cached_credits == 0)) {
DLB2_INC_STAT(
qm_port->ev_port->stats.traffic.tx_nospc_hw_credits, 1);
- DLB2_LOG_DBG("credits exhausted\n");
+ DLB2_LOG_DBG("credits exhausted");
return 1; /* credits exhausted */
}
}
@@ -2817,7 +2817,7 @@ dlb2_consume_qe_immediate(struct dlb2_port *qm_port, int num)
dlb2_movntdq_single(port_data->pp_addr, qe);
- DLB2_LOG_DBG("dlb2: consume immediate - %d QEs\n", num);
+ DLB2_LOG_DBG("dlb2: consume immediate - %d QEs", num);
qm_port->owed_tokens = 0;
@@ -2888,9 +2888,9 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
}
switch (ev->sched_type) {
case RTE_SCHED_TYPE_ORDERED:
- DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
+ DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED");
if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
- DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d\n",
+ DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d",
*queue_id);
rte_errno = -EINVAL;
return 1;
@@ -2898,18 +2898,18 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
*sched_type = DLB2_SCHED_ORDERED;
break;
case RTE_SCHED_TYPE_ATOMIC:
- DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
+ DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC");
*sched_type = DLB2_SCHED_ATOMIC;
break;
case RTE_SCHED_TYPE_PARALLEL:
- DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
+ DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL");
if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
*sched_type = DLB2_SCHED_ORDERED;
else
*sched_type = DLB2_SCHED_UNORDERED;
break;
default:
- DLB2_LOG_ERR("Unsupported LDB sched type in put_qe\n");
+ DLB2_LOG_ERR("Unsupported LDB sched type in put_qe");
DLB2_INC_STAT(ev_port->stats.tx_invalid, 1);
rte_errno = -EINVAL;
return 1;
@@ -2930,7 +2930,7 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
}
cached_credits = &qm_port->cached_credits;
}
- DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
+ DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED");
*sched_type = DLB2_SCHED_DIRECTED;
}
@@ -3156,7 +3156,7 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
int i;
if (port_id > dlb2->num_ports) {
- DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release\n",
+ DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release",
port_id);
rte_errno = -EINVAL;
return;
@@ -3213,7 +3213,7 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
sw_credit_update:
/* each release returns one credit */
if (unlikely(!ev_port->outstanding_releases)) {
- DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
+ DLB2_LOG_ERR("%s: Outstanding releases underflowed.",
__func__);
return;
}
@@ -3367,7 +3367,7 @@ dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,
* buffer is a mbuf.
*/
if (unlikely(qe->error)) {
- DLB2_LOG_ERR("QE error bit ON\n");
+ DLB2_LOG_ERR("QE error bit ON");
DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
dlb2_consume_qe_immediate(qm_port, 1);
continue; /* Ignore */
@@ -4281,7 +4281,7 @@ dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
}
@@ -4301,7 +4301,7 @@ dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
if (ret < 0) {
- DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n",
+ DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
}
@@ -4392,7 +4392,7 @@ dlb2_drain(struct rte_eventdev *dev)
}
if (i == dlb2->num_ports) {
- DLB2_LOG_ERR("internal error: no LDB ev_ports\n");
+ DLB2_LOG_ERR("internal error: no LDB ev_ports");
return;
}
@@ -4400,7 +4400,7 @@ dlb2_drain(struct rte_eventdev *dev)
rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
if (rte_errno) {
- DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n",
+ DLB2_LOG_ERR("internal error: failed to unlink ev_port %d",
ev_port->id);
return;
}
@@ -4418,7 +4418,7 @@ dlb2_drain(struct rte_eventdev *dev)
/* Link the ev_port to the queue */
ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
if (ret != 1) {
- DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
+ DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d",
ev_port->id, qid);
return;
}
@@ -4433,7 +4433,7 @@ dlb2_drain(struct rte_eventdev *dev)
/* Unlink the ev_port from the queue */
ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
if (ret != 1) {
- DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n",
+ DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d",
ev_port->id, qid);
return;
}
@@ -4448,11 +4448,11 @@ dlb2_eventdev_stop(struct rte_eventdev *dev)
rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
- DLB2_LOG_DBG("Internal error: already stopped\n");
+ DLB2_LOG_DBG("Internal error: already stopped");
rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
return;
} else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
- DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n",
+ DLB2_LOG_ERR("Internal error: bad state %d for dev_stop",
(int)dlb2->run_state);
rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
return;
@@ -4608,7 +4608,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
err = dlb2_iface_open(&dlb2->qm_instance, name);
if (err < 0) {
- DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
+ DLB2_LOG_ERR("could not open event hardware device, err=%d",
err);
return err;
}
@@ -4616,14 +4616,14 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
err = dlb2_iface_get_device_version(&dlb2->qm_instance,
&dlb2->revision);
if (err < 0) {
- DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
+ DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d",
err);
return err;
}
err = dlb2_hw_query_resources(dlb2);
if (err) {
- DLB2_LOG_ERR("get resources err=%d for %s\n",
+ DLB2_LOG_ERR("get resources err=%d for %s",
err, name);
return err;
}
@@ -4646,7 +4646,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
break;
}
if (ret) {
- DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d\n",
+ DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d",
err);
return err;
}
@@ -4654,7 +4654,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
if (err < 0) {
- DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
+ DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d",
err);
return err;
}
@@ -4662,7 +4662,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
/* Complete xtstats runtime initialization */
err = dlb2_xstats_init(dlb2);
if (err) {
- DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
+ DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d", err);
return err;
}
@@ -4692,14 +4692,14 @@ dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
err = dlb2_iface_open(&dlb2->qm_instance, name);
if (err < 0) {
- DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
+ DLB2_LOG_ERR("could not open event hardware device, err=%d",
err);
return err;
}
err = dlb2_hw_query_resources(dlb2);
if (err) {
- DLB2_LOG_ERR("get resources err=%d for %s\n",
+ DLB2_LOG_ERR("get resources err=%d for %s",
err, name);
return err;
}
@@ -4741,9 +4741,8 @@ dlb2_parse_params(const char *params,
struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
if (kvlist == NULL) {
- RTE_LOG(INFO, PMD,
- "Ignoring unsupported parameters when creating device '%s'\n",
- name);
+ DLB2_LOG_INFO("Ignoring unsupported parameters when creating device '%s'",
+ name);
} else {
int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
set_numa_node,
--
2.43.0