// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <asm/bug.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <opencsd/c_api/opencsd_c_api.h>

#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "debug.h"
#include "intlist.h"

/* use raw logging */
#ifdef CS_DEBUG_RAW
#define CS_LOG_RAW_FRAMES
#ifdef CS_RAW_PACKED
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
			    OCSD_DFRMTR_PACKED_RAW_OUT)
#else
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
#endif
#endif

/*
 * Assume a maximum of 0.1ns elapsed per instruction. This would be the
 * case with a theoretical 10GHz core executing 1 instruction per cycle.
 * Used to estimate the sample time for synthesized instructions because
 * Coresight only emits a timestamp for a range of instructions rather
 * than per instruction.
 */
const u32 INSTR_PER_NS = 10;

struct cs_etm_decoder {
	void *data;
	void (*packet_printer)(const char *msg, void *data);
	bool suppress_printing;
	dcd_tree_handle_t dcd_tree;
	cs_etm_mem_cb_type mem_access;
	ocsd_datapath_resp_t prev_return;
	const char *decoder_name;
};

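/*
 * Memory access callback handed to the OpenCSD library: the decoder calls it
 * whenever it needs instruction bytes for an address range, and we simply
 * forward the request to the handler registered by the cs-etm front end.
 */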
static u32
cs_etm_decoder__mem_access(const void *context,
			   const ocsd_vaddr_t address,
			   const ocsd_mem_space_acc_t mem_space,
			   const u8 trace_chan_id,
			   const u32 req_size,
			   u8 *buffer)
{
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;

	return decoder->mem_access(decoder->data, trace_chan_id, address,
				   req_size, buffer, mem_space);
}

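/*
 * Register the memory access callback above with the decode tree for the
 * given address range, covering any memory space.
 */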
int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
				      u64 start, u64 end,
				      cs_etm_mem_cb_type cb_func)
{
	decoder->mem_access = cb_func;

	if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
					       OCSD_MEM_SPACE_ANY,
					       cs_etm_decoder__mem_access,
					       decoder))
		return -1;

	return 0;
}

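/*
 * Push a RESET operation through the decode tree so decoding can restart
 * cleanly on the next data block.  Library printing is suppressed while the
 * reset runs to avoid spurious output.
 */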
int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
{
	ocsd_datapath_resp_t dp_ret;

	decoder->prev_return = OCSD_RESP_CONT;
	decoder->suppress_printing = true;
	dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
				      0, 0, NULL, NULL);
	decoder->suppress_printing = false;
	if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
		return -1;

	return 0;
}

int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
			       struct cs_etm_packet *packet)
{
	if (!packet_queue || !packet)
		return -EINVAL;

	/* Nothing to do, might as well just return */
	if (packet_queue->packet_count == 0)
		return 0;
	/*
	 * The queueing process in function cs_etm_decoder__buffer_packet()
	 * increments the tail *before* using it.  This is somewhat
	 * counter-intuitive but it has the advantage of centralizing tail
	 * management at a single location.  Because of that we need to follow
	 * the same heuristic with the head, i.e. we increment it before using
	 * its value.  Otherwise the first element of the packet queue is not
	 * used.
	 */
	packet_queue->head = (packet_queue->head + 1) &
			     (CS_ETM_PACKET_MAX_BUFFER - 1);

	*packet = packet_queue->packet_buffer[packet_queue->head];

	packet_queue->packet_count--;

	return 1;
}

/*
 * Calculate the number of nanoseconds elapsed.
 *
 * instr_count is updated in place with the remainder of the instructions
 * which didn't make up a whole nanosecond.
 */
static u32 cs_etm_decoder__dec_instr_count_to_ns(u32 *instr_count)
{
	const u32 instr_copy = *instr_count;

	*instr_count %= INSTR_PER_NS;
	return instr_copy / INSTR_PER_NS;
}

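/*
 * Copy the ETMv3/PTM register values captured at record time into the
 * OpenCSD configuration structure for this trace source.
 */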
static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
					    ocsd_etmv3_cfg *config)
{
	config->reg_idr = params->etmv3.reg_idr;
	config->reg_ctrl = params->etmv3.reg_ctrl;
	config->reg_ccer = params->etmv3.reg_ccer;
	config->reg_trc_id = params->etmv3.reg_trc_id;
	config->arch_ver = ARCH_V7;
	config->core_prof = profile_CortexA;

	return 0;
}

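/*
 * Bits [7:4] of TRCIDR1 hold the trace architecture minor version
 * (TRCARCHMIN), used below to tell ARCH_V8 and ARCH_AA64 capable PEs apart.
 */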
#define TRCIDR1_TRCARCHMIN_SHIFT 4
#define TRCIDR1_TRCARCHMIN_MASK  GENMASK(7, 4)
#define TRCIDR1_TRCARCHMIN(x)    (((x) & TRCIDR1_TRCARCHMIN_MASK) >> TRCIDR1_TRCARCHMIN_SHIFT)

static enum _ocsd_arch_version cs_etm_decoder__get_etmv4_arch_ver(u32 reg_idr1)
{
	/*
	 * For ETMv4 if the trace minor version is 4 or more then we can assume
	 * the architecture is ARCH_AA64 rather than just V8.
	 * ARCH_V8 = V8 architecture
	 * ARCH_AA64 = Min v8r3 plus additional AA64 PE features
	 */
	return TRCIDR1_TRCARCHMIN(reg_idr1) >= 4 ? ARCH_AA64 : ARCH_V8;
}

static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
					     ocsd_etmv4_cfg *config)
{
	config->reg_configr = params->etmv4.reg_configr;
	config->reg_traceidr = params->etmv4.reg_traceidr;
	config->reg_idr0 = params->etmv4.reg_idr0;
	config->reg_idr1 = params->etmv4.reg_idr1;
	config->reg_idr2 = params->etmv4.reg_idr2;
	config->reg_idr8 = params->etmv4.reg_idr8;
	config->reg_idr9 = 0;
	config->reg_idr10 = 0;
	config->reg_idr11 = 0;
	config->reg_idr12 = 0;
	config->reg_idr13 = 0;
	config->arch_ver = cs_etm_decoder__get_etmv4_arch_ver(params->etmv4.reg_idr1);
	config->core_prof = profile_CortexA;
}

static void cs_etm_decoder__gen_ete_config(struct cs_etm_trace_params *params,
					   ocsd_ete_cfg *config)
{
	config->reg_configr = params->ete.reg_configr;
	config->reg_traceidr = params->ete.reg_traceidr;
	config->reg_idr0 = params->ete.reg_idr0;
	config->reg_idr1 = params->ete.reg_idr1;
	config->reg_idr2 = params->ete.reg_idr2;
	config->reg_idr8 = params->ete.reg_idr8;
	config->reg_devarch = params->ete.reg_devarch;
	config->arch_ver = ARCH_AA64;
	config->core_prof = profile_CortexA;
}

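/*
 * String print callback used by the OpenCSD default error logger: forward
 * each message to the perf packet printer unless printing is suppressed.
 */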
static void cs_etm_decoder__print_str_cb(const void *p_context,
					 const char *msg,
					 const int str_len)
{
	const struct cs_etm_decoder *decoder = p_context;

	if (p_context && str_len && !decoder->suppress_printing)
		decoder->packet_printer(msg, decoder->data);
}

static int
cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
					 struct cs_etm_decoder *decoder)
{
	int ret = 0;

	if (d_params->packet_printer == NULL)
		return -1;

	decoder->packet_printer = d_params->packet_printer;

	/*
	 * Set up a library default logger to process any printers
	 * (packet/raw frame) we add later.
	 */
	ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
	if (ret != 0)
		return -1;

	/* no stdout / err / file output */
	ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
	if (ret != 0)
		return -1;

	/*
	 * Set the string CB for the default logger, passes strings to
	 * perf print logger.
	 */
	ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
					      (void *)decoder,
					      cs_etm_decoder__print_str_cb);
	if (ret != 0)
		return -1;

	return 0;
}

#ifdef CS_LOG_RAW_FRAMES
static void
cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_decoder *decoder)
{
	/* Only log these during a --dump operation */
	if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		/* set up a library default logger to process the
		 * raw frame printer we add later
		 */
		ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);

		/* no stdout / err / file output */
		ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);

		/* set the string CB for the default logger,
		 * passes strings to perf print logger.
		 */
		ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
						(void *)decoder,
						cs_etm_decoder__print_str_cb);

		/* use the built in library printer for the raw frames */
		ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
					      CS_RAW_DEBUG_FLAGS);
	}
}
#else
static void
cs_etm_decoder__init_raw_frame_logging(
		struct cs_etm_decoder_params *d_params __maybe_unused,
		struct cs_etm_decoder *decoder __maybe_unused)
{
}
#endif

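/*
 * Between two hardware timestamps, advance cs_timestamp by the time estimated
 * from the instructions executed since the last packet, clamping it so that
 * it never passes the next real timestamp seen in the trace.
 */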
static ocsd_datapath_resp_t
cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
				  struct cs_etm_packet_queue *packet_queue,
				  const uint8_t trace_chan_id)
{
	u64 estimated_ts;

	/* No timestamp packet has been received, nothing to do */
	if (!packet_queue->next_cs_timestamp)
		return OCSD_RESP_CONT;

	estimated_ts = packet_queue->cs_timestamp +
			cs_etm_decoder__dec_instr_count_to_ns(&packet_queue->instr_count);

	/* Estimated TS can never be higher than the next real one in the trace */
	packet_queue->cs_timestamp = min(packet_queue->next_cs_timestamp, estimated_ts);

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	return OCSD_RESP_WAIT;
}

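/*
 * Handle a real timestamp element from the trace.  The first timestamp seen
 * after a reset or discontinuity is used to back-calculate when the preceding
 * instruction ranges started; later ones simply become the new reference.
 */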
static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
				  const ocsd_generic_trace_elem *elem,
				  const uint8_t trace_chan_id,
				  const ocsd_trc_index_t indx)
{
	struct cs_etm_packet_queue *packet_queue;
	u64 converted_timestamp;
	u64 estimated_first_ts;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * Coresight timestamps are raw timer values which need to be scaled to ns. Assume
	 * 0 is a bad value so don't try to convert it.
	 */
	converted_timestamp = elem->timestamp ?
				cs_etm__convert_sample_time(etmq, elem->timestamp) : 0;

	/*
	 * We've seen a timestamp packet before - simply record the new value.
	 * Function do_soft_timestamp() will report the value to the front end,
	 * hence asking the decoder to keep decoding rather than stopping.
	 */
	if (packet_queue->next_cs_timestamp) {
		/*
		 * What was next is now where new ranges start from, overwriting
		 * any previous estimate in cs_timestamp
		 */
		packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;
		packet_queue->next_cs_timestamp = converted_timestamp;
		return OCSD_RESP_CONT;
	}

	if (!converted_timestamp) {
		/*
		 * Zero timestamps can be seen due to misconfiguration or hardware bugs.
		 * Warn once, and don't try to subtract instr_count as it would result in an
		 * underflow.
		 */
		packet_queue->cs_timestamp = 0;
		if (!cs_etm__etmq_is_timeless(etmq))
			pr_warning_once("Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
					". Decoding may be improved by prepending 'Z' to your current --itrace arguments.\n",
					indx);

	} else if (packet_queue->instr_count / INSTR_PER_NS > converted_timestamp) {
		/*
		 * Sanity check that subtracting the instruction count (in ns)
		 * from the timestamp would not result in an underflow. Warn
		 * and clamp at 0 if it would.
		 */
		packet_queue->cs_timestamp = 0;
		pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
	} else {
		/*
		 * This is the first timestamp we've seen since the beginning of traces
		 * or a discontinuity.  Since timestamp packets are generated *after*
		 * the corresponding range packets, we need to estimate the time at
		 * which instructions started by subtracting the number of instructions
		 * executed from the timestamp. Don't estimate earlier than the last
		 * used timestamp though.
		 */
		estimated_first_ts = converted_timestamp -
					(packet_queue->instr_count / INSTR_PER_NS);
		packet_queue->cs_timestamp = max(packet_queue->cs_timestamp, estimated_first_ts);
	}
	packet_queue->next_cs_timestamp = converted_timestamp;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	/* Halt processing until we are being told to proceed */
	return OCSD_RESP_WAIT;
}

static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
	packet_queue->next_cs_timestamp = 0;
	packet_queue->instr_count = 0;
}

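/*
 * Reserve the next slot in the per-traceID packet ring buffer and initialise
 * it for the given sample type.  Returns OCSD_RESP_WAIT when the buffer
 * becomes full so that the front end drains it before decoding continues.
 */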
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_packet(struct cs_etm_queue *etmq,
			      struct cs_etm_packet_queue *packet_queue,
			      const u8 trace_chan_id,
			      enum cs_etm_sample_type sample_type)
{
	u32 et = 0;
	int cpu;

	if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_FATAL_SYS_ERR;

	if (cs_etm__get_cpu(etmq, trace_chan_id, &cpu) < 0)
		return OCSD_RESP_FATAL_SYS_ERR;

	et = packet_queue->tail;
	et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
	packet_queue->tail = et;
	packet_queue->packet_count++;

	packet_queue->packet_buffer[et].sample_type = sample_type;
	packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
	packet_queue->packet_buffer[et].cpu = cpu;
	packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].instr_count = 0;
	packet_queue->packet_buffer[et].last_instr_taken_branch = false;
	packet_queue->packet_buffer[et].last_instr_size = 0;
	packet_queue->packet_buffer[et].last_instr_type = 0;
	packet_queue->packet_buffer[et].last_instr_subtype = 0;
	packet_queue->packet_buffer[et].last_instr_cond = 0;
	packet_queue->packet_buffer[et].flags = 0;
	packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
	packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;

	if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_WAIT;

	return OCSD_RESP_CONT;
}

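/*
 * Turn an INSTR_RANGE element into a CS_ETM_RANGE packet: record the ISA,
 * address range and last-instruction details, then update the soft timestamp
 * estimate unless we are decoding a timeless (per-thread) trace.
 */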
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
			     struct cs_etm_packet_queue *packet_queue,
			     const ocsd_generic_trace_elem *elem,
			     const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(etmq, packet_queue, trace_chan_id,
					    CS_ETM_RANGE);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &packet_queue->packet_buffer[packet_queue->tail];

	switch (elem->isa) {
	case ocsd_isa_aarch64:
		packet->isa = CS_ETM_ISA_A64;
		break;
	case ocsd_isa_arm:
		packet->isa = CS_ETM_ISA_A32;
		break;
	case ocsd_isa_thumb2:
		packet->isa = CS_ETM_ISA_T32;
		break;
	case ocsd_isa_tee:
	case ocsd_isa_jazelle:
	case ocsd_isa_custom:
	case ocsd_isa_unknown:
	default:
		packet->isa = CS_ETM_ISA_UNKNOWN;
	}

	packet->start_addr = elem->st_addr;
	packet->end_addr = elem->en_addr;
	packet->instr_count = elem->num_instr_range;
	packet->last_instr_type = elem->last_i_type;
	packet->last_instr_subtype = elem->last_i_subtype;
	packet->last_instr_cond = elem->last_instr_cond;

	if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
		packet->last_instr_taken_branch = elem->last_instr_exec;
	else
		packet->last_instr_taken_branch = false;

	packet->last_instr_size = elem->last_instr_sz;

	/* per-thread scenario, no need to generate a timestamp */
	if (cs_etm__etmq_is_timeless(etmq))
		goto out;

	/*
	 * The packet queue is full and we haven't seen a timestamp (had we
	 * seen one the packet queue wouldn't be full).  Let the front end
	 * deal with it.
	 */
	if (ret == OCSD_RESP_WAIT)
		goto out;

	packet_queue->instr_count += elem->num_instr_range;
	/* Tell the front end we have a new timestamp to process */
	ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
						trace_chan_id);
out:
	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_discontinuity(struct cs_etm_queue *etmq,
				     struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	/*
	 * Something happened and who knows when we'll get new traces so
	 * reset time statistics.
	 */
	cs_etm_decoder__reset_timestamp(queue);
	return cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
					     CS_ETM_DISCONTINUITY);
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_queue *etmq,
				 struct cs_etm_packet_queue *queue,
				 const ocsd_generic_trace_elem *elem,
				 const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
					    CS_ETM_EXCEPTION);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &queue->packet_buffer[queue->tail];
	packet->exception_number = elem->exception_number;

	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception_ret(struct cs_etm_queue *etmq,
				     struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	return cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
					     CS_ETM_EXCEPTION_RET);
}

static ocsd_datapath_resp_t
cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
			struct cs_etm_packet_queue *packet_queue,
			const ocsd_generic_trace_elem *elem,
			const uint8_t trace_chan_id)
{
	pid_t tid = -1;

	/*
	 * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
	 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
	 * as the VMID; bit ETM_OPT_CTXTID2 is set in this case.
	 */
	switch (cs_etm__get_pid_fmt(etmq)) {
	case CS_ETM_PIDFMT_CTXTID:
		if (elem->context.ctxt_id_valid)
			tid = elem->context.context_id;
		break;
	case CS_ETM_PIDFMT_CTXTID2:
		if (elem->context.vmid_valid)
			tid = elem->context.vmid;
		break;
	case CS_ETM_PIDFMT_NONE:
	default:
		break;
	}

	if (cs_etm__etmq_set_tid_el(etmq, tid, trace_chan_id,
				    elem->context.exception_level))
		return OCSD_RESP_FATAL_SYS_ERR;

	if (tid == -1)
		return OCSD_RESP_CONT;

	/*
	 * A timestamp is generated after a PE_CONTEXT element, so make sure
	 * to rely on the one that is coming.
	 */
	cs_etm_decoder__reset_timestamp(packet_queue);

	return OCSD_RESP_CONT;
}

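/*
 * Main OpenCSD callback for decoded (generic) trace elements: dispatch each
 * element type to the buffering routine that turns it into a cs_etm packet.
 */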
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
				const void *context,
				const ocsd_trc_index_t indx,
				const u8 trace_chan_id __maybe_unused,
				const ocsd_generic_trace_elem *elem)
{
	ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
	struct cs_etm_queue *etmq = decoder->data;
	struct cs_etm_packet_queue *packet_queue;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	switch (elem->elem_type) {
	case OCSD_GEN_TRC_ELEM_UNKNOWN:
		break;
	case OCSD_GEN_TRC_ELEM_EO_TRACE:
	case OCSD_GEN_TRC_ELEM_NO_SYNC:
	case OCSD_GEN_TRC_ELEM_TRACE_ON:
		resp = cs_etm_decoder__buffer_discontinuity(etmq, packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
		resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
						    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION:
		resp = cs_etm_decoder__buffer_exception(etmq, packet_queue, elem,
							trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
		resp = cs_etm_decoder__buffer_exception_ret(etmq, packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_TIMESTAMP:
		resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
							 trace_chan_id,
							 indx);
		break;
	case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
		resp = cs_etm_decoder__set_tid(etmq, packet_queue,
					       elem, trace_chan_id);
		break;
	/* Unused packet types */
	case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
	case OCSD_GEN_TRC_ELEM_ADDR_NACC:
	case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
	case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
	case OCSD_GEN_TRC_ELEM_EVENT:
	case OCSD_GEN_TRC_ELEM_SWTRACE:
	case OCSD_GEN_TRC_ELEM_CUSTOM:
	case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
	case OCSD_GEN_TRC_ELEM_MEMTRANS:
#if (OCSD_VER_NUM >= 0x010400)
	case OCSD_GEN_TRC_ELEM_INSTRUMENTATION:
#endif
	default:
		break;
	}

	return resp;
}

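/*
 * Build the protocol specific configuration and create one decoder in the
 * tree: a full decoder feeding gen_trace_elem_printer() when decoding, or a
 * packet processor attached to the library printer when only dumping.
 */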
static int
cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
				   struct cs_etm_trace_params *t_params,
				   struct cs_etm_decoder *decoder)
{
	ocsd_etmv3_cfg config_etmv3;
	ocsd_etmv4_cfg trace_config_etmv4;
	ocsd_ete_cfg trace_config_ete;
	void *trace_config;
	u8 csid;

	switch (t_params->protocol) {
	case CS_ETM_PROTO_ETMV3:
	case CS_ETM_PROTO_PTM:
		csid = (t_params->etmv3.reg_idr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
		decoder->decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
							OCSD_BUILTIN_DCD_ETMV3 :
							OCSD_BUILTIN_DCD_PTM;
		trace_config = &config_etmv3;
		break;
	case CS_ETM_PROTO_ETMV4i:
		csid = (t_params->etmv4.reg_traceidr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
		decoder->decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
		trace_config = &trace_config_etmv4;
		break;
	case CS_ETM_PROTO_ETE:
		csid = (t_params->ete.reg_traceidr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_ete_config(t_params, &trace_config_ete);
		decoder->decoder_name = OCSD_BUILTIN_DCD_ETE;
		trace_config = &trace_config_ete;
		break;
	default:
		return -1;
	}

	if (d_params->operation == CS_ETM_OPERATION_DECODE) {
		if (ocsd_dt_create_decoder(decoder->dcd_tree,
					   decoder->decoder_name,
					   OCSD_CREATE_FLG_FULL_DECODER,
					   trace_config, &csid))
			return -1;

		if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
					       cs_etm_decoder__gen_trace_elem_printer,
					       decoder))
			return -1;

		return 0;
	} else if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder->decoder_name,
					   OCSD_CREATE_FLG_PACKET_PROC,
					   trace_config, &csid))
			return -1;

		if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
			return -1;

		return 0;
	}

	return -1;
}

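/*
 * Allocate a decoder, create the decode tree with the frame formatter options
 * requested by the session, set up logging and instantiate one protocol
 * decoder per trace configuration.
 */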
struct cs_etm_decoder *
cs_etm_decoder__new(int decoders, struct cs_etm_decoder_params *d_params,
		    struct cs_etm_trace_params t_params[])
{
	struct cs_etm_decoder *decoder;
	ocsd_dcd_tree_src_t format;
	u32 flags;
	int i, ret;

	if ((!t_params) || (!d_params))
		return NULL;

	decoder = zalloc(sizeof(*decoder));

	if (!decoder)
		return NULL;

	decoder->data = d_params->data;
	decoder->prev_return = OCSD_RESP_CONT;
	format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
					 OCSD_TRC_SRC_SINGLE);
	flags = 0;
	flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
	flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
	flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);

	/*
	 * Drivers may add barrier frames when used with perf, set up to
	 * handle this. Barriers consist of an FSYNC packet repeated 4 times.
	 */
	flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;

	/* Create decode tree for the data source */
	decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);

	if (decoder->dcd_tree == 0)
		goto err_free_decoder;

	/* init library print logging support */
	ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
	if (ret != 0)
		goto err_free_decoder;

	/* init raw frame logging if required */
	cs_etm_decoder__init_raw_frame_logging(d_params, decoder);

	for (i = 0; i < decoders; i++) {
		ret = cs_etm_decoder__create_etm_decoder(d_params,
							 &t_params[i],
							 decoder);
		if (ret != 0)
			goto err_free_decoder;
	}

	return decoder;

err_free_decoder:
	cs_etm_decoder__free(decoder);
	return NULL;
}

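/*
 * Feed a block of trace data to the decode tree.  A WAIT response from the
 * previous call is honoured by flushing before more data is pushed, and we
 * stop early (reporting how much was consumed) whenever the packet buffer
 * fills up again.
 */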
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
				       u64 indx, const u8 *buf,
				       size_t len, size_t *consumed)
{
	int ret = 0;
	ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
	ocsd_datapath_resp_t prev_return = decoder->prev_return;
	size_t processed = 0;
	u32 count;

	while (processed < len) {
		if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_FLUSH,
						   0,
						   0,
						   NULL,
						   NULL);
		} else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_DATA,
						   indx + processed,
						   len - processed,
						   &buf[processed],
						   &count);
			processed += count;
		} else {
			ret = -EINVAL;
			break;
		}

		/*
		 * Return to the input code if the packet buffer is full.
		 * Flushing will get done once the packet buffer has been
		 * processed.
		 */
		if (OCSD_DATA_RESP_IS_WAIT(cur))
			break;

		prev_return = cur;
	}

	decoder->prev_return = cur;
	*consumed = processed;

	return ret;
}

void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
	if (!decoder)
		return;

	ocsd_destroy_dcd_tree(decoder->dcd_tree);
	decoder->dcd_tree = NULL;
	free(decoder);
}

const char *cs_etm_decoder__get_name(struct cs_etm_decoder *decoder)
{
	return decoder->decoder_name;
}