// SPDX-License-Identifier: GPL-2.0
/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/time.h>

#include <linux/atomic.h>

#include "trace.h"
#include "trace_output.h"

struct header_iter {
	struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
static atomic_t dropped_count;

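/* Clear the tracer's overrun bookkeeping and reset the per-CPU trace buffers. */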
static void mmio_reset_data(struct trace_array *tr)
{
	overrun_detected = false;
	prev_overruns = 0;

	tracing_reset_online_cpus(&tr->array_buffer);
}

static int mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;

	mmio_reset_data(tr);
	enable_mmiotrace();
	return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);

	disable_mmiotrace();
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_reset_data(tr);
}

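/*
 * Emit one "PCIDEV" header line for @dev: bus number and devfn, vendor
 * and device IDs, IRQ, then the base address (with region flags folded
 * in) and size of each of the first seven PCI resources (the six BARs
 * plus the expansion ROM), and finally the bound driver's name.
 */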
static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
			 dev->bus->number, dev->devfn,
			 dev->vendor, dev->device, dev->irq);
	for (i = 0; i < 7; i++) {
		start = dev->resource[i].start;
		trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		start = dev->resource[i].start;
		end = dev->resource[i].end;
		trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		trace_seq_printf(s, " %s\n", drv->name);
	else
		trace_seq_puts(s, " \n");
}

static void destroy_header_iter(struct header_iter *hiter)
{
	if (!hiter)
		return;
	pci_dev_put(hiter->dev);
	kfree(hiter);
}

static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_puts(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);
	iter->private = NULL;
}

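/*
 * Total events lost since the last read: events we failed to reserve
 * space for (dropped_count) plus events the ring buffer has overwritten
 * since the previous call, tracked via prev_overruns.
 */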
static unsigned long count_overruns(struct trace_iterator *iter)
{
	unsigned long cnt = atomic_xchg(&dropped_count, 0);
	unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);

	if (over > prev_overruns)
		cnt += over - prev_overruns;
	prev_overruns = over;
	return cnt;
}

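/*
 * Tracer ->read callback for trace_pipe: report any lost events with a
 * "MARK" line, otherwise stream one "PCIDEV" header line per call until
 * the PCI device list is exhausted, after which normal trace entries
 * are read via mmio_print_line().
 */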
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
				char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warn("mmiotrace has lost events\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}

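/*
 * Format one MMIO access as a text line:
 * "R|W width sec.usec map_id physaddr value pc 0", or an "UNKNOWN" line
 * carrying three raw bytes of the value when the access could not be
 * decoded. The trailing constant 0 is kept for format compatibility.
 */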
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		trace_seq_printf(s,
			"R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		trace_seq_printf(s,
			"W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		trace_seq_printf(s,
			"UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		trace_seq_puts(s, "rw what?\n");
		break;
	}

	return trace_handle_return(s);
}

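/* Format ioremap lifetime events as "MAP" and "UNMAP" text lines. */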
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		trace_seq_printf(s,
			"MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		trace_seq_printf(s,
			"UNMAP %u.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		trace_seq_puts(s, "map what?\n");
		break;
	}

	return trace_handle_return(s);
}

static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg		= print->buf;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;

	/* The trailing newline must be in the message. */
	trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);

	return trace_handle_return(s);
}

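/* Dispatch each trace entry to the printer matching its type. */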
static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
	switch (iter->ent->type) {
	case TRACE_MMIO_RW:
		return mmio_print_rw(iter);
	case TRACE_MMIO_MAP:
		return mmio_print_map(iter);
	case TRACE_PRINT:
		return mmio_print_mark(iter);
	default:
		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
	}
}

static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.start		= mmio_trace_start,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.print_line	= mmio_print_line,
	.noboot		= true,
};

__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);

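/*
 * Reserve a TRACE_MMIO_RW event in the ring buffer and copy *rw into it.
 * If the reservation fails, the event is counted in dropped_count so the
 * loss can be reported to the reader later.
 */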
static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_rw *rw)
{
	struct trace_event_call *call = &event_mmiotrace_rw;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx_flags(0);
	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
					  sizeof(*entry), trace_ctx);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry	= ring_buffer_event_data(event);
	entry->rw			= *rw;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

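/*
 * Entry point from the kmmio fault handler; preemption is already
 * disabled there, so using smp_processor_id() directly is safe.
 */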
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
	__trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_map *map)
{
	struct trace_event_call *call = &event_mmiotrace_map;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx_flags(0);
	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
					  sizeof(*entry), trace_ctx);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry	= ring_buffer_event_data(event);
	entry->map			= *map;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

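	/*
	 * Unlike the rw path, this can run in preemptible context, so
	 * pin the CPU while looking up the per-CPU data pointer.
	 */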
	preempt_disable();
	data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}

int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}