// SPDX-License-Identifier: GPL-2.0

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/spinlock.h>

#include "eytzinger.h"
#include "time_stats.h"

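/*
 * Units for pretty-printing durations: each entry gives a suffix and its
 * size in nanoseconds.  The table must be sorted smallest first, since
 * bch2_pick_time_units() walks it in ascending order.
 */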
static const struct time_unit time_units[] = {
	{ "ns",		1		 },
	{ "us",		NSEC_PER_USEC	 },
	{ "ms",		NSEC_PER_MSEC	 },
	{ "s",		NSEC_PER_SEC	 },
	{ "m",          (u64) NSEC_PER_SEC * 60},
	{ "h",          (u64) NSEC_PER_SEC * 3600},
	{ "d",          (u64) NSEC_PER_SEC * 3600 * 24},
	{ "w",          (u64) NSEC_PER_SEC * 3600 * 24 * 7},
	{ "y",          (u64) NSEC_PER_SEC * ((3600 * 24 * 365) + (3600 * (24 / 4)))}, /* 365.25d */
	{ "eon",        U64_MAX          },
};

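/*
 * Pick the largest unit to print @ns in: keep advancing while @ns is at
 * least twice the size of the next unit, so the printed value always has
 * a mantissa of at least 2 (e.g. 1500 stays in ns, 3000 prints as us).
 */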
const struct time_unit *bch2_pick_time_units(u64 ns)
{
	const struct time_unit *u;

	for (u = time_units;
	     u + 1 < time_units + ARRAY_SIZE(time_units) &&
	     ns >= u[1].nsecs << 1;
	     u++)
		;

	return u;
}

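/*
 * Online quantile estimation: entries are stored as an eytzinger0
 * (BFS-order) binary search tree.  Each node nudges its estimate e->m
 * toward the new sample by e->step, and halves e->step once the estimate
 * is within one step of the sample, so each node gradually converges on a
 * quantile of the samples that reach it.
 */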
static void quantiles_update(struct quantiles *q, u64 v)
{
	unsigned i = 0;

	while (i < ARRAY_SIZE(q->entries)) {
		struct quantile_entry *e = q->entries + i;

		if (unlikely(!e->step)) {
			e->m = v;
			e->step = max_t(unsigned, v / 2, 1024);
		} else if (e->m > v) {
			e->m = e->m >= e->step
				? e->m - e->step
				: 0;
		} else if (e->m < v) {
			e->m = e->m + e->step > e->m
				? e->m + e->step
				: U32_MAX;
		}

		if ((e->m > v ? e->m - v : v - e->m) < e->step)
			e->step = max_t(unsigned, e->step / 2, 1);

		if (v >= e->m)
			break;

		i = eytzinger0_child(i, v > e->m);
	}
}

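/*
 * Fold a single (start, end) event into @stats: duration stats from
 * end - start, frequency (inter-arrival time) stats from the gap since
 * the previous event.  Both plain and exponentially weighted
 * mean/variance are maintained.  Caller must hold stats->lock.
 */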
static inline void time_stats_update_one(struct bch2_time_stats *stats,
					 u64 start, u64 end)
{
	u64 duration, freq;
	bool initted = stats->last_event != 0;

	if (time_after64(end, start)) {
		struct quantiles *quantiles = time_stats_to_quantiles(stats);

		duration = end - start;
		mean_and_variance_update(&stats->duration_stats, duration);
		mean_and_variance_weighted_update(&stats->duration_stats_weighted,
				duration, initted, TIME_STATS_MV_WEIGHT);
		stats->max_duration = max(stats->max_duration, duration);
		stats->min_duration = min(stats->min_duration, duration);
		stats->total_duration += duration;

		if (quantiles)
			quantiles_update(quantiles, duration);
	}

	if (stats->last_event && time_after64(end, stats->last_event)) {
		freq = end - stats->last_event;
		mean_and_variance_update(&stats->freq_stats, freq);
		mean_and_variance_weighted_update(&stats->freq_stats_weighted,
				freq, initted, TIME_STATS_MV_WEIGHT);
		stats->max_freq = max(stats->max_freq, freq);
		stats->min_freq = min(stats->min_freq, freq);
	}

	stats->last_event = end;
}

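/*
 * Flush all buffered entries into @stats.  Caller must hold stats->lock;
 * see time_stats_clear_buffer() for the locking wrapper.
 */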
void __bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
				    struct time_stat_buffer *b)
{
	for (struct time_stat_buffer_entry *i = b->entries;
	     i < b->entries + ARRAY_SIZE(b->entries);
	     i++)
		time_stats_update_one(stats, i->start, i->end);
	b->nr = 0;
}

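/* Locked wrapper: flush one CPU's buffer under stats->lock. */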
static noinline void time_stats_clear_buffer(struct bch2_time_stats *stats,
					     struct time_stat_buffer *b)
{
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);
	__bch2_time_stats_clear_buffer(stats, b);
	spin_unlock_irqrestore(&stats->lock, flags);
}

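/*
 * Record one event.  Slow path updates @stats directly under the lock;
 * once events are arriving less than ~32ns apart on average, with more
 * than 1024 samples, events are instead batched in per-CPU buffers and
 * only folded into @stats, under the lock, when a buffer fills up.
 *
 * Illustrative caller (a sketch, not from this file): time a span of
 * code with local_clock() and feed both timestamps in:
 *
 *	u64 start = local_clock();
 *	do_some_work();
 *	__bch2_time_stats_update(&stats, start, local_clock());
 */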
void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
{
	unsigned long flags;

	if (!stats->buffer) {
		spin_lock_irqsave(&stats->lock, flags);
		time_stats_update_one(stats, start, end);

		if (mean_and_variance_weighted_get_mean(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT) < 32 &&
		    stats->duration_stats.n > 1024)
			stats->buffer =
				alloc_percpu_gfp(struct time_stat_buffer,
						 GFP_ATOMIC);
		spin_unlock_irqrestore(&stats->lock, flags);
	} else {
		struct time_stat_buffer *b;

		preempt_disable();
		b = this_cpu_ptr(stats->buffer);

		BUG_ON(b->nr >= ARRAY_SIZE(b->entries));
		b->entries[b->nr++] = (struct time_stat_buffer_entry) {
			.start = start,
			.end = end
		};

		if (unlikely(b->nr == ARRAY_SIZE(b->entries)))
			time_stats_clear_buffer(stats, b);
		preempt_enable();
	}
}

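/*
 * Reset all counters: everything from min_duration onwards is zeroed,
 * which leaves the lock and the per-CPU buffer pointer (laid out before
 * min_duration) intact; the min fields are then re-primed to U64_MAX, as
 * in bch2_time_stats_init(), and any buffered entries are discarded.
 */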
void bch2_time_stats_reset(struct bch2_time_stats *stats)
{
	spin_lock_irq(&stats->lock);
	unsigned offset = offsetof(struct bch2_time_stats, min_duration);
	memset((void *) stats + offset, 0, sizeof(*stats) - offset);
	stats->min_duration = U64_MAX;
	stats->min_freq = U64_MAX;

	if (stats->buffer) {
		int cpu;
		for_each_possible_cpu(cpu)
			per_cpu_ptr(stats->buffer, cpu)->nr = 0;
	}
	spin_unlock_irq(&stats->lock);
}

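/* Free the per-CPU buffers, if they were ever allocated. */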
void bch2_time_stats_exit(struct bch2_time_stats *stats)
{
	free_percpu(stats->buffer);
}

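/*
 * min_duration/min_freq start at U64_MAX so the first sample always
 * becomes the initial minimum.
 */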
void bch2_time_stats_init(struct bch2_time_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->min_duration = U64_MAX;
	stats->min_freq = U64_MAX;
	spin_lock_init(&stats->lock);
}