// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

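/*
 * Per-queue stats state: the list of registered callbacks, the lock
 * protecting modifications to that list, and a count of users that have
 * enabled plain I/O accounting via blk_stat_enable_accounting().
 */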
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;
};

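/*
 * Reset @stat so the next sample restarts min/max/mean tracking. min starts
 * at -1ULL (i.e. U64_MAX) so the first min() comparison always takes the
 * incoming sample.
 */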
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/*
 * src is a per-cpu stat whose mean isn't maintained: per-cpu samples only
 * accumulate into ->batch, so the combined mean is computed from that here.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	/* No new samples, or the sample counter would overflow */
	if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

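/*
 * Record one sample: update min/max and accumulate @value into ->batch. The
 * mean is only computed when per-cpu stats are folded in blk_rq_stat_sum().
 */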
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

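/*
 * Called on request completion: compute the I/O time of @rq and feed it to
 * every active callback's per-cpu bucket, as selected by the callback's
 * bucket_fn. Runs under rcu_read_lock() so callbacks can be removed
 * concurrently.
 */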
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

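/*
 * Timer expiry: fold the per-cpu buckets into cb->stat, reset the per-cpu
 * copies, and hand the aggregated window to the owner's timer_fn.
 */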
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

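/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Called when the callback's timer fires, after the per-cpu
 *	buckets have been folded into cb->stat.
 * @bucket_fn: Maps a request to a bucket index in [0, @buckets), or a
 *	negative value to skip the request.
 * @buckets: Number of stat buckets to allocate.
 * @data: Opaque value stored in the callback for the owner's use.
 *
 * The callback gathers nothing until it is added to a queue with
 * blk_stat_add_callback() and its timer is armed, e.g. with
 * blk_stat_activate_msecs() from blk-stat.h.
 *
 * Illustrative sketch only (my_timer_fn, my_bucket_fn and nbuckets are
 * hypothetical, not an in-tree user):
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn, nbuckets, me);
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, 100);
 *
 * where my_timer_fn() consumes cb->stat[] and may re-arm the timer.
 *
 * Return: the allocated callback on success, or NULL on allocation failure.
 */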
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

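/**
 * blk_stat_add_callback() - Add a block statistics callback to be run on a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * The per-cpu buckets are reset before the callback becomes visible to
 * completions. A single callback can only be added to a single queue.
 */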
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

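/**
 * blk_stat_remove_callback() - Remove a block statistics callback from a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * When this returns, the callback's timer is not running and will not be
 * re-armed unless the callback is added back.
 */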
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

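/* The RCU grace period has elapsed; actually free the callback's memory */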
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

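/**
 * blk_stat_free_callback() - Free a block statistics callback.
 * @cb: The callback, may be NULL.
 *
 * @cb must no longer be associated with a request queue, i.e. if it was
 * added with blk_stat_add_callback(), it must have been removed with
 * blk_stat_remove_callback() since then. Freeing is deferred by an RCU
 * grace period so that concurrent blk_stat_add() walkers remain safe.
 */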
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

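/*
 * Drop one accounting reference; QUEUE_FLAG_STATS is cleared once the last
 * reference is gone and no callbacks remain on the queue.
 */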
void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

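/*
 * Take an accounting reference, setting QUEUE_FLAG_STATS on the first one
 * (a registered callback would already have set it otherwise).
 */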
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

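/* Allocate and initialize the per-queue stats bookkeeping */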
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

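/* Free the per-queue stats; all callbacks must already have been removed */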
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}