// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons; partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return true;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

/**
 * elevator_match - Check whether @e's name or alias matches @name
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the elevator @e's name or alias matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
	return !strcmp(e->elevator_name, name) ||
		(e->elevator_alias && !strcmp(e->elevator_alias, name));
}

static struct elevator_type *__elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list)
		if (elevator_match(e, name))
			return e;
	return NULL;
}

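/*
 * Look up an elevator type by name and take a reference on it.  Returns
 * NULL if no such elevator is registered or a reference could not be
 * taken.
 */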
static struct elevator_type *elevator_find_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);
	e = __elevator_find(name);
	if (e && (!elevator_tryget(e)))
		e = NULL;
	spin_unlock(&elv_list_lock);
	return e;
}

static const struct kobj_type elv_ktype;

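/*
 * Allocate and initialize an elevator_queue for @q, holding a reference
 * on the elevator type @e.  The embedded kobject must be dropped with
 * kobject_put() to free the structure again.
 */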
struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	__elevator_get(e);
	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

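/*
 * Tear down the current scheduler for @q: drop per-queue io_cq state,
 * free scheduler-owned requests and release the elevator kobject.
 */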
void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	ioc_clear_queue(q);
	blk_mq_sched_free_rqs(q);

	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

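/*
 * Look up a request in the merge hash whose end position matches
 * @offset, i.e. a candidate for a back merge.  Entries that are no
 * longer mergeable are pruned from the hash along the way.
 */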
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

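/*
 * Find a request that @bio can be merged into.  On success *@req is set
 * to the candidate request and the return value indicates the kind of
 * merge (back, front or discard); otherwise ELEVATOR_NO_MERGE is
 * returned.
 */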
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
			      struct list_head *free)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
		list_add(&rq->queuelist, free);
		return true;
	}

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		list_add(&rq->queuelist, free);
		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

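/*
 * A bio was merged into @rq; give the scheduler a chance to update its
 * internal state and keep the merge hash and one-hit cache in sync.
 */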
void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

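/*
 * @next has been merged into @rq and is about to go away; notify the
 * scheduler and reposition @rq in the merge hash since its size changed.
 */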
void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static const struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

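/*
 * Register the active elevator's "iosched" kobject and its sysfs
 * attributes under the queue, optionally emitting a KOBJ_ADD uevent.
 */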
int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->sysfs_lock);

	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
	}
}

int elv_register(struct elevator_type *e)
{
	/* finish request is mandatory */
	if (WARN_ON_ONCE(!e->ops.finish_request))
		return -EINVAL;
	/* insert_requests and dispatch_request are mandatory */
	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
		return -EINVAL;

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (__elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
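
/*
 * Illustrative sketch (not part of this file): an I/O scheduler module
 * normally fills in a static struct elevator_type and registers it from
 * its init hook.  The names below ("foo") are hypothetical:
 *
 *	static struct elevator_type foo_sched = {
 *		.ops = {
 *			.insert_requests	= foo_insert_requests,
 *			.dispatch_request	= foo_dispatch_request,
 *			.finish_request		= foo_finish_request,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&foo_sched);
 *	}
 *
 * elv_unregister(&foo_sched) undoes the registration on module exit.
 */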

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

static inline bool elv_support_iosched(struct request_queue *q)
{
	if (!queue_is_mq(q) ||
	    (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
		return false;
	return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
	if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	if (q->nr_hw_queues != 1 &&
	    !blk_mq_is_shared_tags(q->tag_set->flags))
		return NULL;

	return elevator_find_get("mq-deadline");
}

/*
 * Use the default elevator settings. If the chosen elevator initialization
 * fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err;

	if (!elv_support_iosched(q))
		return;

	WARN_ON_ONCE(blk_queue_registered(q));

	if (unlikely(q->elevator))
		return;

	e = elevator_get_default(q);
	if (!e)
		return;

	/*
	 * We are called before the disk is added, when there is no FS I/O,
	 * so freezing the queue plus canceling dispatch work is enough to
	 * drain any dispatch activity originating from passthrough
	 * requests.  There is then no need to quiesce the queue, which
	 * could add long boot latency, especially when many disks are
	 * involved.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_cancel_work_sync(q);

	err = blk_mq_init_sched(q, e);

	blk_mq_unfreeze_queue(q);

	if (err) {
		pr_warn("\"%s\" elevator initialization failed, "
			"falling back to \"none\"\n", e->elevator_name);
	}

	elevator_put(e);
}

/*
 * Switch to the new_e io scheduler.
 *
 * If switching fails, we are most likely out of memory and unable to
 * restore the old io scheduler, so the queue is left with no io
 * scheduler ("none").
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	if (q->elevator) {
		elv_unregister_queue(q);
		elevator_exit(q);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out_unfreeze;

	ret = elv_register_queue(q, true);
	if (ret) {
		elevator_exit(q);
		goto out_unfreeze;
	}
	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	if (ret) {
		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
			new_e->elevator_name);
	}

	return ret;
}

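/*
 * Switch @q to no scheduler ("none"): unregister and exit the current
 * elevator and restore the queue depth to that of the tag set.
 */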
void elevator_disable(struct request_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	elv_unregister_queue(q);
	elevator_exit(q);
	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
	q->elevator = NULL;
	q->nr_requests = q->tag_set->queue_depth;
	blk_add_trace_msg(q, "elv switch: none");

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, const char *elevator_name)
{
	struct elevator_type *e;
	int ret;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	if (!strncmp(elevator_name, "none", 4)) {
		if (q->elevator)
			elevator_disable(q);
		return 0;
	}

	if (q->elevator && elevator_match(q->elevator->type, elevator_name))
		return 0;

	e = elevator_find_get(elevator_name);
	if (!e)
		return -EINVAL;
	ret = elevator_switch(q, e);
	elevator_put(e);
	return ret;
}

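/*
 * Called from the sysfs store path before switching schedulers: if the
 * requested scheduler is not yet registered, try to load its module via
 * the "<name>-iosched" alias.
 */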
int elv_iosched_load_module(struct gendisk *disk, const char *buf,
			    size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *found;
	const char *name;

	if (!elv_support_iosched(disk->queue))
		return -EOPNOTSUPP;

	strscpy(elevator_name, buf, sizeof(elevator_name));
	name = strstrip(elevator_name);

	spin_lock(&elv_list_lock);
	found = __elevator_find(name);
	spin_unlock(&elv_list_lock);

	if (!found)
		request_module("%s-iosched", name);

	return 0;
}

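/*
 * sysfs store handler for the queue "scheduler" attribute: switch the
 * queue to the named scheduler, or to none.
 */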
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	int ret;

	if (!elv_support_iosched(disk->queue))
		return count;

	strscpy(elevator_name, buf, sizeof(elevator_name));
	ret = elevator_change(disk->queue, strstrip(elevator_name));
	if (!ret)
		return count;
	return ret;
}

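/*
 * sysfs show handler for the queue "scheduler" attribute: list the
 * registered schedulers with the active one in square brackets.
 */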
ssize_t elv_iosched_show(struct gendisk *disk, char *name)
{
	struct request_queue *q = disk->queue;
	struct elevator_queue *eq = q->elevator;
	struct elevator_type *cur = NULL, *e;
	int len = 0;

	if (!elv_support_iosched(q))
		return sprintf(name, "none\n");

	if (!q->elevator) {
		len += sprintf(name+len, "[none] ");
	} else {
		len += sprintf(name+len, "none ");
		cur = eq->type;
	}

	spin_lock(&elv_list_lock);
	list_for_each_entry(e, &elv_list, list) {
		if (e == cur)
			len += sprintf(name+len, "[%s] ", e->elevator_name);
		else
			len += sprintf(name+len, "%s ", e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	return 1;
}

__setup("elevator=", elevator_setup);