// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static CLOSURE_CALLBACK(bch_data_insert_start);

static unsigned int cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

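/*
 * Checksum the data in a bio: crc64_be over each segment, with the result
 * (top bit cleared) stored in the u64 slot just past the key's last pointer.
 */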
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = bvec_kmap_local(&bv);

		csum = crc64_be(csum, d, bv.bv_len);
		kunmap_local(d);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

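/*
 * Journal the keys built so far (unless this is a replace from a cache miss)
 * and insert them into the btree. If the data write isn't finished yet,
 * control returns to bch_data_insert_start() to write the rest.
 */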
static CLOSURE_CALLBACK(bch_data_insert_keys)
{
	closure_type(op, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status = BLK_STS_RESOURCE;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bch_data_insert_keys() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

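/*
 * Bypass path: instead of caching the data, emit pointer-less keys covering
 * the bio's range so any stale cached data in that range is invalidated.
 */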
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu\n",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned int sectors = min(bio_sectors(bio),
					   1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode,
				     bio->bi_iter.bi_sector,
				     sectors));
	}

	op->insert_data_done = true;
	/* get in bch_data_insert() */
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static CLOSURE_CALLBACK(bch_data_insert_error)
{
	closure_type(op, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(&cl->work);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

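/*
 * Core of the insert path: allocate sectors in the cache, split the bio to
 * match the allocation, build a key per fragment, and submit the writes.
 * On allocation failure a writethrough write falls back to bypass and a
 * cache-miss insert just keeps whatever was already written.
 */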
static CLOSURE_CALLBACK(bch_data_insert_start)
{
	closure_type(op, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned int i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_opf = REQ_OP_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
CLOSURE_CALLBACK(bch_data_insert)
{
	closure_type(op, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(&cl->work);
}

/*
 * Congested? Return 0 (not congested) or the limit (in sectors)
 * beyond which we should bypass the cache due to congestion.
 */
unsigned int bch_get_congested(const struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	i -= hweight32(get_random_u32());

	return i > 0 ? i : 1;
}

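/*
 * Fold the task's just-completed run of sequential I/O into its running
 * average (ewma_add() with weight 8) and reset the per-run counter.
 */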
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

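/*
 * Decide whether a request should bypass the cache entirely: honour cache
 * mode and readahead policy, skip unaligned I/O, and bypass large sequential
 * streams or anything arriving while the cache device looks congested.
 */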
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned int mode = cache_mode(dc);
	unsigned int sectors, congested;
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (c->gc_stats.in_use > CUTOFF_CACHE_ADD) {
		/*
		 * If the cached buckets are all clean now, 'true' will be
		 * returned and all requests will bypass the cache device.
		 * Then c->sectors_to_gc never has a chance to go negative,
		 * the gc thread never wakes up, and caching stops working
		 * forever. Call force_wake_up_gc() here to avoid that.
		 */
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_CLEAN &&
		    c->gc_mark_valid)
			force_wake_up_gc(c);

		goto skip;
	}

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Whether a read-ahead or background bio is bypassed depends on
	 * the following:
	 * - if the I/O is for metadata, always cache it and don't bypass
	 * - if the I/O is not metadata, check dc->cache_readahead_policy:
	 *	BCH_CACHE_READA_ALL: cache it and don't bypass
	 *	BCH_CACHE_READA_META_ONLY: don't cache it and bypass
	 * That is, a read-ahead request for metadata always gets cached
	 * (e.g. for gfs2 or xfs).
	 */
	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
		if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
		    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
			goto skip;
	}

	if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
		pr_debug("skipping unaligned io\n");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if (get_random_u32_below(4) == 3)
			goto skip;
		else
			goto rescale;
	}

	congested = bch_get_congested(c);
	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

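/*
 * Per-request state, allocated from the cache set's search mempool and
 * freed in search_free() once the request (and any cache insert) completes.
 */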
struct search {
	/* Stack frame for bio_complete */
	struct closure cl;

	struct bbio bio;
	struct bio *orig_bio;
	struct bio *cache_miss;
	struct bcache_device *d;

	unsigned int insert_bio_sectors;
	unsigned int recoverable:1;
	unsigned int write:1;
	unsigned int read_dirty_data:1;
	unsigned int cache_missed:1;

	struct block_device *orig_bdev;
	unsigned long start_time;

	struct btree_op op;
	struct data_insert_op iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status but not the bio's own status,
	 * so the error doesn't get counted against the cache device; we'll
	 * still reread the data from the backing device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}


/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned int ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned int bio_sectors = bio_sectors(bio);
		unsigned int sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;
		int ret = s->d->cache_miss(b, s, bio, sectors);

		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

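/*
 * Walk the extent btree over the bio's range, calling cache_lookup_fn() for
 * each overlapping key; retried from the workqueue if the btree walk returns
 * -EAGAIN.
 */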
static CLOSURE_CALLBACK(cache_lookup)
{
	closure_type(s, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * If an error occurs while searching the btree, ret is negative. In
	 * that case we must not recover data from the backing device (when
	 * the cache device is dirty), because we don't know whether the bkeys
	 * covered by the read request are all clean.
	 *
	 * When that happens, s->iop.status still holds its initial value from
	 * before s->bio.bio was submitted.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);

		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void backing_request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);
		/*
		 * If a bio has REQ_PREFLUSH for writeback mode, it was
		 * specially assembled in cached_dev_write() for a non-empty
		 * write request that had REQ_PREFLUSH. We don't set
		 * s->iop.status for this failure; the status will be decided
		 * by the result of the bch_data_insert() operation.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %pg: returned bi_status %i\n",
			       dc->bdev, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}
		s->recoverable = false;
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		/* Count on bcache device */
		bio_end_io_acct_remapped(s->orig_bio, s->start_time,
					 s->orig_bdev);
		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

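/*
 * Point s->bio.bio at a clone of the original bio with completion routed
 * through the search closure; end_io_fn varies by caller. The elevated
 * reference count is presumably there so the embedded bio can be completed
 * and resubmitted (e.g. on a read-error retry) without being torn down.
 */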
static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
	/*
	 * bi_end_io can be set separately somewhere else, e.g. the
	 * variants in,
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io = end_io_fn;
	bio->bi_private = &s->cl;

	bio_cnt_set(bio, 3);
}

static CLOSURE_CALLBACK(search_free)
{
	closure_type(s, struct search, cl);

	atomic_dec(&s->iop.c->search_inflight);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, &s->iop.c->search);
}

static inline struct search *search_alloc(struct bio *bio,
		struct bcache_device *d, struct block_device *orig_bdev,
		unsigned long start_time)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->orig_bio = bio;
	s->cache_miss = NULL;
	s->cache_missed = 0;
	s->d = d;
	s->recoverable = 1;
	s->write = op_is_write(bio_op(bio));
	s->read_dirty_data = 0;
	/* Count on the bcache device */
	s->orig_bdev = orig_bdev;
	s->start_time = start_time;
	s->iop.c = d->c;
	s->iop.bio = NULL;
	s->iop.inode = d->id;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->iop.write_prio = 0;
	s->iop.status = 0;
	s->iop.flags = 0;
	s->iop.flush_journal = op_is_flush(bio->bi_opf);
	s->iop.wq = bcache_wq;

	return s;
}

/* Cached devices */

static CLOSURE_CALLBACK(cached_dev_bio_complete)
{
	closure_type(s, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	cached_dev_put(dc);
	search_free(&cl->work);
}

/* Process reads */

static CLOSURE_CALLBACK(cached_dev_read_error_done)
{
	closure_type(s, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(&cl->work);
}

static CLOSURE_CALLBACK(cached_dev_read_error)
{
	closure_type(s, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering a failed read from the cache device may return
	 * stale data. So read failure recovery is only permitted when the
	 * read request hit clean data in the cache device, or when a cache
	 * read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_read_error_done, NULL);
}

static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
{
	closure_type(s, struct search, cl);
	struct bcache_device *d = s->d;

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(&cl->work);
	closure_put(&d->cl);
}

static CLOSURE_CALLBACK(cached_dev_read_done)
{
	closure_type(s, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bio_clone_blkg_association(s->iop.bio, s->cache_miss);
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	closure_get(&dc->disk.cl);
	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static CLOSURE_CALLBACK(cached_dev_read_done_bh)
{
	closure_type(s, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

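/*
 * Handle the portion of a read that missed the cache: split off the missing
 * sectors, reserve a replace key so the eventual insert can detect races,
 * and (when memory allows) read into a bounce bio so the data can also be
 * inserted into the cache.
 */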
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{
	int ret = MAP_CONTINUE;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;
	unsigned int size_limit;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	/* Limited by the maximum valid replace key size and cache_bio's bvec count */
	size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
			   (1 << KEY_SIZE_BITS) - 1);
	s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
			      &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(miss->bi_bdev,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			0, GFP_NOWAIT, &dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = backing_request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = backing_request_endio;
	miss->bi_private = &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static CLOSURE_CALLBACK(cached_dev_write_complete)
{
	closure_type(s, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(&cl->work);
}

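/*
 * Write path decision: bypass writes go straight to the backing device and
 * only invalidate the cache; writeback writes go to the cache alone (with a
 * separate flush to the backing device if the original had REQ_PREFLUSH);
 * everything else is written through to both devices.
 */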
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(dc->bdev))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(bio->bi_bdev, 0,
						 REQ_OP_WRITE | REQ_PREFLUSH,
						 GFP_NOIO, &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
					     &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static CLOSURE_CALLBACK(cached_dev_nodata)
{
	closure_type(s, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

struct detached_dev_io_private {
	struct bcache_device *d;
	unsigned long start_time;
	bio_end_io_t *bi_end_io;
	void *bi_private;
	struct block_device *orig_bdev;
};

static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	/* Count on the bcache device */
	bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}

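/*
 * Used when the backing device isn't being driven through the cache: remember
 * the original completion and accounting info in a small private struct and
 * submit the bio directly to the backing device.
 */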
static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
		struct block_device *orig_bdev, unsigned long start_time)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * No need to call closure_get(&dc->disk.cl): the upper layer has
	 * already opened the bcache device, which took that reference via
	 * closure_get(&dc->disk.cl).
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	if (!ddip) {
		bio->bi_status = BLK_STS_RESOURCE;
		bio->bi_end_io(bio);
		return;
	}

	ddip->d = d;
	/* Count on the bcache device */
	ddip->orig_bdev = orig_bdev;
	ddip->start_time = start_time;
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !bdev_max_discard_sectors(dc->bdev))
		bio->bi_end_io(bio);
	else
		submit_bio_noacct(bio);
}

static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * The mutex bch_register_lock may be contended by other parallel
	 * requesters, or by attach/detach operations on another backing
	 * device. Waiting for the mutex could add seconds or more to the I/O
	 * request latency. To avoid that, if mutex_trylock() fails, only the
	 * writeback rate of the current cached device is set to 1, and
	 * __update_writeback_rate() will decide the writeback rate of the
	 * other cached devices (remember that c->idle_counter is 0 already).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (!c->devices[i])
				continue;

			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/*
			 * Set the writeback rate to the default minimum value,
			 * then let update_writeback_rate() decide the upcoming
			 * rate.
			 */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
}

/* Cached devices - read & write stuff */

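/*
 * submit_bio entry point for bcache devices that have a backing device:
 * account the I/O, remap the bio to the backing device, and either run it
 * through the search/cache machinery or pass it straight through when no
 * cache is attached.
 */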
void cached_dev_submit_bio(struct bio *bio)
{
	struct search *s;
	struct block_device *orig_bdev = bio->bi_bdev;
	struct bcache_device *d = orig_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	unsigned long start_time;
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	if (likely(d->c)) {
		if (atomic_read(&d->c->idle_counter))
			atomic_set(&d->c->idle_counter, 0);
		/*
		 * If the cache set's at_max_writeback_rate is true and new
		 * I/O arrives, quit the max writeback rate for all cached
		 * devices attached to this cache set, and set
		 * at_max_writeback_rate to false.
		 */
		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
			atomic_set(&d->c->at_max_writeback_rate, 0);
			quit_max_writeback_rate(d->c, dc);
		}
	}

	start_time = bio_start_io_acct(bio);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d, orig_bdev, start_time);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * submit_bio_noacct
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio, orig_bdev, start_time);
}

static int cached_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	if (dc->io_disable)
		return -EIO;
	if (!dc->bdev->bd_disk->fops->ioctl)
		return -ENOTTY;
	return dc->bdev->bd_disk->fops->ioctl(dc->bdev, mode, cmd, arg);
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

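/*
 * Flash-only volumes live entirely in the cache set and have no backing
 * device, so a cache miss simply zero-fills the missing range.
 */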
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static CLOSURE_CALLBACK(flash_dev_nodata)
{
	closure_type(s, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

void flash_dev_submit_bio(struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under submit_bio_noacct
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					     &KEY(d->id, bio->bi_iter.bi_sector, 0),
					     &KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}