// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/prefetch.h>
#include <linux/task_io_accounting_ops.h>

/* O_DIRECT reads */

struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;
	bool				should_dirty;
	struct bch_read_bio		rbio;
};

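/*
 * Drop our reference to the bio's pages: if we dirtied them before submission
 * (reads into user mappings), bio_check_pages_dirty() re-dirties any pages
 * that were cleaned in the meantime and frees the bio; otherwise release the
 * pages and put the bio ourselves.
 */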
static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
	if (check_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

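/*
 * Closure callback run when the last bio of an asynchronous O_DIRECT read
 * completes: deliver the result to the kiocb and release the pages pinned by
 * the embedded bio.
 */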
static CLOSURE_CALLBACK(bch2_dio_read_complete)
{
	closure_type(dio, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret);
	bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}

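/* Per-bio completion: record any I/O error and drop a ref on the dio closure. */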
static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

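/*
 * Completion for the additional bios a large read was split into: unlike the
 * embedded bio, these own their page references, so release (or re-dirty)
 * them here as well.
 */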
static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	bch2_direct_IO_read_endio(bio);
	bio_check_or_release(bio, should_dirty);
}

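/*
 * Main O_DIRECT read path: the request is clamped to i_size (rounded up to
 * the filesystem block size for the actual I/O) and split across as many bios
 * as needed to map the whole iov_iter. The dio_read closure tracks
 * outstanding bios; synchronous kiocbs wait here, asynchronous ones complete
 * in bch2_dio_read_complete().
 */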
static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	/* bios must be 512 byte aligned: */
	if ((offset|iter->count) & (SECTOR_SIZE - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	if (shorten >= iter->count)
		shorten = 0;
	iter->count -= shorten;

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_READ,
			       GFP_KERNEL,
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
		dio->cl.closure_get_happened = true;
	}

	dio->req	= req;
	dio->ret	= ret;
	/*
	 * This is one of the sketchier things I've encountered: we have to skip
	 * the dirtying of requests that are internal from the kernel (i.e. from
	 * loopback), because we'll deadlock on page_lock.
	 */
	dio->should_dirty = iter_is_iovec(iter);

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(NULL,
				       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
				       REQ_OP_READ,
				       GFP_KERNEL,
				       &c->bio_read);
		bio->bi_end_io		= bch2_direct_IO_read_split_endio;
start:
		bio->bi_opf		= REQ_OP_READ|REQ_SYNC;
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;

		if (dio->should_dirty)
			bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}

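/*
 * ->read_iter() entry point: O_DIRECT reads first write back and wait on any
 * dirty pagecache in the range, then go through bch2_direct_IO_read();
 * buffered reads use filemap_read() under the pagecache add lock.
 */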
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret = 0;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		if (unlikely(mapping->nrpages)) {
			ret = filemap_write_and_wait_range(mapping,
						iocb->ki_pos,
						iocb->ki_pos + count - 1);
			if (ret < 0)
				goto out;
		}

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(inode);
		ret = filemap_read(iocb, iter, ret);
		bch2_pagecache_add_put(inode);
	}
out:
	return bch2_err_class(ret);
}

/* O_DIRECT writes */

struct dio_write {
	struct kiocb			*req;
	struct address_space		*mapping;
	struct bch_inode_info		*inode;
	struct mm_struct		*mm;
	const struct iovec		*iov;
	unsigned			loop:1,
					extending:1,
					sync:1,
					flush:1;
	struct quota_res		quota_res;
	u64				written;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

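/*
 * Check whether every extent in [offset, offset + size) already has at least
 * nr_replicas replicas and - unless the new write will itself be compressed -
 * contains no compressed data. Used when a disk reservation can't be
 * obtained, to decide whether the write can safely overwrite existing
 * allocated space in place.
 */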
static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
				       u64 offset, u64 size,
				       unsigned nr_replicas, bool compressed)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 end = offset + size;
	u32 snapshot;
	bool ret = true;
	int err;
retry:
	bch2_trans_begin(trans);

	err = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (err)
		goto err;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
			   SPOS(inum.inum, offset, snapshot),
			   BTREE_ITER_slots, k, err) {
		if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
			break;

		if (k.k->p.snapshot != snapshot ||
		    nr_replicas > bch2_bkey_replicas(c, k) ||
		    (!compressed && bch2_bkey_sectors_compressed(k))) {
			ret = false;
			break;
		}
	}

	offset = iter.pos.offset;
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(err, BCH_ERR_transaction_restart))
		goto retry;
	bch2_trans_put(trans);

	return err ? false : ret;
}

static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct bch_inode_info *inode = dio->inode;
	struct bio *bio = &dio->op.wbio.bio;

	return bch2_check_range_allocated(c, inode_inum(inode),
				dio->op.pos.offset, bio_sectors(bio),
				dio->op.opts.data_replicas,
				dio->op.opts.compression != 0);
}

static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);

/*
 * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
 * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
 * caller's stack, we're not guaranteed that it will live for the duration of
 * the IO:
 */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
	struct iovec *iov = dio->inline_vecs;

	/*
	 * iov_iter has a single embedded iovec - nothing to do:
	 */
	if (iter_is_ubuf(&dio->iter))
		return 0;

	/*
	 * We don't currently handle non-iovec iov_iters here - return an error,
	 * and we'll fall back to doing the IO synchronously:
	 */
	if (!iter_is_iovec(&dio->iter))
		return -1;

	if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
		dio->iov = iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
				    GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
	}

	memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
	dio->iter.__iov = iov;
	return 0;
}

static CLOSURE_CALLBACK(bch2_dio_write_flush_done)
{
	closure_type(dio, struct dio_write, op.cl);
	struct bch_fs *c = dio->op.c;

	closure_debug_destroy(cl);

	dio->op.error = bch2_journal_error(&c->journal);

	bch2_dio_write_done(dio);
}

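/*
 * O_DSYNC handling: after the write has completed, flush the journal up to
 * the inode's last journal sequence number and flush any devices with pending
 * nocow writes. Synchronous requests wait here; asynchronous ones continue in
 * bch2_dio_write_flush_done().
 */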
static noinline void bch2_dio_write_flush(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct bch_inode_unpacked inode;
	int ret;

	dio->flush = 0;

	closure_init(&dio->op.cl, NULL);

	if (!dio->op.error) {
		ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
		if (ret) {
			dio->op.error = ret;
		} else {
			bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq,
						     &dio->op.cl);
			bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
		}
	}

	if (dio->sync) {
		closure_sync(&dio->op.cl);
		closure_debug_destroy(&dio->op.cl);
	} else {
		continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
	}
}

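/*
 * Final teardown for an O_DIRECT write: issue the journal flush if one is
 * still pending, drop the pagecache block and write ref, end inode_dio, and
 * either return the byte count (sync) or complete the kiocb (async).
 */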
static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct bch_inode_info *inode = dio->inode;
	bool sync = dio->sync;
	long ret;

	if (unlikely(dio->flush)) {
		bch2_dio_write_flush(dio);
		if (!sync)
			return -EIOCBQUEUED;
	}

	bch2_pagecache_block_put(inode);

	kfree(dio->iov);

	ret = dio->op.error ?: ((long) dio->written << 9);
	bio_put(&dio->op.wbio.bio);

	bch2_write_ref_put(c, BCH_WRITE_REF_dio_write);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (ret < 0)
		ret = bch2_err_class(ret);

	if (!sync) {
		req->ki_complete(req, ret);
		ret = -EIOCBQUEUED;
	}
	return ret;
}

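/*
 * Per-iteration bookkeeping after a write op completes: advance ki_pos,
 * update i_size for extending writes, account sectors against quota, and
 * release the pages pinned by the bio.
 */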
static __always_inline void bch2_dio_write_end(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct bch_inode_info *inode = dio->inode;
	struct bio *bio = &dio->op.wbio.bio;

	req->ki_pos	+= (u64) dio->op.written << 9;
	dio->written	+= dio->op.written;

	if (dio->extending) {
		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);
	}

	if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
		mutex_lock(&inode->ei_quota_lock);
		__bch2_i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
		__bch2_quota_reservation_put(c, inode, &dio->quota_res);
		mutex_unlock(&inode->ei_quota_lock);
	}

	bio_release_pages(bio, false);

	if (unlikely(dio->op.error))
		set_bit(EI_INODE_ERROR, &inode->ei_flags);
}

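/*
 * Core O_DIRECT write loop: pin the next chunk of user pages, reserve quota
 * and disk space (or verify that the range is already allocated), and submit
 * a write op. Synchronous writes iterate here; asynchronous writes continue
 * from the op's end_io callback.
 */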
static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct address_space *mapping = dio->mapping;
	struct bch_inode_info *inode = dio->inode;
	struct bch_io_opts opts;
	struct bio *bio = &dio->op.wbio.bio;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	while (1) {
		iter_count = dio->iter.count;

		EBUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = bch2_write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, opts);
		dio->op.end_io		= sync
			? NULL
			: bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.subvol		= inode->ei_inum.subvol;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
		dio->op.devs_need_flush	= &inode->ei_devs_need_flush;

		if (sync)
			dio->op.flags |= BCH_WRITE_SYNC;
		dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;

		ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
						 bio_sectors(bio), true);
		if (unlikely(ret))
			goto err;

		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_dio_write_check_allocated(dio))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (unlikely(dio->iter.count) &&
		    !dio->sync &&
		    !dio->loop &&
		    bch2_dio_write_copy_iov(dio))
			dio->sync = sync = true;

		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (!sync)
			return -EIOCBQUEUED;

		bch2_dio_write_end(dio);

		if (likely(!dio->iter.count) || dio->op.error)
			break;

		bio_reset(bio, NULL, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	}
out:
	return bch2_dio_write_done(dio);
err:
	dio->op.error = ret;

	bio_release_pages(bio, false);

	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	goto out;
}

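/*
 * Resume an asynchronous write from op completion context: we may be running
 * on a kernel thread, so borrow the submitter's mm while pinning the next
 * batch of user pages.
 */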
static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
{
	struct mm_struct *mm = dio->mm;

	bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);

	if (mm)
		kthread_use_mm(mm);
	bch2_dio_write_loop(dio);
	if (mm)
		kthread_unuse_mm(mm);
}

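/*
 * Write op end_io callback for asynchronous O_DIRECT writes: do end-of-
 * iteration accounting, then either finish the write or submit the next
 * chunk.
 */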
static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	bch2_dio_write_end(dio);

	if (likely(!dio->iter.count) || dio->op.error)
		bch2_dio_write_done(dio);
	else
		bch2_dio_write_continue(dio);
}

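/*
 * ->write_iter() path for O_DIRECT writes: perform the usual write checks,
 * shoot down any cached pages in the range, and hand off to
 * bch2_dio_write_loop(). The inode lock is held for the duration of the write
 * only if it extends i_size.
 */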
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_dio_write))
		return -EROFS;

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err_put_write_ref;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err_put_write_ref;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err_put_write_ref;

	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
		ret = -EINVAL;
		goto err_put_write_ref;
	}

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(inode);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_WRITE | REQ_SYNC | REQ_IDLE,
			       GFP_KERNEL,
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	dio->req		= req;
	dio->mapping		= mapping;
	dio->inode		= inode;
	dio->mm			= current->mm;
	dio->iov		= NULL;
	dio->loop		= false;
	dio->extending		= extending;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->flush		= iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;
	dio->op.c		= c;

	if (unlikely(mapping->nrpages)) {
		ret = bch2_write_invalidate_inode_pages_range(mapping,
						req->ki_pos,
						req->ki_pos + iter->count - 1);
		if (unlikely(ret))
			goto err_put_bio;
	}

	ret = bch2_dio_write_loop(dio);
out:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(inode);
	bio_put(bio);
	inode_dio_end(&inode->v);
err_put_write_ref:
	bch2_write_ref_put(c, BCH_WRITE_REF_dio_write);
	goto out;
}

void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
{
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
}

int bch2_fs_fs_io_direct_init(struct bch_fs *c)
{
	if (bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_read_bioset_init;

	if (bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_write_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */