/linux-6.12.1/include/linux/ |
D | bio.h |
    22  #define bio_prio(bio)  (bio)->bi_ioprio
    23  #define bio_set_prio(bio, prio)  ((bio)->bi_ioprio = prio)
    25  #define bio_iter_iovec(bio, iter) \
    26          bvec_iter_bvec((bio)->bi_io_vec, (iter))
    28  #define bio_iter_page(bio, iter) \
    29          bvec_iter_page((bio)->bi_io_vec, (iter))
    30  #define bio_iter_len(bio, iter) \
    31          bvec_iter_len((bio)->bi_io_vec, (iter))
    32  #define bio_iter_offset(bio, iter) \
    33          bvec_iter_offset((bio)->bi_io_vec, (iter))
    [all …]
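For context, the iterator macros above are thin wrappers around the bvec_iter helpers; a typical consumer walks a bio segment by segment with bio_for_each_segment(). A minimal sketch, not taken from the files listed here (count_bio_bytes() is a made-up name):

/* Sketch only: count_bio_bytes() is hypothetical, not part of bio.h. */
#include <linux/bio.h>

static unsigned int count_bio_bytes(struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned int bytes = 0;

        /* bio_for_each_segment() iterates a copy of bi_iter, so the bio is not consumed. */
        bio_for_each_segment(bv, bio, iter)
                bytes += bv.bv_len;

        return bytes;
}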
|
D | bio-integrity.h |
    17  struct bio *bip_bio;  /* parent bio */
    42  static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
    44  if (bio->bi_opf & REQ_INTEGRITY)  (in bio_integrity())
    45  return bio->bi_integrity;  (in bio_integrity())
    50  static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
    52  struct bio_integrity_payload *bip = bio_integrity(bio);  (in bio_integrity_flagged())
    71  struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
    73  int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
    75  int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
    76  void bio_integrity_unmap_user(struct bio *bio);
    [all …]
|
/linux-6.12.1/block/ |
D | bio.c |
    31  struct bio *free_list;
    32  struct bio *free_list_irq;
    114  return bs->front_pad + sizeof(struct bio) + bs->back_pad;  (in bs_bio_slab_size())
    213  void bio_uninit(struct bio *bio)
    216  if (bio->bi_blkg) {  (in bio_uninit())
    217  blkg_put(bio->bi_blkg);  (in bio_uninit())
    218  bio->bi_blkg = NULL;  (in bio_uninit())
    221  if (bio_integrity(bio))  (in bio_uninit())
    222  bio_integrity_free(bio);  (in bio_uninit())
    224  bio_crypt_free_ctx(bio);  (in bio_uninit())
    [all …]
|
D | blk-map.c |
    48  static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
    53  bio_for_each_segment_all(bvec, bio, iter_all) {  (in bio_copy_from_iter())
    79  static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
    84  bio_for_each_segment_all(bvec, bio, iter_all) {  (in bio_copy_to_iter())
    109  static int bio_uncopy_user(struct bio *bio)
    111  struct bio_map_data *bmd = bio->bi_private;  (in bio_uncopy_user())
    122  else if (bio_data_dir(bio) == READ)  (in bio_uncopy_user())
    123  ret = bio_copy_to_iter(bio, bmd->iter);  (in bio_uncopy_user())
    125  bio_free_pages(bio);  (in bio_uncopy_user())
    136  struct bio *bio;  (in bio_copy_user_iov())
    [all …]
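The blk-map code above uses bio_for_each_segment_all(), which visits every bvec that was added to a bio rather than what remains in bi_iter. A short sketch under that assumption (zero_owned_bio() is hypothetical and must own the bio):

/* Sketch only: zero_owned_bio() is a hypothetical helper. */
#include <linux/bio.h>
#include <linux/highmem.h>

static void zero_owned_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        /*
         * Unlike bio_for_each_segment(), the *_all variant walks every bvec
         * added to the bio, ignoring bi_iter, so it is only safe for code
         * that created the bio (for example in its completion handler).
         */
        bio_for_each_segment_all(bvec, bio, iter_all)
                memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}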
|
D | blk-merge.c |
    21  static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
    23  *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);  (in bio_get_first_bvec())
    26  static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
    28  struct bvec_iter iter = bio->bi_iter;  (in bio_get_last_bvec())
    31  bio_get_first_bvec(bio, bv);  (in bio_get_last_bvec())
    32  if (bv->bv_len == bio->bi_iter.bi_size)  (in bio_get_last_bvec())
    35  bio_advance_iter(bio, &iter, iter.bi_size);  (in bio_get_last_bvec())
    42  *bv = bio->bi_io_vec[idx];  (in bio_get_last_bvec())
    53  struct request *prev_rq, struct bio *prev, struct bio *next)  (in bio_will_gap())
    66  bio_get_first_bvec(prev_rq->bio, &pb);  (in bio_will_gap())
    [all …]
|
D | blk.h |
    40  int __bio_queue_enter(struct request_queue *q, struct bio *bio);
    41  void submit_bio_noacct_nocheck(struct bio *bio);
    42  void bio_await_chain(struct bio *bio);
    68  static inline int bio_queue_enter(struct bio *bio)
    70  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  (in bio_queue_enter())
    74  return __bio_queue_enter(q, bio);  (in bio_queue_enter())
    205  void bio_integrity_free(struct bio *bio);
    213  bool __bio_integrity_endio(struct bio *bio);
    214  static inline bool bio_integrity_endio(struct bio *bio)
    216  struct bio_integrity_payload *bip = bio_integrity(bio);  (in bio_integrity_endio())
    [all …]
|
D | bounce.c |
    76  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
    102  static void bounce_end_io(struct bio *bio)
    104  struct bio *bio_orig = bio->bi_private;  (in bounce_end_io())
    112  bio_for_each_segment_all(bvec, bio, iter_all) {  (in bounce_end_io())
    121  bio_orig->bi_status = bio->bi_status;  (in bounce_end_io())
    123  bio_put(bio);  (in bounce_end_io())
    126  static void bounce_end_io_write(struct bio *bio)
    128  bounce_end_io(bio);  (in bounce_end_io_write())
    131  static void bounce_end_io_read(struct bio *bio)
    133  struct bio *bio_orig = bio->bi_private;  (in bounce_end_io_read())
    [all …]
|
D | blk-core.c |
    327  int __bio_queue_enter(struct request_queue *q, struct bio *bio)
    330  struct gendisk *disk = bio->bi_bdev->bd_disk;  (in __bio_queue_enter())
    332  if (bio->bi_opf & REQ_NOWAIT) {  (in __bio_queue_enter())
    335  bio_wouldblock_error(bio);  (in __bio_queue_enter())
    357  bio_io_error(bio);  (in __bio_queue_enter())
    502  static inline void bio_check_ro(struct bio *bio)
    504  if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {  (in bio_check_ro())
    505  if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))  (in bio_check_ro())
    508  if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))  (in bio_check_ro())
    511  bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);  (in bio_check_ro())
    [all …]
|
D | blk-crypto-internal.h |
    31  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
    37  struct bio *bio)  (in bio_crypt_ctx_back_mergeable())
    40  bio->bi_crypt_context);  (in bio_crypt_ctx_back_mergeable())
    44  struct bio *bio)  (in bio_crypt_ctx_front_mergeable())
    46  return bio_crypt_ctx_mergeable(bio->bi_crypt_context,  (in bio_crypt_ctx_front_mergeable())
    47  bio->bi_iter.bi_size, req->crypt_ctx);  (in bio_crypt_ctx_front_mergeable())
    97  struct bio *bio)  (in bio_crypt_rq_ctx_compatible())
    103  struct bio *bio)  (in bio_crypt_ctx_front_mergeable())
    109  struct bio *bio)  (in bio_crypt_ctx_back_mergeable())
    134  void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
    [all …]
|
D | blk-lib.c |
    38  struct bio *blk_alloc_discard_bio(struct block_device *bdev,
    42  struct bio *bio;  (in blk_alloc_discard_bio())
    47  bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);  (in blk_alloc_discard_bio())
    48  if (!bio)  (in blk_alloc_discard_bio())
    50  bio->bi_iter.bi_sector = *sector;  (in blk_alloc_discard_bio())
    51  bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;  (in blk_alloc_discard_bio())
    60  return bio;  (in blk_alloc_discard_bio())
    64  sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)  (in __blkdev_issue_discard())
    66  struct bio *bio;  (in __blkdev_issue_discard())
    68  while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,  (in __blkdev_issue_discard())
    [all …]
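The discard helpers above are normally driven by a small wrapper that builds the bio chain under a plug and then waits for it. A sketch of that calling pattern (discard_range() is a hypothetical name):

/* Sketch only: discard_range() is hypothetical. */
#include <linux/bio.h>
#include <linux/blkdev.h>

static int discard_range(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);     /* waits for the chained bios too */
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}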
|
D | bio-integrity.c |
    31  void bio_integrity_free(struct bio *bio)
    33  struct bio_integrity_payload *bip = bio_integrity(bio);  (in bio_integrity_free())
    34  struct bio_set *bs = bio->bi_pool;  (in bio_integrity_free())
    44  bio->bi_integrity = NULL;  (in bio_integrity_free())
    45  bio->bi_opf &= ~REQ_INTEGRITY;  (in bio_integrity_free())
    58  struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
    63  struct bio_set *bs = bio->bi_pool;  (in bio_integrity_alloc())
    66  if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))  (in bio_integrity_alloc())
    93  bip->bip_bio = bio;  (in bio_integrity_alloc())
    94  bio->bi_integrity = bip;  (in bio_integrity_alloc())
    [all …]
|
D | blk-rq-qos.h |
    38  void (*throttle)(struct rq_qos *, struct bio *);
    39  void (*track)(struct rq_qos *, struct request *, struct bio *);
    40  void (*merge)(struct rq_qos *, struct request *, struct bio *);
    44  void (*done_bio)(struct rq_qos *, struct bio *);
    45  void (*cleanup)(struct rq_qos *, struct bio *);
    103  void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
    107  void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
    108  void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
    109  void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
    110  void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
    [all …]
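The function pointers above form the rq_qos hook table; a policy fills in only the callbacks it needs and leaves the rest NULL. A sketch, assuming code living inside block/ (my_throttle() and my_done_bio() are hypothetical):

/* Sketch only: a minimal rq_qos callback table. */
#include <linux/blkdev.h>
#include "blk-rq-qos.h"          /* block-internal header, as above */

static void my_throttle(struct rq_qos *rqos, struct bio *bio)
{
        /* called before a bio is submitted; may sleep to rate-limit it */
}

static void my_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        /* called on completion; account the finished bio */
}

static const struct rq_qos_ops my_qos_ops = {
        .throttle = my_throttle,
        .done_bio = my_done_bio,
};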
|
D | blk-crypto-fallback.c |
    52  struct bio *bio;
    144  static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
    146  struct bio *src_bio = enc_bio->bi_private;  (in blk_crypto_fallback_encrypt_endio())
    160  static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
    165  struct bio *bio;  (in blk_crypto_fallback_clone_bio())
    167  bio = bio_kmalloc(nr_segs, GFP_NOIO);  (in blk_crypto_fallback_clone_bio())
    168  if (!bio)  (in blk_crypto_fallback_clone_bio())
    170  bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,  (in blk_crypto_fallback_clone_bio())
    173  bio_set_flag(bio, BIO_REMAPPED);  (in blk_crypto_fallback_clone_bio())
    174  bio->bi_ioprio = bio_src->bi_ioprio;  (in blk_crypto_fallback_clone_bio())
    [all …]
|
/linux-6.12.1/drivers/md/bcache/ |
D | request.c |
    40  static void bio_csum(struct bio *bio, struct bkey *k)
    46  bio_for_each_segment(bv, bio, iter) {  (in bio_csum())
    111  struct bio *bio = op->bio;  (in bch_data_invalidate())
    114  bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  (in bch_data_invalidate())
    116  while (bio_sectors(bio)) {  (in bch_data_invalidate())
    117  unsigned int sectors = min(bio_sectors(bio),  (in bch_data_invalidate())
    123  bio->bi_iter.bi_sector += sectors;  (in bch_data_invalidate())
    124  bio->bi_iter.bi_size -= sectors << 9;  (in bch_data_invalidate())
    128  bio->bi_iter.bi_sector,  (in bch_data_invalidate())
    134  bio_put(bio);  (in bch_data_invalidate())
    [all …]
|
D | io.c |
    17  void bch_bbio_free(struct bio *bio, struct cache_set *c)
    19  struct bbio *b = container_of(bio, struct bbio, bio);  (in bch_bbio_free())
    24  struct bio *bch_bbio_alloc(struct cache_set *c)
    27  struct bio *bio = &b->bio;  (in bch_bbio_alloc())
    29  bio_init(bio, NULL, bio->bi_inline_vecs,  (in bch_bbio_alloc())
    32  return bio;  (in bch_bbio_alloc())
    35  void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
    37  struct bbio *b = container_of(bio, struct bbio, bio);  (in __bch_submit_bbio())
    39  bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);  (in __bch_submit_bbio())
    40  bio_set_dev(bio, c->cache->bdev);  (in __bch_submit_bbio())
    [all …]
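bcache wraps each bio in a bbio and recovers the wrapper with container_of(), a common pattern for carrying per-bio driver state. A generic sketch of that pattern (struct my_bbio and my_endio() are hypothetical, not bcache code):

/* Sketch only: embedding a struct bio inside a private wrapper. */
#include <linux/bio.h>
#include <linux/container_of.h>
#include <linux/printk.h>

struct my_bbio {
        int             retries;
        struct bio      bio;    /* must be last: bi_inline_vecs is a flexible array */
};

static void my_endio(struct bio *bio)
{
        struct my_bbio *b = container_of(bio, struct my_bbio, bio);

        pr_debug("bio for wrapper %p finished, %d retries left\n", b, b->retries);
}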
|
D | movinggc.c |
    19  struct bbio bio;
    48  struct bio *bio = &io->bio.bio;  (in CLOSURE_CALLBACK())
    50  bio_free_pages(bio);  (in CLOSURE_CALLBACK())
    62  static void read_moving_endio(struct bio *bio)
    64  struct bbio *b = container_of(bio, struct bbio, bio);  (in read_moving_endio())
    65  struct moving_io *io = container_of(bio->bi_private,  (in read_moving_endio())
    68  if (bio->bi_status)  (in read_moving_endio())
    69  io->op.status = bio->bi_status;  (in read_moving_endio())
    75  bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");  (in read_moving_endio())
    80  struct bio *bio = &io->bio.bio;  (in moving_init())
    [all …]
|
/linux-6.12.1/fs/btrfs/ |
D | bio.c |
    37  return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE;  (in bbio_has_ordered_extent())
    47  memset(bbio, 0, offsetof(struct btrfs_bio, bio));  (in btrfs_bio_init())
    67  struct bio *bio;  (in btrfs_bio_alloc())
    69  bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);  (in btrfs_bio_alloc())
    70  bbio = btrfs_bio(bio);  (in btrfs_bio_alloc())
    80  struct bio *bio;  (in btrfs_split_bio())
    82  bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,  (in btrfs_split_bio())
    84  bbio = btrfs_bio(bio);  (in btrfs_split_bio())
    102  bio_put(&bbio->bio);  (in btrfs_cleanup_bio())
    119  bbio->bio.bi_status = status;  (in btrfs_bio_end_io())
    [all …]
|
/linux-6.12.1/fs/ext4/ |
D | readpage.c |
    63  struct bio *bio;
    69  static void __read_end_io(struct bio *bio)
    73  bio_for_each_folio_all(fi, bio)  (in __read_end_io())
    74  folio_end_read(fi.folio, bio->bi_status == 0);  (in __read_end_io())
    75  if (bio->bi_private)  (in __read_end_io())
    76  mempool_free(bio->bi_private, bio_post_read_ctx_pool);  (in __read_end_io())
    77  bio_put(bio);  (in __read_end_io())
    86  struct bio *bio = ctx->bio;  (in decrypt_work())
    88  if (fscrypt_decrypt_bio(bio))  (in decrypt_work())
    91  __read_end_io(bio);  (in decrypt_work())
    [all …]
|
/linux-6.12.1/fs/ |
D | mpage.c |
    46  static void mpage_read_end_io(struct bio *bio)
    49  int err = blk_status_to_errno(bio->bi_status);  (in mpage_read_end_io())
    51  bio_for_each_folio_all(fi, bio)  (in mpage_read_end_io())
    54  bio_put(bio);  (in mpage_read_end_io())
    57  static void mpage_write_end_io(struct bio *bio)
    60  int err = blk_status_to_errno(bio->bi_status);  (in mpage_write_end_io())
    62  bio_for_each_folio_all(fi, bio) {  (in mpage_write_end_io())
    68  bio_put(bio);  (in mpage_write_end_io())
    71  static struct bio *mpage_bio_submit_read(struct bio *bio)
    73  bio->bi_end_io = mpage_read_end_io;  (in mpage_bio_submit_read())
    [all …]
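The mpage helpers above show the standard completion wiring: stash context in bi_private, point bi_end_io at a callback, and drop the bio reference there. A self-contained sketch of that pattern (all my_* names are hypothetical; the caller would wait with wait_for_completion(&ctx->done)):

/* Sketch only: generic submit/complete pattern, not mpage code. */
#include <linux/bio.h>
#include <linux/completion.h>

struct my_read_ctx {                    /* hypothetical context carried via bi_private */
        struct completion done;
        int error;
};

static void my_read_end_io(struct bio *bio)
{
        struct my_read_ctx *ctx = bio->bi_private;

        ctx->error = blk_status_to_errno(bio->bi_status);
        complete(&ctx->done);
        bio_put(bio);                   /* drop the submitter's reference */
}

static void my_submit_read(struct block_device *bdev, sector_t sector,
                           struct page *page, struct my_read_ctx *ctx)
{
        /* with __GFP_DIRECT_RECLAIM set, bio_alloc() does not return NULL */
        struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);

        init_completion(&ctx->done);
        bio->bi_iter.bi_sector = sector;
        __bio_add_page(bio, page, PAGE_SIZE, 0);        /* bio has room for one vec */
        bio->bi_private = ctx;
        bio->bi_end_io = my_read_end_io;
        submit_bio(bio);
}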
|
/linux-6.12.1/fs/squashfs/ |
D | block.c |
    33  static int copy_bio_to_actor(struct bio *bio,
    46  if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))  (in copy_bio_to_actor())
    70  if (!bio_next_segment(bio, &iter_all))  (in copy_bio_to_actor())
    79  static int squashfs_bio_read_cached(struct bio *fullbio,
    87  struct bio *bio = NULL;  (in squashfs_bio_read_cached())
    114  if (!bio || idx != end_idx) {  (in squashfs_bio_read_cached())
    115  struct bio *new = bio_alloc_clone(bdev, fullbio,  (in squashfs_bio_read_cached())
    118  if (bio) {  (in squashfs_bio_read_cached())
    119  bio_trim(bio, start_idx * PAGE_SECTORS,  (in squashfs_bio_read_cached())
    121  bio_chain(bio, new);  (in squashfs_bio_read_cached())
    [all …]
|
/linux-6.12.1/drivers/md/dm-vdo/ |
D | vio.c |
    41  physical_block_number_t pbn_from_vio_bio(struct bio *bio)
    43  struct vio *vio = bio->bi_private;  (in pbn_from_vio_bio())
    45  physical_block_number_t pbn = bio->bi_iter.bi_sector / VDO_SECTORS_PER_BLOCK;  (in pbn_from_vio_bio())
    50  static int create_multi_block_bio(block_count_t size, struct bio **bio_ptr)
    52  struct bio *bio = NULL;  (in create_multi_block_bio())
    55  result = vdo_allocate_extended(struct bio, size + 1, struct bio_vec,  (in create_multi_block_bio())
    56  "bio", &bio);  (in create_multi_block_bio())
    60  *bio_ptr = bio;  (in create_multi_block_bio())
    64  int vdo_create_bio(struct bio **bio_ptr)
    69  void vdo_free_bio(struct bio *bio)
    [all …]
|
/linux-6.12.1/drivers/md/ |
D | dm-io-rewind.c |
    56  static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
    58  struct bio_integrity_payload *bip = bio_integrity(bio);  (in dm_bio_integrity_rewind())
    59  struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);  (in dm_bio_integrity_rewind())
    68  static inline void dm_bio_integrity_rewind(struct bio *bio,
    94  static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
    96  struct bio_crypt_ctx *bc = bio->bi_crypt_context;  (in dm_bio_crypt_rewind())
    104  static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
    110  static inline void dm_bio_rewind_iter(const struct bio *bio,
    116  if (bio_no_advance_iter(bio))  (in dm_bio_rewind_iter())
    119  dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);  (in dm_bio_rewind_iter())
    [all …]
|
D | dm-raid1.c |
    126  static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
    135  bio_list_add(bl, bio);  (in queue_bio())
    145  struct bio *bio;  (in dispatch_bios())
    147  while ((bio = bio_list_pop(bio_list)))  (in dispatch_bios())
    148  queue_bio(ms, bio, WRITE);  (in dispatch_bios())
    168  static struct mirror *bio_get_m(struct bio *bio)
    170  return (struct mirror *) bio->bi_next;  (in bio_get_m())
    173  static void bio_set_m(struct bio *bio, struct mirror *m)
    175  bio->bi_next = (struct bio *) m;  (in bio_set_m())
    445  static int mirror_available(struct mirror_set *ms, struct bio *bio)
    [all …]
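dm-raid1 parks bios on a bio_list under a lock and replays them later from a worker; the list API is the simple push/pop visible above. A sketch of the replay side (flush_deferred_bios() is hypothetical and the locking is omitted):

/* Sketch only: draining a deferred bio_list in a stacking driver. */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void flush_deferred_bios(struct bio_list *deferred)
{
        struct bio *bio;

        while ((bio = bio_list_pop(deferred)))
                submit_bio_noacct(bio);         /* re-submit without accounting twice */
}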
|
/linux-6.12.1/drivers/nvme/target/ |
D | io-cmd-bdev.c |
    183  static void nvmet_bio_done(struct bio *bio)
    185  struct nvmet_req *req = bio->bi_private;  (in nvmet_bio_done())
    187  nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));  (in nvmet_bio_done())
    188  nvmet_req_bio_put(req, bio);  (in nvmet_bio_done())
    192  static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
    206  bip = bio_integrity_alloc(bio, GFP_NOIO,  (in nvmet_bdev_alloc_bip())
    214  bip_set_seed(bip, bio->bi_iter.bi_sector >>  (in nvmet_bdev_alloc_bip())
    217  resid = bio_integrity_bytes(bi, bio_sectors(bio));  (in nvmet_bdev_alloc_bip())
    220  rc = bio_integrity_add_page(bio, miter->page, len,  (in nvmet_bdev_alloc_bip())
    237  static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
    [all …]
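The nvmet code above shows how protection information is attached: allocate a bio_integrity_payload, seed it, and add PI pages. A condensed sketch (attach_pi(), prot_page and prot_len are placeholders; real callers, as in the code above, scale the seed by the device's integrity interval and may add several pages):

/* Sketch only: attach one page of protection information to a bio. */
#include <linux/bio.h>
#include <linux/bio-integrity.h>
#include <linux/err.h>

static int attach_pi(struct bio *bio, struct page *prot_page,
                     unsigned int prot_len)
{
        struct bio_integrity_payload *bip;

        bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
        if (IS_ERR(bip))
                return PTR_ERR(bip);

        /* simplified: assumes a 512-byte integrity interval */
        bip_set_seed(bip, bio->bi_iter.bi_sector);

        if (bio_integrity_add_page(bio, prot_page, prot_len, 0) != prot_len)
                return -ENOMEM;

        return 0;
}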
|
/linux-6.12.1/fs/f2fs/ |
D | iostat.h |
    45  static inline void iostat_update_submit_ctx(struct bio *bio,
    48  struct bio_iostat_ctx *iostat_ctx = bio->bi_private;  (in iostat_update_submit_ctx())
    54  static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)
    56  struct bio_iostat_ctx *iostat_ctx = bio->bi_private;  (in get_post_read_ctx())
    61  extern void iostat_update_and_unbind_ctx(struct bio *bio);
    63  struct bio *bio, struct bio_post_read_ctx *ctx);
    71  static inline void iostat_update_and_unbind_ctx(struct bio *bio) {}
    73  struct bio *bio, struct bio_post_read_ctx *ctx) {}  (in iostat_alloc_and_bind_ctx())
    74  static inline void iostat_update_submit_ctx(struct bio *bio,
    76  static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)
    [all …]
|