/linux-6.12.1/lib/

D | iov_iter.c
     163  const struct iovec *iov, unsigned long nr_segs,  in iov_iter_init() argument
     172  .nr_segs = nr_segs,  in iov_iter_init()
     503  for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {  in iov_iter_bvec_advance()
     509  i->nr_segs -= bvec - i->bvec;  in iov_iter_bvec_advance()
     522  for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {  in iov_iter_iovec_advance()
     528  i->nr_segs -= iov - iter_iov(i);  in iov_iter_iovec_advance()
     634  i->nr_segs++;  in iov_iter_revert()
     649  i->nr_segs++;  in iov_iter_revert()
     666  if (i->nr_segs > 1) {  in iov_iter_single_seg_count()
     680  const struct kvec *kvec, unsigned long nr_segs,  in iov_iter_kvec() argument
     [all …]

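These lib/iov_iter.c hits are the core iterator machinery: the constructors record nr_segs, the advance helpers shrink it as segments are consumed, and iov_iter_revert() grows it back. A minimal sketch of that lifecycle, assuming a two-buffer caller (demo_drain_kvec and its parameters are illustrative, not from the file):

    #include <linux/uio.h>

    /* Build a two-segment kvec iterator and drain it; copy_from_iter()
     * advances the iterator as it copies, so iter.nr_segs falls to 0
     * once both segments are consumed (the KUnit entry below asserts
     * exactly this). */
    static size_t demo_drain_kvec(void *dst, void *a, void *b, size_t len)
    {
            struct kvec kv[2] = {
                    { .iov_base = a, .iov_len = len },
                    { .iov_base = b, .iov_len = len },
            };
            struct iov_iter iter;

            iov_iter_kvec(&iter, ITER_SOURCE, kv, 2, 2 * len); /* nr_segs = 2 */
            return copy_from_iter(dst, 2 * len, &iter);
    }
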
D | kunit_iov_iter.c
     130  KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);  in iov_kunit_copy_to_kvec()
     180  KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);  in iov_kunit_copy_from_kvec()
     289  KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);  in iov_kunit_copy_to_bvec()
     343  KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);  in iov_kunit_copy_from_bvec()

/linux-6.12.1/include/linux/

D | uio.h
      40  unsigned long nr_segs;  member
      79  unsigned long nr_segs;  member
     105  state->nr_segs = iter->nr_segs;  in iov_iter_save_state()
     160  static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)  in iov_length() argument
     165  for (seg = 0; seg < nr_segs; seg++)  in iov_length()
     285  unsigned long nr_segs, size_t count);
     287  unsigned long nr_segs, size_t count);
     289  unsigned long nr_segs, size_t count);
     355  unsigned long nr_segs, unsigned long fast_segs,
     358  unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
     [all …]

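uio.h declares the nr_segs members of struct iov_iter and struct iov_iter_state, and iov_length() (lines 160 and 165 above) is nothing more than a sum over the segment lengths; its loop is equivalent to:

    #include <linux/uio.h>

    /* Equivalent of iov_length(): total bytes across nr_segs iovecs. */
    static inline size_t iov_length_equiv(const struct iovec *iov,
                                          unsigned long nr_segs)
    {
            unsigned long seg;
            size_t ret = 0;

            for (seg = 0; seg < nr_segs; seg++)
                    ret += iov[seg].iov_len;
            return ret;
    }
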
D | iov_iter.h
      64  iter->nr_segs -= p - iter->__iov;  in iterate_iovec()
      98  iter->nr_segs -= p - iter->kvec;  in iterate_kvec()
     137  iter->nr_segs -= p - iter->bvec;  in iterate_bvec()

/linux-6.12.1/net/core/

D | net_test.c
      51  unsigned int nr_segs;  member
      60  .nr_segs = 1,
      67  .nr_segs = 3,
      76  .nr_segs = 3,
      84  .nr_segs = 3,
      93  .nr_segs = 2,
     103  .nr_segs = 3,
     111  .nr_segs = 2,
     121  .nr_segs = 4,
     135  .nr_segs = 4,
     [all …]

/linux-6.12.1/block/

D | blk-merge.c
     374  unsigned *nr_segs)  in bio_split_rw() argument
     377  bio_split_rw_at(bio, lim, nr_segs,  in bio_split_rw()
     389  const struct queue_limits *lim, unsigned *nr_segs)  in bio_split_zone_append() argument
     394  split_sectors = bio_split_rw_at(bio, lim, nr_segs,  in bio_split_zone_append()
     415  unsigned int nr_segs;  in bio_split_to_limits() local
     417  return __bio_split_to_limits(bio, lim, &nr_segs);  in bio_split_to_limits()
     652  int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)  in ll_back_merge_fn() argument
     667  return ll_new_hw_segment(req, bio, nr_segs);  in ll_back_merge_fn()
     671  unsigned int nr_segs)  in ll_front_merge_fn() argument
     686  return ll_new_hw_segment(req, bio, nr_segs);  in ll_front_merge_fn()
     [all …]

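In blk-merge.c the split helpers report the computed segment count through the *nr_segs out-parameter so callers can feed it to the merge functions, while the public bio_split_to_limits() (line 415) keeps the count internal. A sketch of how a stacking driver typically calls it (demo_submit is an illustrative name; error paths trimmed):

    #include <linux/bio.h>

    static void demo_submit(struct bio *bio)
    {
            /* May split the bio against the queue limits; the segment
             * count is computed and consumed inside the helper. */
            bio = bio_split_to_limits(bio);
            if (!bio)
                    return; /* remainder was requeued or an error occurred */
            /* ... issue the now limits-conforming bio ... */
    }
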
D | blk.h
     295  struct bio *bio, unsigned int nr_segs);
     297  unsigned int nr_segs);
     299  struct bio *bio, unsigned int nr_segs);
     339  unsigned *nr_segs);
     341  const struct queue_limits *lim, unsigned *nr_segs);
     372  const struct queue_limits *lim, unsigned int *nr_segs)  in __bio_split_to_limits() argument
     378  return bio_split_rw(bio, lim, nr_segs);  in __bio_split_to_limits()
     379  *nr_segs = 1;  in __bio_split_to_limits()
     382  return bio_split_zone_append(bio, lim, nr_segs);  in __bio_split_to_limits()
     385  return bio_split_discard(bio, lim, nr_segs);  in __bio_split_to_limits()
     [all …]

D | blk-map.c
      26  if (data->nr_segs > UIO_MAXIOV)  in bio_alloc_map_data()
      29  bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);  in bio_alloc_map_data()
      34  memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);  in bio_alloc_map_data()
     541  unsigned int nr_segs = 0;  in blk_rq_append_bio() local
     544  nr_segs++;  in blk_rq_append_bio()
     547  blk_rq_bio_prep(rq, bio, nr_segs);  in blk_rq_append_bio()
     549  if (!ll_back_merge_fn(rq, bio, nr_segs))  in blk_rq_append_bio()

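bio_alloc_map_data() (lines 26-34) bounds nr_segs by UIO_MAXIOV, then sizes a header-plus-flexible-array allocation with struct_size(). The same pattern in isolation, with demo_map_data as an illustrative stand-in for the real bio_map_data layout:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    struct demo_map_data {
            bool is_our_pages;
            struct iovec iov[];     /* flexible array, nr_segs entries */
    };

    static struct demo_map_data *demo_alloc(unsigned long nr_segs, gfp_t gfp)
    {
            struct demo_map_data *bmd;

            if (nr_segs > UIO_MAXIOV)       /* same bound as line 26 */
                    return NULL;
            /* struct_size() = sizeof(*bmd) + nr_segs * sizeof(bmd->iov[0]),
             * with integer-overflow checking */
            bmd = kmalloc(struct_size(bmd, iov, nr_segs), gfp);
            return bmd;
    }
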
D | blk-mq-sched.h
      11  unsigned int nr_segs, struct request **merged_request);
      13  unsigned int nr_segs);

D | blk-mq-sched.c
     338  unsigned int nr_segs)  in blk_mq_sched_bio_merge() argument
     347  ret = e->type->ops.bio_merge(q, bio, nr_segs);  in blk_mq_sched_bio_merge()
     365  if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))  in blk_mq_sched_bio_merge()

D | blk-zoned.c
     757  struct bio *bio, unsigned int nr_segs)  in blk_zone_wplug_add_bio() argument
     778  bio->__bi_nr_segments = nr_segs;  in blk_zone_wplug_add_bio()
     944  static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)  in blk_zone_wplug_handle_write() argument
    1009  blk_zone_wplug_add_bio(zwplug, bio, nr_segs);  in blk_zone_wplug_handle_write()
    1028  bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)  in blk_zone_plug_bio() argument
    1077  return blk_zone_wplug_handle_write(bio, nr_segs);  in blk_zone_plug_bio()

D | blk-crypto-fallback.c
     162  unsigned int nr_segs = bio_segments(bio_src);  in blk_crypto_fallback_clone_bio() local
     167  bio = bio_kmalloc(nr_segs, GFP_NOIO);  in blk_crypto_fallback_clone_bio()
     170  bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,  in blk_crypto_fallback_clone_bio()

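The fallback clone sizes its new bio by the source's segment count before bio_init(); bcache's bch_data_verify() (in the bcache entry below) uses the same bio_segments()/bio_kmalloc()/bio_init() sequence. A trimmed sketch (the final bi_opf argument, past the visible truncation at line 170, is an assumption):

    #include <linux/bio.h>

    static struct bio *demo_clone_sized(struct bio *bio_src)
    {
            unsigned int nr_segs = bio_segments(bio_src);
            struct bio *bio = bio_kmalloc(nr_segs, GFP_NOIO);

            if (!bio)
                    return NULL;
            bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
                     bio_src->bi_opf);  /* assumed final argument */
            return bio;
    }
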
D | blk-mq.c
    2537  unsigned int nr_segs)  in blk_mq_bio_to_request() argument
    2546  blk_rq_bio_prep(rq, bio, nr_segs);  in blk_mq_bio_to_request()
    2836  struct bio *bio, unsigned int nr_segs)  in blk_mq_attempt_bio_merge() argument
    2839  if (blk_attempt_plug_merge(q, bio, nr_segs))  in blk_mq_attempt_bio_merge()
    2841  if (blk_mq_sched_bio_merge(q, bio, nr_segs))  in blk_mq_attempt_bio_merge()
    2946  unsigned int nr_segs;  in blk_mq_submit_bio() local
    2962  nr_segs = bio->__bi_nr_segments;  in blk_mq_submit_bio()
    2988  bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);  in blk_mq_submit_bio()
    2995  if (blk_mq_attempt_bio_merge(q, bio, nr_segs))  in blk_mq_submit_bio()
    2998  if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs))  in blk_mq_submit_bio()
    [all …]

/linux-6.12.1/fs/smb/server/

D | transport_tcp.c
     117  unsigned int nr_segs, size_t bytes)  in kvec_array_init() argument
     128  nr_segs--;  in kvec_array_init()
     133  memcpy(new, iov, sizeof(*iov) * nr_segs);  in kvec_array_init()
     136  return nr_segs;  in kvec_array_init()
     146  static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs)  in get_conn_iovec() argument
     150  if (t->iov && nr_segs <= t->nr_iov)  in get_conn_iovec()
     154  new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL);  in get_conn_iovec()
     158  t->nr_iov = nr_segs;  in get_conn_iovec()
     305  unsigned int nr_segs, unsigned int to_read,  in ksmbd_tcp_readv() argument
     315  iov = get_conn_iovec(t, nr_segs);  in ksmbd_tcp_readv()
     [all …]

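kvec_array_init() rebases a kvec array past bytes an earlier partial read already consumed and returns the number of segments left for the retry. The logic those hits imply, reconstructed as a sketch (kvec_skip is an illustrative name, not the ksmbd function):

    #include <linux/string.h>
    #include <linux/uio.h>

    static unsigned int kvec_skip(struct kvec *dst, struct kvec *src,
                                  unsigned int nr_segs, size_t bytes)
    {
            /* drop segments that were consumed in full */
            while (nr_segs && bytes >= src->iov_len) {
                    bytes -= src->iov_len;
                    src++;
                    nr_segs--;
            }
            memcpy(dst, src, sizeof(*src) * nr_segs);
            if (nr_segs && bytes) {         /* trim a partially-read head */
                    dst->iov_base = (char *)dst->iov_base + bytes;
                    dst->iov_len -= bytes;
            }
            return nr_segs;
    }
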
/linux-6.12.1/drivers/scsi/

D | xen-scsifront.c
    1132  unsigned int sg_grant, nr_segs;  in scsifront_read_backend_params() local
    1136  nr_segs = min_t(unsigned int, sg_grant, SG_ALL);  in scsifront_read_backend_params()
    1137  nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);  in scsifront_read_backend_params()
    1138  nr_segs = min_t(unsigned int, nr_segs,  in scsifront_read_backend_params()
    1143  dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);  in scsifront_read_backend_params()
    1144  else if (info->pause && nr_segs < host->sg_tablesize)  in scsifront_read_backend_params()
    1147  host->sg_tablesize, nr_segs);  in scsifront_read_backend_params()
    1149  host->sg_tablesize = nr_segs;  in scsifront_read_backend_params()
    1150  host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;  in scsifront_read_backend_params()

/linux-6.12.1/arch/powerpc/mm/

D | dma-noncoherent.c
      65  int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;  in __dma_sync_page_highmem() local
      84  } while (seg_nr < nr_segs);  in __dma_sync_page_highmem()

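The expression at line 65 is a ceiling division: one segment for the first partial run of seg_size bytes, plus the remaining bytes rounded up to whole pages. Written with the kernel's helper (a sketch; the worked numbers assume a PAGE_SIZE of 4096):

    #include <linux/kernel.h>       /* DIV_ROUND_UP */

    static int highmem_sync_segs(size_t size, size_t seg_size)
    {
            /* e.g. size = 10000, seg_size = 2000:
             * 1 + DIV_ROUND_UP(8000, 4096) = 1 + 2 = 3 segments */
            return 1 + DIV_ROUND_UP(size - seg_size, PAGE_SIZE);
    }
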
/linux-6.12.1/drivers/hwtracing/intel_th/

D | msu.c
      74  unsigned int nr_segs;  member
     330  for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {  in msc_win_total_sz()
     425  for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {  in msc_win_oldest_sg()
     662  for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {  in msc_buffer_clear_hw_header()
     994  unsigned int nr_segs)  in __msc_buffer_win_alloc() argument
    1000  ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);  in __msc_buffer_win_alloc()
    1004  for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {  in __msc_buffer_win_alloc()
    1014  return nr_segs;  in __msc_buffer_win_alloc()
    1039  for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {  in msc_buffer_set_uc()
    1059  for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {  in msc_buffer_set_wb()
    [all …]

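In msu.c, nr_segs records how many scatterlist entries back a capture window, and every walker hands it to for_each_sg(). A simplified walk over the same shape (demo_window stands in for the real msc_window, and summing sg->length is an illustration, not necessarily what msc_win_total_sz() computes):

    #include <linux/scatterlist.h>

    struct demo_window {
            struct sg_table *sgt;
            unsigned int nr_segs;
    };

    static size_t demo_win_bytes(struct demo_window *win)
    {
            struct scatterlist *sg;
            size_t size = 0;
            int blk;

            for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk)
                    size += sg->length;
            return size;
    }
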
/linux-6.12.1/drivers/md/bcache/

D | debug.c
     110  unsigned int nr_segs = bio_segments(bio);  in bch_data_verify() local
     115  check = bio_kmalloc(nr_segs, GFP_NOIO);  in bch_data_verify()
     118  bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,  in bch_data_verify()

/linux-6.12.1/io_uring/

D | net.c
     191  kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;  in io_net_vec_assign()
     218  int ret, nr_segs;  in io_compat_msg_copy_hdr() local
     221  nr_segs = iomsg->free_iov_nr;  in io_compat_msg_copy_hdr()
     225  nr_segs = 1;  in io_compat_msg_copy_hdr()
     254  nr_segs, &iov, &iomsg->msg.msg_iter, true);  in io_compat_msg_copy_hdr()
     267  int ret, nr_segs;  in io_msg_copy_hdr() local
     270  nr_segs = iomsg->free_iov_nr;  in io_msg_copy_hdr()
     274  nr_segs = 1;  in io_msg_copy_hdr()
     311  ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,  in io_msg_copy_hdr()
     327  iomsg->msg.msg_iter.nr_segs = 0;  in io_sendmsg_copy_hdr()
     [all …]

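Both header-copy paths seed __import_iovec() with a cached iovec array when free_iov_nr says one exists, otherwise they offer a single fast segment and let the import allocate; the resulting iter.nr_segs is stashed back as the cache size (line 191). The shape of that pattern as a sketch (free_iov and fast_iov are assumed field names; only free_iov_nr appears in the hits above):

    /* sketch only: io_async_msghdr fields beyond free_iov_nr are assumed */
    struct iovec *iov;
    int nr_segs, ret;

    if (iomsg->free_iov) {
            iov = iomsg->free_iov;          /* reuse the cached array */
            nr_segs = iomsg->free_iov_nr;
    } else {
            iov = &iomsg->fast_iov;         /* one inline segment */
            nr_segs = 1;                    /* import reallocs if short */
    }
    ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
                         &iov, &iomsg->msg.msg_iter, false);
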
D | rw.c
      97  int nr_segs, ret;  in __io_import_iovec() local
     116  nr_segs = io->free_iov_nr;  in __io_import_iovec()
     120  nr_segs = 1;  in __io_import_iovec()
     122  ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,  in __io_import_iovec()
     128  io->free_iov_nr = io->iter.nr_segs;  in __io_import_iovec()

/linux-6.12.1/fs/fuse/

D | dev.c
     702  unsigned long nr_segs;  member
     758  BUG_ON(!cs->nr_segs);  in fuse_copy_fill()
     764  cs->nr_segs--;  in fuse_copy_fill()
     766  if (cs->nr_segs >= cs->pipe->max_usage)  in fuse_copy_fill()
     782  cs->nr_segs++;  in fuse_copy_fill()
     857  BUG_ON(!cs->nr_segs);  in fuse_try_move_page()
     861  cs->nr_segs--;  in fuse_try_move_page()
     948  if (cs->nr_segs >= cs->pipe->max_usage)  in fuse_ref_page()
     966  cs->nr_segs++;  in fuse_ref_page()
    1442  if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {  in fuse_dev_splice_read()
    [all …]

/linux-6.12.1/drivers/nvme/target/

D | io-cmd-file.c
      77  unsigned long nr_segs, size_t count, int ki_flags)  in nvmet_file_submit_bvec() argument
      94  iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);  in nvmet_file_submit_bvec()

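nvmet_file_submit_bvec() wraps the request's bio_vec array in a bvec-backed iterator before handing it to the VFS. A reduced sketch for the write direction, using the generic vfs_iter_write() in place of the driver's async submission plumbing:

    #include <linux/fs.h>
    #include <linux/uio.h>

    static ssize_t demo_write_bvecs(struct file *file, struct bio_vec *bvec,
                                    unsigned long nr_segs, size_t count,
                                    loff_t pos)
    {
            struct iov_iter iter;

            iov_iter_bvec(&iter, ITER_SOURCE, bvec, nr_segs, count);
            return vfs_iter_write(file, &iter, &pos, 0);
    }
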
/linux-6.12.1/fs/bcachefs/

D | fs-io-direct.c
     317  if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {  in bch2_dio_write_copy_iov()
     318  dio->iov = iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),  in bch2_dio_write_copy_iov()
     324  memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));  in bch2_dio_write_copy_iov()

/linux-6.12.1/drivers/block/xen-blkback/

D | blkback.c
     710  invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,  in xen_blkbk_unmap_and_respond()
     907  pending_req->nr_segs,  in xen_blkbk_map_seg()
     923  nseg = pending_req->nr_segs;  in xen_blkbk_parse_indirect()
    1361  pending_req->nr_segs = nseg;  in dispatch_rw_block_io()
    1475  pending_req->nr_segs);  in dispatch_rw_block_io()

/linux-6.12.1/fs/netfs/

D | iterator.c
     113  unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;  in netfs_limit_bvec()