
Searched refs:lim (Results 1 – 25 of 155) sorted by relevance

/linux-6.12.1/block/
blk-settings.c
35 void blk_set_stacking_limits(struct queue_limits *lim) in blk_set_stacking_limits() argument
37 memset(lim, 0, sizeof(*lim)); in blk_set_stacking_limits()
38 lim->logical_block_size = SECTOR_SIZE; in blk_set_stacking_limits()
39 lim->physical_block_size = SECTOR_SIZE; in blk_set_stacking_limits()
40 lim->io_min = SECTOR_SIZE; in blk_set_stacking_limits()
41 lim->discard_granularity = SECTOR_SIZE; in blk_set_stacking_limits()
42 lim->dma_alignment = SECTOR_SIZE - 1; in blk_set_stacking_limits()
43 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; in blk_set_stacking_limits()
46 lim->max_segments = USHRT_MAX; in blk_set_stacking_limits()
47 lim->max_discard_segments = USHRT_MAX; in blk_set_stacking_limits()
[all …]
blk-merge.c
103 static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim) in bio_allowed_max_sectors() argument
105 return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT; in bio_allowed_max_sectors()
133 struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim, in bio_split_discard() argument
142 granularity = max(lim->discard_granularity >> 9, 1U); in bio_split_discard()
145 min(lim->max_discard_sectors, bio_allowed_max_sectors(lim)); in bio_split_discard()
160 ((lim->discard_alignment >> 9) % granularity); in bio_split_discard()
170 const struct queue_limits *lim, unsigned *nsegs) in bio_split_write_zeroes() argument
173 if (!lim->max_write_zeroes_sectors) in bio_split_write_zeroes()
175 if (bio_sectors(bio) <= lim->max_write_zeroes_sectors) in bio_split_write_zeroes()
177 return bio_submit_split(bio, lim->max_write_zeroes_sectors); in bio_split_write_zeroes()
[all …]
blk.h
121 static inline bool __bvec_gap_to_prev(const struct queue_limits *lim, in __bvec_gap_to_prev() argument
124 return (offset & lim->virt_boundary_mask) || in __bvec_gap_to_prev()
125 ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask); in __bvec_gap_to_prev()
132 static inline bool bvec_gap_to_prev(const struct queue_limits *lim, in bvec_gap_to_prev() argument
135 if (!lim->virt_boundary_mask) in bvec_gap_to_prev()
137 return __bvec_gap_to_prev(lim, bprv, offset); in bvec_gap_to_prev()
334 struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
337 const struct queue_limits *lim, unsigned *nsegs);
338 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
341 const struct queue_limits *lim, unsigned *nr_segs);
[all …]
blk-sysfs.c
159 struct queue_limits lim; local
173 lim = queue_limits_start_update(disk->queue);
174 lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
175 err = queue_limits_commit_update(disk->queue, &lim);
197 struct queue_limits lim; in queue_max_sectors_store() local
205 lim = queue_limits_start_update(disk->queue); in queue_max_sectors_store()
206 lim.max_user_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
207 err = queue_limits_commit_update(disk->queue, &lim); in queue_max_sectors_store()
216 struct queue_limits lim; in queue_feature_store() local
224 lim = queue_limits_start_update(disk->queue); in queue_feature_store()
[all …]
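
The blk-sysfs.c hits above show the 6.12 queue-limits update pattern: snapshot the limits with queue_limits_start_update(), edit fields on the local copy, and apply them atomically with queue_limits_commit_update(). A minimal sketch of that pattern follows, assuming the in-tree block-layer API; it only builds against the kernel, and the helper name set_user_discard_bytes() is hypothetical.

/*
 * Minimal sketch of the update pattern visible in the blk-sysfs.c hits
 * above: snapshot the limits, tweak a field, commit atomically. Assumes
 * the 6.12 block-layer API; this only builds in-tree, and the helper
 * name set_user_discard_bytes() is hypothetical.
 */
#include <linux/blkdev.h>

static int set_user_discard_bytes(struct gendisk *disk, u64 max_discard_bytes)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(disk->queue);
	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	return queue_limits_commit_update(disk->queue, &lim);
}
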
/linux-6.12.1/drivers/media/i2c/
ccs-pll.c
152 const struct ccs_pll_limits *lim, in check_fr_bounds() argument
161 lim_fr = &lim->op_fr; in check_fr_bounds()
164 lim_fr = &lim->vt_fr; in check_fr_bounds()
192 const struct ccs_pll_limits *lim, in check_bk_bounds() argument
204 lim_bk = &lim->op_bk; in check_bk_bounds()
207 lim_bk = &lim->vt_bk; in check_bk_bounds()
251 ccs_pll_find_vt_sys_div(struct device *dev, const struct ccs_pll_limits *lim, in ccs_pll_find_vt_sys_div() argument
260 *min_sys_div = lim->vt_bk.min_sys_clk_div; in ccs_pll_find_vt_sys_div()
264 lim->vt_bk.max_pix_clk_div)); in ccs_pll_find_vt_sys_div()
268 / lim->vt_bk.max_sys_clk_freq_hz); in ccs_pll_find_vt_sys_div()
[all …]
/linux-6.12.1/drivers/mmc/core/
queue.c
178 struct queue_limits *lim) in mmc_queue_setup_discard() argument
186 lim->max_hw_discard_sectors = max_discard; in mmc_queue_setup_discard()
188 lim->max_secure_erase_sectors = max_discard; in mmc_queue_setup_discard()
190 lim->max_write_zeroes_sectors = max_discard; in mmc_queue_setup_discard()
194 lim->discard_granularity = SECTOR_SIZE; in mmc_queue_setup_discard()
196 lim->discard_granularity = card->pref_erase << 9; in mmc_queue_setup_discard()
350 struct queue_limits lim = { in mmc_alloc_disk() local
356 mmc_queue_setup_discard(card, &lim); in mmc_alloc_disk()
358 lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512); in mmc_alloc_disk()
361 lim.logical_block_size = card->ext_csd.data_sector_size; in mmc_alloc_disk()
[all …]
/linux-6.12.1/drivers/md/
dm-zone.c
256 struct queue_limits *lim; member
309 zlim->lim->max_active_zones = in device_get_zone_resource_limits()
310 min_not_zero(max_active_zones, zlim->lim->max_active_zones); in device_get_zone_resource_limits()
315 zlim->lim->max_open_zones = in device_get_zone_resource_limits()
316 min_not_zero(max_open_zones, zlim->lim->max_open_zones); in device_get_zone_resource_limits()
329 struct queue_limits *lim) in dm_set_zones_restrictions() argument
335 .lim = lim, in dm_set_zones_restrictions()
347 lim->max_zone_append_sectors = 0; in dm_set_zones_restrictions()
380 lim->max_open_zones = 0; in dm_set_zones_restrictions()
381 lim->max_active_zones = 0; in dm_set_zones_restrictions()
[all …]
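
The dm-zone.c hits stack per-device zone resource limits with min_not_zero(), which treats 0 as "no limit". Below is a standalone sketch of that stacking rule; min_not_zero_u32() is a plain-C stand-in for the kernel macro in <linux/minmax.h>, and the device values are invented.

/*
 * Standalone sketch of the stacking rule used in the dm-zone.c hits above:
 * min_not_zero() picks the smaller of two limits while treating 0 as
 * "unlimited". min_not_zero_u32() is a plain-C stand-in for the kernel
 * macro, shown here only for illustration.
 */
#include <stdio.h>

static unsigned int min_not_zero_u32(unsigned int x, unsigned int y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	unsigned int max_active_zones = 0;	/* start as "unlimited" */

	/* Fold in two hypothetical component devices. */
	max_active_zones = min_not_zero_u32(32, max_active_zones);
	max_active_zones = min_not_zero_u32(0, max_active_zones);	/* reports no limit */

	printf("stacked max_active_zones = %u\n", max_active_zones);	/* prints 32 */
	return 0;
}
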
/linux-6.12.1/lib/
bitmap.c
40 unsigned int k, lim = bits/BITS_PER_LONG; in __bitmap_equal() local
41 for (k = 0; k < lim; ++k) in __bitmap_equal()
58 unsigned int k, lim = bits / BITS_PER_LONG; in __bitmap_or_equal() local
61 for (k = 0; k < lim; ++k) { in __bitmap_or_equal()
75 unsigned int k, lim = BITS_TO_LONGS(bits); in __bitmap_complement() local
76 for (k = 0; k < lim; ++k) in __bitmap_complement()
95 unsigned k, lim = BITS_TO_LONGS(nbits); in __bitmap_shift_right() local
98 for (k = 0; off + k < lim; ++k) { in __bitmap_shift_right()
105 if (!rem || off + k + 1 >= lim) in __bitmap_shift_right()
109 if (off + k + 1 == lim - 1) in __bitmap_shift_right()
[all …]
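
The lib/bitmap.c hits share one shape: lim counts the whole unsigned longs covering the bitmap, a loop walks those words, and any partial tail word is handled under a mask. Below is a self-contained userspace sketch of that shape, loosely mirroring __bitmap_equal(); the helper names are mine, not the kernel API.

/*
 * Userspace sketch of the word-at-a-time pattern in the lib/bitmap.c hits
 * above: process bits/BITS_PER_LONG full words, then mask the partial tail
 * word. This mirrors the shape of __bitmap_equal() but is an independent
 * illustration, not the kernel implementation.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define LAST_WORD_MASK(bits) \
	(((bits) % BITS_PER_LONG) ? ((1UL << ((bits) % BITS_PER_LONG)) - 1) : ~0UL)

static bool bitmap_equal_sketch(const unsigned long *a, const unsigned long *b,
				unsigned int bits)
{
	unsigned int k, lim = bits / BITS_PER_LONG;

	for (k = 0; k < lim; ++k)		/* whole words */
		if (a[k] != b[k])
			return false;

	if (bits % BITS_PER_LONG)		/* partial tail word */
		if ((a[k] ^ b[k]) & LAST_WORD_MASK(bits))
			return false;

	return true;
}

int main(void)
{
	unsigned long a[2] = { 0xdeadbeefUL, 0x1UL };
	unsigned long b[2] = { 0xdeadbeefUL, 0xff1UL };
	unsigned int bits = BITS_PER_LONG + 1;	/* one full word plus one tail bit */

	/* b[1] differs above bit 0, but only bit 0 of the tail word is compared. */
	printf("equal over %u bits: %d\n", bits, bitmap_equal_sketch(a, b, bits));
	return 0;
}
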
/linux-6.12.1/tools/lib/
bitmap.c
10 unsigned int k, w = 0, lim = bits/BITS_PER_LONG; in __bitmap_weight() local
12 for (k = 0; k < lim; k++) in __bitmap_weight()
64 unsigned int lim = bits/BITS_PER_LONG; in __bitmap_and() local
67 for (k = 0; k < lim; k++) in __bitmap_and()
78 unsigned int k, lim = bits/BITS_PER_LONG; in __bitmap_equal() local
79 for (k = 0; k < lim; ++k) in __bitmap_equal()
93 unsigned int k, lim = bits/BITS_PER_LONG; in __bitmap_intersects() local
94 for (k = 0; k < lim; ++k) in __bitmap_intersects()
/linux-6.12.1/drivers/block/
virtio_blk.c
724 struct queue_limits *lim) in virtblk_read_zoned_limits() argument
731 lim->features |= BLK_FEAT_ZONED; in virtblk_read_zoned_limits()
735 lim->max_open_zones = v; in virtblk_read_zoned_limits()
740 lim->max_active_zones = v; in virtblk_read_zoned_limits()
749 lim->physical_block_size = wg; in virtblk_read_zoned_limits()
750 lim->io_min = wg; in virtblk_read_zoned_limits()
766 lim->chunk_sectors = vblk->zone_sectors; in virtblk_read_zoned_limits()
772 lim->max_hw_discard_sectors = 0; in virtblk_read_zoned_limits()
787 lim->max_zone_append_sectors = v; in virtblk_read_zoned_limits()
799 struct queue_limits *lim) in virtblk_read_zoned_limits() argument
[all …]
loop.c
304 struct queue_limits lim = queue_limits_start_update(lo->lo_queue); in loop_clear_limits() local
307 lim.max_write_zeroes_sectors = 0; in loop_clear_limits()
310 lim.max_hw_discard_sectors = 0; in loop_clear_limits()
311 lim.discard_granularity = 0; in loop_clear_limits()
314 queue_limits_commit_update(lo->lo_queue, &lim); in loop_clear_limits()
774 struct queue_limits *lim) in loop_config_discard() argument
804 lim->max_hw_discard_sectors = max_discard_sectors; in loop_config_discard()
805 lim->max_write_zeroes_sectors = max_discard_sectors; in loop_config_discard()
807 lim->discard_granularity = granularity; in loop_config_discard()
809 lim->discard_granularity = 0; in loop_config_discard()
[all …]
/linux-6.12.1/tools/include/nolibc/
stdlib.h
244 unsigned long lim; in utoa_r() local
250 for (dig = 0, lim = 1; dig < pos; dig++) in utoa_r()
251 lim *= 10; in utoa_r()
253 if (digits || in >= lim || !pos) { in utoa_r()
254 for (dig = 0; in >= lim; dig++) in utoa_r()
255 in -= lim; in utoa_r()
380 unsigned long long lim; in u64toa_r() local
386 for (dig = 0, lim = 1; dig < pos; dig++) in u64toa_r()
387 lim *= 10; in u64toa_r()
389 if (digits || in >= lim || !pos) { in u64toa_r()
[all …]
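
The nolibc utoa_r()/u64toa_r() hits use lim as a power of ten and peel each digit by repeated subtraction rather than division. Below is a standalone sketch of that trick; the function name and the simplified leading-zero handling are mine.

/*
 * Standalone sketch of the digit-extraction trick in the nolibc
 * utoa_r()/u64toa_r() hits above: for each position, lim is the matching
 * power of ten and the digit is found by repeated subtraction, avoiding
 * divisions. The name and the simplified padding handling are mine.
 */
#include <stdio.h>

static char *utoa_sketch(unsigned long long in, char *buf)
{
	int pos, dig, digits = 0;
	char *p = buf;

	for (pos = 19; pos >= 0; pos--) {
		/* lim = 10^pos, built by multiplication as in the hits */
		unsigned long long lim = 1;

		for (dig = 0; dig < pos; dig++)
			lim *= 10;

		if (digits || in >= lim || !pos) {
			for (dig = 0; in >= lim; dig++)
				in -= lim;	/* digit found by subtraction */
			*p++ = '0' + dig;
			digits++;
		}
	}
	*p = '\0';
	return buf;
}

int main(void)
{
	char buf[21];	/* up to 20 decimal digits plus NUL */

	printf("%s\n", utoa_sketch(1234567890123ULL, buf));
	return 0;
}
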
/linux-6.12.1/arch/um/os-Linux/
main.c
29 struct rlimit lim; in set_stklim() local
31 if (getrlimit(RLIMIT_STACK, &lim) < 0) { in set_stklim()
35 if ((lim.rlim_cur == RLIM_INFINITY) || (lim.rlim_cur > STACKSIZE)) { in set_stklim()
36 lim.rlim_cur = STACKSIZE; in set_stklim()
37 if (setrlimit(RLIMIT_STACK, &lim) < 0) { in set_stklim()
start_up.c
229 struct rlimit lim; in check_coredump_limit() local
230 int err = getrlimit(RLIMIT_CORE, &lim); in check_coredump_limit()
238 if (lim.rlim_cur == RLIM_INFINITY) in check_coredump_limit()
241 os_info("%llu\n", (unsigned long long)lim.rlim_cur); in check_coredump_limit()
244 if (lim.rlim_max == RLIM_INFINITY) in check_coredump_limit()
247 os_info("%llu\n", (unsigned long long)lim.rlim_max); in check_coredump_limit()
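
The main.c hit (set_stklim()) caps RLIMIT_STACK with the usual getrlimit()/setrlimit() pair. Below is a runnable userspace sketch of the same pattern; STACK_CAP is an arbitrary illustrative value, not UML's STACKSIZE.

/*
 * Userspace sketch of the set_stklim() pattern in the main.c hits above:
 * read RLIMIT_STACK and lower the soft limit if it is unlimited or above a
 * cap. STACK_CAP is an arbitrary illustrative value, not UML's STACKSIZE.
 */
#include <stdio.h>
#include <sys/resource.h>

#define STACK_CAP	(8UL * 1024 * 1024)	/* 8 MiB, hypothetical cap */

int main(void)
{
	struct rlimit lim;

	if (getrlimit(RLIMIT_STACK, &lim) < 0) {
		perror("getrlimit");
		return 1;
	}

	if (lim.rlim_cur == RLIM_INFINITY || lim.rlim_cur > STACK_CAP) {
		lim.rlim_cur = STACK_CAP;
		if (setrlimit(RLIMIT_STACK, &lim) < 0) {
			perror("setrlimit");
			return 1;
		}
	}

	printf("RLIMIT_STACK soft limit: %llu\n",
	       (unsigned long long)lim.rlim_cur);
	return 0;
}
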
/linux-6.12.1/drivers/usb/storage/
scsiglue.c
91 static int device_configure(struct scsi_device *sdev, struct queue_limits *lim) in device_configure() argument
106 lim->max_hw_sectors = min(lim->max_hw_sectors, max_sectors); in device_configure()
113 lim->max_hw_sectors = 0x7FFFFF; in device_configure()
119 lim->max_hw_sectors = 2048; in device_configure()
126 lim->max_hw_sectors = min_t(size_t, in device_configure()
127 lim->max_hw_sectors, dma_max_mapping_size(dev) >> SECTOR_SHIFT); in device_configure()
588 struct queue_limits lim; in max_sectors_store() local
596 lim = queue_limits_start_update(sdev->request_queue); in max_sectors_store()
597 lim.max_hw_sectors = ms; in max_sectors_store()
598 ret = queue_limits_commit_update(sdev->request_queue, &lim); in max_sectors_store()
/linux-6.12.1/drivers/pps/generators/
pps_gen_parport.c
60 long lim, delta; in hrtimer_event() local
78 lim = NSEC_PER_SEC - send_delay - dev->port_write_time; in hrtimer_event()
81 if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) { in hrtimer_event()
91 } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim); in hrtimer_event()
98 lim = NSEC_PER_SEC - dev->port_write_time; in hrtimer_event()
101 } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim); in hrtimer_event()
/linux-6.12.1/rust/kernel/block/mq/
gen_disk.rs
97 let mut lim: bindings::queue_limits = unsafe { core::mem::zeroed() }; in build() localVariable
99 lim.logical_block_size = self.logical_block_size; in build()
100 lim.physical_block_size = self.physical_block_size; in build()
102 lim.features = bindings::BLK_FEAT_ROTATIONAL; in build()
109 &mut lim, in build()
/linux-6.12.1/drivers/edac/
amd64_edac.h
136 #define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
137 #define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
289 struct reg_pair lim; member
422 u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff; in get_dram_limit() local
425 return lim; in get_dram_limit()
427 return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim; in get_dram_limit()
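
The get_dram_limit() hit assembles a 64-bit DRAM limit from a lo/hi register pair: bits 31:16 of the low register become limit bits 39:24 with the low 24 bits filled with ones, and bits 7:0 of the high register, when non-zero, become bits 47:40. Below is a standalone arithmetic sketch of that composition with made-up register values.

/*
 * Standalone sketch of the bit composition in the get_dram_limit() hit
 * above: the low register supplies limit bits 39:24 (with the bottom 24
 * bits filled with ones), and, if non-zero, the high register supplies
 * bits 47:40. The register values in main() are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t dram_limit(uint32_t lo, uint32_t hi)
{
	uint64_t lim = (((uint64_t)lo & 0xffff0000) << 8) | 0x00ffffff;

	if (!hi)
		return lim;

	return (((uint64_t)hi & 0x000000ff) << 40) | lim;
}

int main(void)
{
	printf("limit (lo only) = 0x%llx\n",
	       (unsigned long long)dram_limit(0x00130000, 0));
	printf("limit (with hi) = 0x%llx\n",
	       (unsigned long long)dram_limit(0x00130000, 0x01));
	return 0;
}
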
/linux-6.12.1/drivers/scsi/
sd.c
105 static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
108 struct queue_limits *lim);
125 struct queue_limits *lim) in sd_set_flush_flag() argument
128 lim->features |= BLK_FEAT_WRITE_CACHE; in sd_set_flush_flag()
130 lim->features |= BLK_FEAT_FUA; in sd_set_flush_flag()
132 lim->features &= ~BLK_FEAT_FUA; in sd_set_flush_flag()
134 lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); in sd_set_flush_flag()
173 struct queue_limits lim; in cache_type_store() local
178 lim = queue_limits_start_update(sdkp->disk->queue); in cache_type_store()
179 sd_set_flush_flag(sdkp, &lim); in cache_type_store()
[all …]
sun3x_esp.c
87 int lim; in sun3x_esp_dma_drain() local
95 lim = 1000; in sun3x_esp_dma_drain()
97 if (--lim == 0) { in sun3x_esp_dma_drain()
109 int lim; in sun3x_esp_dma_invalidate() local
111 lim = 1000; in sun3x_esp_dma_invalidate()
113 if (--lim == 0) { in sun3x_esp_dma_invalidate()
sun_esp.c
225 int can_do_sbus64, lim; in sbus_esp_reset_dma() local
265 lim = 1000; in sbus_esp_reset_dma()
267 if (--lim == 0) { in sbus_esp_reset_dma()
324 int lim; in sbus_esp_dma_drain() local
336 lim = 1000; in sbus_esp_dma_drain()
338 if (--lim == 0) { in sbus_esp_dma_drain()
366 int lim; in sbus_esp_dma_invalidate() local
368 lim = 1000; in sbus_esp_dma_invalidate()
370 if (--lim == 0) { in sbus_esp_dma_invalidate()
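
The sun3x_esp.c and sun_esp.c hits use lim as a bounded-poll countdown: retry up to 1000 times and bail out when --lim reaches zero. Below is a userspace analogue of that shape; the polled condition and the sleep stand in for the drivers' DMA status reads and udelay() calls.

/*
 * Userspace analogue of the bounded-poll countdown in the sun3x_esp.c and
 * sun_esp.c hits above: retry a condition up to 1000 times and report a
 * timeout when --lim reaches zero. The polled condition and the 1 ms sleep
 * are placeholders for the drivers' DMA status reads and udelay() calls.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool dma_idle(int *fake_busy_cycles)
{
	/* Stand-in for reading a DMA status register. */
	return --(*fake_busy_cycles) <= 0;
}

int main(void)
{
	int busy = 5;		/* pretend the "hardware" needs 5 polls */
	int lim = 1000;

	while (!dma_idle(&busy)) {
		if (--lim == 0) {
			fprintf(stderr, "DMA drain timeout\n");
			return 1;
		}
		usleep(1000);	/* placeholder for udelay() */
	}

	printf("drained, %d retries left\n", lim);
	return 0;
}
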
sd_zbc.c
591 int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, in sd_zbc_read_zones() argument
601 lim->features |= BLK_FEAT_ZONED; in sd_zbc_read_zones()
608 lim->zone_write_granularity = sdkp->physical_block_size; in sd_zbc_read_zones()
631 lim->max_open_zones = 0; in sd_zbc_read_zones()
633 lim->max_open_zones = sdkp->zones_max_open; in sd_zbc_read_zones()
634 lim->max_active_zones = 0; in sd_zbc_read_zones()
635 lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks); in sd_zbc_read_zones()
637 lim->max_zone_append_sectors = 0; in sd_zbc_read_zones()
/linux-6.12.1/drivers/regulator/
qcom-labibb-regulator.c
310 static int qcom_labibb_set_ocp(struct regulator_dev *rdev, int lim, in qcom_labibb_set_ocp() argument
322 if (lim || severity != REGULATOR_SEVERITY_PROT || !enable) in qcom_labibb_set_ocp()
565 struct labibb_current_limits *lim = &vreg->uA_limits; in qcom_labibb_set_current_limit() local
569 if (min_uA < lim->uA_min || max_uA < lim->uA_min) in qcom_labibb_set_current_limit()
573 int uA_limit = (lim->uA_step * i) + lim->uA_min; in qcom_labibb_set_current_limit()
587 mask = desc->csel_mask | lim->ovr_val; in qcom_labibb_set_current_limit()
589 val = (u32)sel | lim->ovr_val; in qcom_labibb_set_current_limit()
599 struct labibb_current_limits *lim = &vreg->uA_limits; in qcom_labibb_get_current_limit() local
608 return (cur_step * lim->uA_step) + lim->uA_min; in qcom_labibb_get_current_limit()
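
The qcom-labibb hits map a register step index to a current limit with uA_step * i + uA_min and back again in the getter. Below is a standalone sketch of that mapping; the step, minimum and count values are made up, and the real driver's selection policy may differ.

/*
 * Standalone sketch of the current-limit mapping in the qcom-labibb hits
 * above: each selectable step i corresponds to uA_min + i * uA_step, the
 * setter picks the first step inside [min_uA, max_uA], and the getter maps
 * a step index back to microamps. The step/min/count values are made up.
 */
#include <stdio.h>

struct current_limits {
	int uA_min;
	int uA_step;
	int num_steps;
};

static int select_step(const struct current_limits *lim, int min_uA, int max_uA)
{
	int i;

	if (min_uA < lim->uA_min || max_uA < lim->uA_min)
		return -1;

	for (i = 0; i < lim->num_steps; i++) {
		int uA_limit = lim->uA_step * i + lim->uA_min;

		if (uA_limit >= min_uA && uA_limit <= max_uA)
			return i;	/* register selector value */
	}
	return -1;
}

static int step_to_uA(const struct current_limits *lim, int step)
{
	return step * lim->uA_step + lim->uA_min;
}

int main(void)
{
	struct current_limits lim = { .uA_min = 300000, .uA_step = 50000,
				      .num_steps = 8 };
	int sel = select_step(&lim, 400000, 500000);

	printf("selector %d -> %d uA\n", sel, step_to_uA(&lim, sel));
	return 0;
}
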
/linux-6.12.1/drivers/nvme/host/
zns.c
108 void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, in nvme_update_zone_info() argument
111 lim->features |= BLK_FEAT_ZONED; in nvme_update_zone_info()
112 lim->max_open_zones = zi->max_open_zones; in nvme_update_zone_info()
113 lim->max_active_zones = zi->max_active_zones; in nvme_update_zone_info()
114 lim->max_zone_append_sectors = ns->ctrl->max_zone_append; in nvme_update_zone_info()
115 lim->chunk_sectors = ns->head->zsze = in nvme_update_zone_info()
core.c
1778 struct queue_limits *lim, struct nvme_ns_info *info) in nvme_init_integrity() argument
1780 struct blk_integrity *bi = &lim->integrity; in nvme_init_integrity()
1840 static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim) in nvme_config_discard() argument
1845 lim->max_hw_discard_sectors = in nvme_config_discard()
1848 lim->max_hw_discard_sectors = UINT_MAX; in nvme_config_discard()
1850 lim->max_hw_discard_sectors = 0; in nvme_config_discard()
1852 lim->discard_granularity = lim->logical_block_size; in nvme_config_discard()
1855 lim->max_discard_segments = ctrl->dmrl; in nvme_config_discard()
1857 lim->max_discard_segments = NVME_DSM_MAX_RANGES; in nvme_config_discard()
1985 struct nvme_id_ns *id, struct queue_limits *lim, in nvme_update_atomic_write_disk_info() argument
[all …]
