Lines Matching full:limits
235 struct queue_limits *limits = data; in device_area_is_invalid() local
239 limits->logical_block_size >> SECTOR_SHIFT; in device_area_is_invalid()
293 limits->logical_block_size, bdev); in device_area_is_invalid()
301 limits->logical_block_size, bdev); in device_area_is_invalid()
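These matches appear to come from the Linux kernel's drivers/md/dm-table.c. The device_area_is_invalid() hits at 235-301 show the check converting the stacked logical block size to 512-byte sectors (>> SECTOR_SHIFT) and reporting an error when a target's start or length is not a multiple of it. A minimal user-space sketch of that alignment test follows; only SECTOR_SHIFT matches the kernel's constant, everything else is illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

    /* Toy version of the test: start/len are in 512-byte sectors and
     * must be multiples of the device's logical block size. */
    static bool area_is_misaligned(uint64_t start, uint64_t len,
    			       unsigned int logical_block_size)
    {
    	unsigned int lbs_sectors = logical_block_size >> SECTOR_SHIFT;

    	if (lbs_sectors <= 1)
    		return false;	/* 512-byte logical blocks always align */

    	/* power-of-two modulus via mask, the same trick the kernel uses */
    	return (start & (lbs_sectors - 1)) || (len & (lbs_sectors - 1));
    }

    int main(void)
    {
    	/* start=7 sectors on a 4096-byte (8-sector) logical block: bad */
    	printf("%d\n", area_is_misaligned(7, 8, 4096));		/* 1 */
    	printf("%d\n", area_is_misaligned(8, 16, 4096));	/* 0 */
    	return 0;
    }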
424 struct queue_limits *limits = data; in dm_set_device_limits() local
429 DMWARN("%s: Cannot set limits for nonexistent device %pg", in dm_set_device_limits()
434 if (blk_stack_limits(limits, &q->limits, in dm_set_device_limits()
440 q->limits.physical_block_size, in dm_set_device_limits()
441 q->limits.logical_block_size, in dm_set_device_limits()
442 q->limits.alignment_offset, in dm_set_device_limits()
450 queue_limits_stack_integrity_bdev(limits, bdev); in dm_set_device_limits()
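The dm_set_device_limits() matches at 424-450 show each underlying device's queue limits being folded into the table's limits with blk_stack_limits(); a negative return means the combination came out misaligned, and the DMWARN at 440-442 reports the offending physical/logical block sizes and alignment offset. Below is a toy model of that warn-but-continue path with a deliberately simplified stacking rule; the real blk_stack_limits() covers many more fields and a more involved alignment calculation.

    #include <stdio.h>

    struct toy_limits {
    	unsigned int logical_block_size;
    	unsigned int physical_block_size;
    	unsigned int alignment_offset;
    };

    /* Toy stacking: widen block sizes, then flag a device whose start
     * offset falls mid physical block. */
    static int toy_stack_limits(struct toy_limits *t, const struct toy_limits *b,
    			    unsigned long long start_bytes)
    {
    	if (b->logical_block_size > t->logical_block_size)
    		t->logical_block_size = b->logical_block_size;
    	if (b->physical_block_size > t->physical_block_size)
    		t->physical_block_size = b->physical_block_size;

    	t->alignment_offset = start_bytes % t->physical_block_size;
    	return t->alignment_offset ? -1 : 0;
    }

    int main(void)
    {
    	struct toy_limits top = { 512, 512, 0 };
    	struct toy_limits dev = { 512, 4096, 0 };

    	/* starts 512 bytes into a 4K physical block: warn, keep going */
    	if (toy_stack_limits(&top, &dev, 512) < 0)
    		fprintf(stderr, "toy: device misaligned: phys=%u log=%u offset=%u\n",
    			top.physical_block_size, top.logical_block_size,
    			top.alignment_offset);
    	return 0;
    }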
598 static void dm_set_stacking_limits(struct queue_limits *limits) in dm_set_stacking_limits() argument
600 blk_set_stacking_limits(limits); in dm_set_stacking_limits()
601 limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL; in dm_set_stacking_limits()
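dm_set_stacking_limits() (598-601) starts every table from permissive stacking defaults and then optimistically advertises BLK_FEAT_IO_STAT, BLK_FEAT_NOWAIT and BLK_FEAT_POLL; later per-device stacking and dm_table_set_restrictions() (see 1815 onward) clear whatever the underlying devices cannot honor. A sketch of that advertise-first, subtract-later flag pattern, with made-up bit values standing in for the kernel's BLK_FEAT_* constants:

    #include <stdint.h>

    /* Illustrative stand-ins for the kernel's BLK_FEAT_* bits. */
    #define TOY_FEAT_IO_STAT (1u << 0)
    #define TOY_FEAT_NOWAIT  (1u << 1)
    #define TOY_FEAT_POLL    (1u << 2)

    struct toy_limits { uint32_t features; };

    static void toy_set_stacking_limits(struct toy_limits *lim)
    {
    	lim->features = 0;	/* stand-in for blk_set_stacking_limits() */
    	/* advertise first; restriction passes subtract what devices lack */
    	lim->features |= TOY_FEAT_IO_STAT | TOY_FEAT_NOWAIT | TOY_FEAT_POLL;
    }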
612 struct queue_limits *limits) in validate_hardware_logical_block_alignment() argument
619 limits->logical_block_size >> SECTOR_SHIFT; in validate_hardware_logical_block_alignment()
644 /* combine all target devices' limits */ in validate_hardware_logical_block_alignment()
671 limits->logical_block_size); in validate_hardware_logical_block_alignment()
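validate_hardware_logical_block_alignment() (612-671) runs after all targets' limits are combined: it re-walks the table and fails if any target boundary would fall inside a single hardware logical block of the stacked size. A toy version of that boundary walk, describing targets only by their lengths in 512-byte sectors:

    #include <stdint.h>
    #include <stdio.h>

    static int toy_validate_alignment(const uint64_t *target_len_sectors, int n,
    				  unsigned int logical_block_size)
    {
    	unsigned int lbs_sectors = logical_block_size >> 9;
    	uint64_t boundary = 0;

    	for (int i = 0; i < n; i++) {
    		boundary += target_len_sectors[i];
    		/* a boundary inside a hardware block splits device I/O */
    		if (boundary & (lbs_sectors - 1))
    			return -1;
    	}
    	return 0;
    }

    int main(void)
    {
    	uint64_t ok[]  = { 8, 16 };	/* boundaries at 8 and 24: aligned */
    	uint64_t bad[] = { 6, 18 };	/* boundary at 6 splits a 4K block */

    	printf("%d %d\n", toy_validate_alignment(ok, 2, 4096),
    			  toy_validate_alignment(bad, 2, 4096));
    	return 0;
    }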
1070 if (md->queue->limits.features & BLK_FEAT_POLL) in dm_table_alloc_md_mempools()
1596 struct queue_limits *limits) in dm_calculate_queue_limits() argument
1602 dm_set_stacking_limits(limits); in dm_calculate_queue_limits()
1618 /* Set I/O hints portion of queue limits */ in dm_calculate_queue_limits()
1625 * Combine queue limits of all the devices this target uses. in dm_calculate_queue_limits()
1632 * After stacking all limits, validate all devices in dm_calculate_queue_limits()
1639 /* Set I/O hints portion of queue limits */ in dm_calculate_queue_limits()
1645 * overall queue limits. in dm_calculate_queue_limits()
1653 * Merge this target's queue limits into the overall limits in dm_calculate_queue_limits()
1656 if (blk_stack_limits(limits, &ti_limits, 0) < 0) in dm_calculate_queue_limits()
1665 if (!queue_limits_stack_integrity(limits, &ti_limits)) { in dm_calculate_queue_limits()
1683 if (limits->features & BLK_FEAT_ZONED) { in dm_calculate_queue_limits()
1685 * ...IF the above limits stacking determined a zoned model in dm_calculate_queue_limits()
1688 zoned = limits->features & BLK_FEAT_ZONED; in dm_calculate_queue_limits()
1689 zone_sectors = limits->chunk_sectors; in dm_calculate_queue_limits()
1694 return validate_hardware_logical_block_alignment(t, limits); in dm_calculate_queue_limits()
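The dm_calculate_queue_limits() matches (1596-1694) outline the whole pipeline: reset to stacking defaults, have each target stack in the limits of its own devices via iterate_devices()/dm_set_device_limits(), merge each target's result into the table-wide limits with blk_stack_limits(), resolve the zoned model from BLK_FEAT_ZONED and chunk_sectors, and finish with the alignment validation above. A compressed toy version of that loop, where two fields stand in for the full struct queue_limits:

    #include <limits.h>

    struct toy_limits {
    	unsigned int logical_block_size;
    	unsigned int max_sectors;
    };

    /* permissive starting point, like blk_set_stacking_limits() */
    static void toy_reset_stacking(struct toy_limits *l)
    {
    	l->logical_block_size = 512;
    	l->max_sectors = UINT_MAX;
    }

    /* widen block sizes, narrow caps, like blk_stack_limits() */
    static void toy_stack(struct toy_limits *t, const struct toy_limits *b)
    {
    	if (b->logical_block_size > t->logical_block_size)
    		t->logical_block_size = b->logical_block_size;
    	if (b->max_sectors < t->max_sectors)
    		t->max_sectors = b->max_sectors;
    }

    struct toy_target { struct toy_limits devs[4]; int ndevs; };

    static void toy_calculate_queue_limits(const struct toy_target *tgts, int n,
    				       struct toy_limits *limits)
    {
    	toy_reset_stacking(limits);		/* dm_set_stacking_limits() */
    	for (int i = 0; i < n; i++) {
    		struct toy_limits ti_limits;

    		toy_reset_stacking(&ti_limits);
    		for (int j = 0; j < tgts[i].ndevs; j++)	/* iterate_devices() */
    			toy_stack(&ti_limits, &tgts[i].devs[j]);
    		toy_stack(limits, &ti_limits);	/* blk_stack_limits(limits, &ti_limits, 0) */
    	}
    	/* the real function then resolves the zoned model and calls
    	 * validate_hardware_logical_block_alignment() */
    }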
1732 return !q->limits.max_write_zeroes_sectors; in device_not_write_zeroes_capable()
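The single device_not_write_zeroes_capable() hit at 1732 illustrates device-mapper's negative-predicate convention: support helpers ask each underlying device whether it is *not* capable, so one incapable device vetoes the feature for the whole table. A toy rendering of that pattern; the callback signature is simplified from the kernel's iterate_devices callout:

    #include <stdbool.h>

    struct toy_dev { unsigned int max_write_zeroes_sectors; };

    /* phrased negatively, matching the kernel's device_not_* style */
    static bool toy_device_not_write_zeroes_capable(const struct toy_dev *dev)
    {
    	return !dev->max_write_zeroes_sectors;
    }

    static bool toy_table_supports_write_zeroes(const struct toy_dev *devs, int n)
    {
    	for (int i = 0; i < n; i++)
    		if (toy_device_not_write_zeroes_capable(&devs[i]))
    			return false;	/* one incapable device disables it */
    	return true;
    }

When support is absent, dm_table_set_restrictions() disables the operation by zeroing its cap, as the max_write_zeroes_sectors = 0 match at 1836 below shows.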
1815 struct queue_limits *limits) in dm_table_set_restrictions() argument
1820 limits->features &= ~BLK_FEAT_NOWAIT; in dm_table_set_restrictions()
1827 limits->features &= ~BLK_FEAT_POLL; in dm_table_set_restrictions()
1830 limits->max_hw_discard_sectors = 0; in dm_table_set_restrictions()
1831 limits->discard_granularity = 0; in dm_table_set_restrictions()
1832 limits->discard_alignment = 0; in dm_table_set_restrictions()
1836 limits->max_write_zeroes_sectors = 0; in dm_table_set_restrictions()
1839 limits->max_secure_erase_sectors = 0; in dm_table_set_restrictions()
1842 limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; in dm_table_set_restrictions()
1845 limits->features |= BLK_FEAT_DAX; in dm_table_set_restrictions()
1849 limits->features &= ~BLK_FEAT_DAX; in dm_table_set_restrictions()
1856 (limits->features & BLK_FEAT_ZONED)) { in dm_table_set_restrictions()
1857 r = dm_set_zones_restrictions(t, q, limits); in dm_table_set_restrictions()
1862 r = queue_limits_set(q, limits); in dm_table_set_restrictions()
1867 * Now that the limits are set, check the zones mapped by the table in dm_table_set_restrictions()
1871 (limits->features & BLK_FEAT_ZONED)) { in dm_table_set_restrictions()
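Finally, dm_table_set_restrictions() (1815-1871) reconciles the optimistic feature bits with reality: it clears BLK_FEAT_NOWAIT, BLK_FEAT_POLL and BLK_FEAT_DAX when the table cannot deliver them, disables discard/write-zeroes/secure-erase by zeroing their sector caps, adds BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA for flush-capable tables, applies zone restrictions, and commits the result with queue_limits_set(). A toy sketch of that subtract-and-commit shape; the *_ok parameters are hypothetical stand-ins for the kernel's dm_table_supports_*() helpers, and the bit values are illustrative.

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_FEAT_NOWAIT      (1u << 0)
    #define TOY_FEAT_POLL        (1u << 1)
    #define TOY_FEAT_WRITE_CACHE (1u << 2)
    #define TOY_FEAT_FUA         (1u << 3)
    #define TOY_FEAT_DAX         (1u << 4)

    struct toy_limits {
    	uint32_t features;
    	uint32_t max_hw_discard_sectors;
    	uint32_t max_write_zeroes_sectors;
    };

    static void toy_set_restrictions(struct toy_limits *lim,
    				 bool nowait_ok, bool poll_ok,
    				 bool discard_ok, bool write_zeroes_ok,
    				 bool flush_ok, bool dax_ok)
    {
    	if (!nowait_ok)
    		lim->features &= ~TOY_FEAT_NOWAIT;
    	if (!poll_ok)
    		lim->features &= ~TOY_FEAT_POLL;
    	if (!discard_ok)
    		lim->max_hw_discard_sectors = 0;	/* disables discard */
    	if (!write_zeroes_ok)
    		lim->max_write_zeroes_sectors = 0;
    	if (flush_ok)
    		lim->features |= TOY_FEAT_WRITE_CACHE | TOY_FEAT_FUA;
    	if (dax_ok)
    		lim->features |= TOY_FEAT_DAX;
    	else
    		lim->features &= ~TOY_FEAT_DAX;
    	/* the kernel then applies zone restrictions and commits the
    	 * whole set atomically via queue_limits_set() */
    }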