Lines Matching +full:a +full:- +full:z

The matched lines below come from the zonefs file I/O code. Each entry shows the source line number, the matched line, and the enclosing function as reported by the search tool.

1 // SPDX-License-Identifier: GPL-2.0
32 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_read_iomap_begin() local
33 struct super_block *sb = inode->i_sb; in zonefs_read_iomap_begin()
38 * act as if there is a hole up to the file maximum size. in zonefs_read_iomap_begin()
40 mutex_lock(&zi->i_truncate_mutex); in zonefs_read_iomap_begin()
41 iomap->bdev = inode->i_sb->s_bdev; in zonefs_read_iomap_begin()
42 iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); in zonefs_read_iomap_begin()
44 if (iomap->offset >= isize) { in zonefs_read_iomap_begin()
45 iomap->type = IOMAP_HOLE; in zonefs_read_iomap_begin()
46 iomap->addr = IOMAP_NULL_ADDR; in zonefs_read_iomap_begin()
47 iomap->length = length; in zonefs_read_iomap_begin()
49 iomap->type = IOMAP_MAPPED; in zonefs_read_iomap_begin()
50 iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset; in zonefs_read_iomap_begin()
51 iomap->length = isize - iomap->offset; in zonefs_read_iomap_begin()
53 mutex_unlock(&zi->i_truncate_mutex); in zonefs_read_iomap_begin()
69 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_write_iomap_begin() local
70 struct super_block *sb = inode->i_sb; in zonefs_write_iomap_begin()
74 if (WARN_ON_ONCE(offset + length > z->z_capacity)) in zonefs_write_iomap_begin()
75 return -EIO; in zonefs_write_iomap_begin()
79 * checked when writes are issued, so warn if we see a page writeback in zonefs_write_iomap_begin()
82 if (WARN_ON_ONCE(zonefs_zone_is_seq(z) && !(flags & IOMAP_DIRECT))) in zonefs_write_iomap_begin()
83 return -EIO; in zonefs_write_iomap_begin()
90 mutex_lock(&zi->i_truncate_mutex); in zonefs_write_iomap_begin()
91 iomap->bdev = inode->i_sb->s_bdev; in zonefs_write_iomap_begin()
92 iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); in zonefs_write_iomap_begin()
93 iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset; in zonefs_write_iomap_begin()
95 if (iomap->offset >= isize) { in zonefs_write_iomap_begin()
96 iomap->type = IOMAP_UNWRITTEN; in zonefs_write_iomap_begin()
97 iomap->length = z->z_capacity - iomap->offset; in zonefs_write_iomap_begin()
99 iomap->type = IOMAP_MAPPED; in zonefs_write_iomap_begin()
100 iomap->length = isize - iomap->offset; in zonefs_write_iomap_begin()
102 mutex_unlock(&zi->i_truncate_mutex); in zonefs_write_iomap_begin()
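
Both iomap_begin helpers above compute the device address the same way: the zone start sector converted to bytes plus the block-aligned file offset, so each zonefs file maps linearly onto its zone. A small illustrative sketch of that arithmetic in plain userspace C (the sector, offset, and block-size values are made up for the example):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9  /* 512-byte sectors, as in the kernel */

/*
 * Illustrative only: mirrors the iomap->addr computation shown above,
 * (z->z_sector << SECTOR_SHIFT) + iomap->offset, with the offset first
 * aligned down to the filesystem block size.
 */
static uint64_t example_dev_addr(uint64_t zone_start_sector,
                                 uint64_t file_offset, uint64_t block_size)
{
    uint64_t aligned_off = file_offset & ~(block_size - 1); /* ALIGN_DOWN */

    return (zone_start_sector << SECTOR_SHIFT) + aligned_off;
}

int main(void)
{
    /* Hypothetical zone starting at sector 524288, 4 KiB blocks. */
    printf("device byte address: %llu\n",
           (unsigned long long)example_dev_addr(524288, 123456, 4096));
    return 0;
}
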
131 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_write_map_blocks() local
133 if (WARN_ON_ONCE(zonefs_zone_is_seq(z))) in zonefs_write_map_blocks()
134 return -EIO; in zonefs_write_map_blocks()
136 return -EIO; in zonefs_write_map_blocks()
139 if (offset >= wpc->iomap.offset && in zonefs_write_map_blocks()
140 offset < wpc->iomap.offset + wpc->iomap.length) in zonefs_write_map_blocks()
144 z->z_capacity - offset, in zonefs_write_map_blocks()
145 IOMAP_WRITE, &wpc->iomap, NULL); in zonefs_write_map_blocks()
166 zonefs_err(inode->i_sb, in zonefs_swap_activate()
167 "swap file: not a conventional zone file\n"); in zonefs_swap_activate()
168 return -EINVAL; in zonefs_swap_activate()
191 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_truncate() local
198 * only down to a 0 size, which is equivalent to a zone reset, and to in zonefs_file_truncate()
199 * the maximum file size, which is equivalent to a zone finish. in zonefs_file_truncate()
201 if (!zonefs_zone_is_seq(z)) in zonefs_file_truncate()
202 return -EPERM; in zonefs_file_truncate()
206 else if (isize == z->z_capacity) in zonefs_file_truncate()
209 return -EPERM; in zonefs_file_truncate()
214 filemap_invalidate_lock(inode->i_mapping); in zonefs_file_truncate()
217 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_truncate()
231 if (z->z_flags & ZONEFS_ZONE_OPEN) { in zonefs_file_truncate()
233 * Truncating a zone to EMPTY or FULL is the equivalent of in zonefs_file_truncate()
234 * closing the zone. For a truncation to 0, we need to in zonefs_file_truncate()
235 * re-open the zone to ensure new writes can be processed. in zonefs_file_truncate()
236 * For a truncation to the maximum file size, the zone is in zonefs_file_truncate()
243 z->z_flags &= ~ZONEFS_ZONE_OPEN; in zonefs_file_truncate()
248 z->z_wpoffset = isize; in zonefs_file_truncate()
252 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_truncate()
253 filemap_invalidate_unlock(inode->i_mapping); in zonefs_file_truncate()
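
The truncate fragments above show that a sequential zone file can only be truncated to 0 (equivalent to a zone reset) or to its maximum size (equivalent to a zone finish); any other size returns -EPERM. A minimal userspace sketch, assuming a hypothetical mount at /mnt/zonefs and that the inode block count reports the zone capacity as described in the zonefs documentation:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    /* Hypothetical zonefs mount point and sequential zone file. */
    const char *path = "/mnt/zonefs/seq/0";
    struct stat st;

    if (stat(path, &st)) {
        perror("stat");
        return 1;
    }

    /* Truncate to 0: zone reset, the write pointer goes back to the start. */
    if (truncate(path, 0))
        fprintf(stderr, "zone reset failed: %s\n", strerror(errno));

    /*
     * Truncate to the maximum file size: zone finish. The capacity is taken
     * from the inode block count (an assumption based on the zonefs docs).
     */
    if (truncate(path, (off_t)st.st_blocks * 512))
        fprintf(stderr, "zone finish failed: %s\n", strerror(errno));

    /* Any other size, e.g. truncate(path, 4096), is expected to fail with EPERM. */
    return 0;
}
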
265 return -EPERM; in zonefs_file_fsync()
274 ret = blkdev_issue_flush(inode->i_sb->s_bdev); in zonefs_file_fsync()
284 struct inode *inode = file_inode(vmf->vma->vm_file); in zonefs_filemap_page_mkwrite()
297 sb_start_pagefault(inode->i_sb); in zonefs_filemap_page_mkwrite()
298 file_update_time(vmf->vma->vm_file); in zonefs_filemap_page_mkwrite()
301 filemap_invalidate_lock_shared(inode->i_mapping); in zonefs_filemap_page_mkwrite()
303 filemap_invalidate_unlock_shared(inode->i_mapping); in zonefs_filemap_page_mkwrite()
305 sb_end_pagefault(inode->i_sb); in zonefs_filemap_page_mkwrite()
324 (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) in zonefs_file_mmap()
325 return -EINVAL; in zonefs_file_mmap()
328 vma->vm_ops = &zonefs_file_vm_ops; in zonefs_file_mmap()
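
The mmap fragment above rejects shared writable mappings with -EINVAL; per the zonefs documentation this restriction applies to sequential zone files, which cannot be written through the page cache. A hedged userspace sketch (the path is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/mnt/zonefs/seq/0";  /* hypothetical */
    struct stat st;
    void *ro, *rw;
    int fd;

    fd = open(path, O_RDWR);
    if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0) {
        fprintf(stderr, "cannot map %s\n", path);
        if (fd >= 0)
            close(fd);
        return 1;
    }

    /* Private read-only mapping of the written data: allowed. */
    ro = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (ro == MAP_FAILED)
        perror("read-only mmap");

    /* Shared writable mapping of a sequential file: expected to fail with
     * EINVAL per the VM_SHARED/VM_MAYWRITE check above. */
    rw = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (rw == MAP_FAILED)
        perror("shared writable mmap");
    else
        munmap(rw, st.st_size);

    if (ro != MAP_FAILED)
        munmap(ro, st.st_size);
    close(fd);
    return 0;
}
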
348 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_write_dio_end_io()
364 * but that is not a problem since a write completed in zonefs_file_write_dio_end_io()
369 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_write_dio_end_io()
370 if (i_size_read(inode) < iocb->ki_pos + size) { in zonefs_file_write_dio_end_io()
371 zonefs_update_stats(inode, iocb->ki_pos + size); in zonefs_file_write_dio_end_io()
372 zonefs_i_size_write(inode, iocb->ki_pos + size); in zonefs_file_write_dio_end_io()
374 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_write_dio_end_io()
386 * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
392 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_write_check_limits() local
394 loff_t max_size = z->z_capacity; in zonefs_write_check_limits()
399 return -EFBIG; in zonefs_write_check_limits()
401 count = min(count, limit - pos); in zonefs_write_check_limits()
404 if (!(file->f_flags & O_LARGEFILE)) in zonefs_write_check_limits()
408 return -EFBIG; in zonefs_write_check_limits()
410 return min(count, max_size - pos); in zonefs_write_check_limits()
415 struct file *file = iocb->ki_filp; in zonefs_write_checks()
418 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_write_checks() local
422 return -ETXTBSY; in zonefs_write_checks()
427 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) in zonefs_write_checks()
428 return -EINVAL; in zonefs_write_checks()
430 if (iocb->ki_flags & IOCB_APPEND) { in zonefs_write_checks()
431 if (zonefs_zone_is_cnv(z)) in zonefs_write_checks()
432 return -EINVAL; in zonefs_write_checks()
433 mutex_lock(&zi->i_truncate_mutex); in zonefs_write_checks()
434 iocb->ki_pos = z->z_wpoffset; in zonefs_write_checks()
435 mutex_unlock(&zi->i_truncate_mutex); in zonefs_write_checks()
438 count = zonefs_write_check_limits(file, iocb->ki_pos, in zonefs_write_checks()
452 * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
453 * elevator feature is being used (e.g. mq-deadline). The block layer always
459 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_dio_write()
461 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_dio_write() local
462 struct super_block *sb = inode->i_sb; in zonefs_file_dio_write()
470 if (zonefs_zone_is_seq(z) && !is_sync_kiocb(iocb) && in zonefs_file_dio_write()
471 (iocb->ki_flags & IOCB_NOWAIT)) in zonefs_file_dio_write()
472 return -EOPNOTSUPP; in zonefs_file_dio_write()
474 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_dio_write()
476 return -EAGAIN; in zonefs_file_dio_write()
487 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) { in zonefs_file_dio_write()
488 ret = -EINVAL; in zonefs_file_dio_write()
493 if (zonefs_zone_is_seq(z)) { in zonefs_file_dio_write()
494 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
495 if (iocb->ki_pos != z->z_wpoffset) { in zonefs_file_dio_write()
496 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
497 ret = -EINVAL; in zonefs_file_dio_write()
506 z->z_wpoffset += count; in zonefs_file_dio_write()
508 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
518 if (ret == -ENOTBLK) in zonefs_file_dio_write()
519 ret = -EBUSY; in zonefs_file_dio_write()
522 * For a failed IO or partial completion, trigger error recovery in zonefs_file_dio_write()
523 * to update the zone write pointer offset to a correct value. in zonefs_file_dio_write()
529 if (zonefs_zone_is_seq(z)) { in zonefs_file_dio_write()
531 ret = -EIO; in zonefs_file_dio_write()
532 if (ret < 0 && ret != -EIOCBQUEUED) in zonefs_file_dio_write()
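
The write-path fragments above encode the user-visible rules for sequential zone files: only direct I/O is accepted (the buffered-write helper below bails out with -EIO for sequential files), the write position must equal the zone write pointer (O_APPEND conveniently starts there), and both position and size must be block-size aligned or the write fails with -EINVAL. A hedged userspace sketch of an appending direct write; the path and block size are assumptions:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define BLOCK_SIZE 4096  /* assumed zonefs block size */

int main(void)
{
    const char *path = "/mnt/zonefs/seq/0";  /* hypothetical */
    void *buf;
    int fd;

    /* O_DIRECT is mandatory for sequential files; O_APPEND positions the
     * write at the current zone write pointer. */
    fd = open(path, O_WRONLY | O_DIRECT | O_APPEND);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Direct I/O buffers must be aligned, and the write size must be a
     * multiple of the block size. */
    if (posix_memalign(&buf, BLOCK_SIZE, BLOCK_SIZE)) {
        close(fd);
        return 1;
    }
    memset(buf, 0xab, BLOCK_SIZE);

    if (write(fd, buf, BLOCK_SIZE) != BLOCK_SIZE)
        perror("write");

    free(buf);
    close(fd);
    return 0;
}

A write issued at any offset other than the write pointer is rejected, which is why appending with O_APPEND (or tracking the write pointer via the file size) is the natural way to fill a sequential zone file.
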
545 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_buffered_write()
553 return -EIO; in zonefs_file_buffered_write()
555 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_buffered_write()
557 return -EAGAIN; in zonefs_file_buffered_write()
567 if (ret == -EIO) in zonefs_file_buffered_write()
580 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_write_iter()
581 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_write_iter() local
584 return -EPERM; in zonefs_file_write_iter()
586 if (sb_rdonly(inode->i_sb)) in zonefs_file_write_iter()
587 return -EROFS; in zonefs_file_write_iter()
590 if (iocb->ki_pos >= z->z_capacity) in zonefs_file_write_iter()
591 return -EFBIG; in zonefs_file_write_iter()
593 if (iocb->ki_flags & IOCB_DIRECT) { in zonefs_file_write_iter()
596 if (ret != -ENOTBLK) in zonefs_file_write_iter()
607 zonefs_io_error(file_inode(iocb->ki_filp), false); in zonefs_file_read_dio_end_io()
620 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_read_iter()
622 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_read_iter() local
623 struct super_block *sb = inode->i_sb; in zonefs_file_read_iter()
628 if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777))) in zonefs_file_read_iter()
629 return -EPERM; in zonefs_file_read_iter()
631 if (iocb->ki_pos >= z->z_capacity) in zonefs_file_read_iter()
634 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_read_iter()
636 return -EAGAIN; in zonefs_file_read_iter()
642 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
644 if (iocb->ki_pos >= isize) { in zonefs_file_read_iter()
645 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
649 iov_iter_truncate(to, isize - iocb->ki_pos); in zonefs_file_read_iter()
650 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
652 if (iocb->ki_flags & IOCB_DIRECT) { in zonefs_file_read_iter()
655 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) { in zonefs_file_read_iter()
656 ret = -EINVAL; in zonefs_file_read_iter()
659 file_accessed(iocb->ki_filp); in zonefs_file_read_iter()
664 if (ret == -EIO) in zonefs_file_read_iter()
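
On the read side, the fragments show that reads are truncated at the current inode size (the write pointer position for a sequential file) and that direct reads must be block-size aligned, otherwise -EINVAL. A hedged userspace sketch, with the path and block size assumed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define BLOCK_SIZE 4096  /* assumed zonefs block size */

int main(void)
{
    const char *path = "/mnt/zonefs/seq/0";  /* hypothetical */
    void *buf;
    ssize_t n;
    int fd;

    fd = open(path, O_RDONLY | O_DIRECT);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    if (posix_memalign(&buf, BLOCK_SIZE, BLOCK_SIZE)) {
        close(fd);
        return 1;
    }

    /* Aligned direct read from the start of the zone; a read at or beyond
     * the written data simply returns 0. */
    n = pread(fd, buf, BLOCK_SIZE, 0);
    if (n < 0)
        perror("pread");
    else
        printf("read %zd bytes\n", n);

    /* An unaligned direct read, e.g. pread(fd, buf, 512, 1), is expected to
     * fail with EINVAL per the block-size check above. */

    free(buf);
    close(fd);
    return 0;
}
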
680 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_splice_read() local
685 if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777))) in zonefs_file_splice_read()
686 return -EPERM; in zonefs_file_splice_read()
688 if (*ppos >= z->z_capacity) in zonefs_file_splice_read()
694 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_splice_read()
699 len = min_t(loff_t, len, isize - *ppos); in zonefs_file_splice_read()
700 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_splice_read()
704 if (ret == -EIO) in zonefs_file_splice_read()
721 if (!(file->f_mode & FMODE_WRITE)) in zonefs_seq_file_need_wro()
730 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_seq_file_write_open() local
733 mutex_lock(&zi->i_truncate_mutex); in zonefs_seq_file_write_open()
735 if (!zi->i_wr_refcnt) { in zonefs_seq_file_write_open()
736 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb); in zonefs_seq_file_write_open()
737 unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
739 if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { in zonefs_seq_file_write_open()
741 if (sbi->s_max_wro_seq_files in zonefs_seq_file_write_open()
742 && wro > sbi->s_max_wro_seq_files) { in zonefs_seq_file_write_open()
743 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
744 ret = -EBUSY; in zonefs_seq_file_write_open()
748 if (i_size_read(inode) < z->z_capacity) { in zonefs_seq_file_write_open()
752 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
755 z->z_flags |= ZONEFS_ZONE_OPEN; in zonefs_seq_file_write_open()
761 zi->i_wr_refcnt++; in zonefs_seq_file_write_open()
764 mutex_unlock(&zi->i_truncate_mutex); in zonefs_seq_file_write_open()
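
The open-for-write fragments above count how many sequential files are open for writing; with the explicit-open mount option, zonefs opens the zone on the first write open and returns -EBUSY once the configured limit of open sequential files is reached. A hedged sketch of handling that at open time (path hypothetical):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Open a sequential zone file for writing, tolerating the -EBUSY that the
 * explicit-open accounting above can return when too many sequential files
 * are already open for writing.
 */
static int open_seq_for_write(const char *path)
{
    int fd = open(path, O_WRONLY | O_DIRECT | O_APPEND);

    if (fd < 0 && errno == EBUSY)
        fprintf(stderr, "%s: open-zone limit reached, retry later\n", path);
    return fd;
}

int main(void)
{
    int fd = open_seq_for_write("/mnt/zonefs/seq/0");  /* hypothetical */

    if (fd >= 0)
        close(fd);
    return fd < 0;
}
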
773 file->f_mode |= FMODE_CAN_ODIRECT; in zonefs_file_open()
787 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_seq_file_write_close() local
788 struct super_block *sb = inode->i_sb; in zonefs_seq_file_write_close()
792 mutex_lock(&zi->i_truncate_mutex); in zonefs_seq_file_write_close()
794 zi->i_wr_refcnt--; in zonefs_seq_file_write_close()
795 if (zi->i_wr_refcnt) in zonefs_seq_file_write_close()
803 if (z->z_flags & ZONEFS_ZONE_OPEN) { in zonefs_seq_file_write_close()
808 * Leaving zones explicitly open may lead to a state in zonefs_seq_file_write_close()
811 * read-only. in zonefs_seq_file_write_close()
813 if (z->z_flags & ZONEFS_ZONE_OPEN && in zonefs_seq_file_write_close()
814 !(sb->s_flags & SB_RDONLY)) { in zonefs_seq_file_write_close()
817 z->z_sector, ret); in zonefs_seq_file_write_close()
819 "remounting filesystem read-only\n"); in zonefs_seq_file_write_close()
820 sb->s_flags |= SB_RDONLY; in zonefs_seq_file_write_close()
825 z->z_flags &= ~ZONEFS_ZONE_OPEN; in zonefs_seq_file_write_close()
829 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_close()
832 mutex_unlock(&zi->i_truncate_mutex); in zonefs_seq_file_write_close()
838 * If we explicitly open a zone we must close it again as well, but the in zonefs_file_release()
840 * the zone has gone offline or read-only). Make sure we don't fail the in zonefs_file_release()
841 * close(2) for user-space. in zonefs_file_release()