// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

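/*
 * ntfs_ioctl_fitrim - handle the FITRIM ioctl (discard free space).
 *
 * Userspace typically reaches this through fstrim(8); a minimal sketch of
 * the call looks roughly like:
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	ioctl(fd, FITRIM, &range);	/- 'fd' is any open fd on the mount -/
 */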
static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

/*
 * ntfs_fileattr_get - inode_operations::fileattr_get
 */
int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 flags = 0;

	if (inode->i_flags & S_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;

	if (inode->i_flags & S_APPEND)
		flags |= FS_APPEND_FL;

	if (is_compressed(ni))
		flags |= FS_COMPR_FL;

	if (is_encrypted(ni))
		flags |= FS_ENCRYPT_FL;

	fileattr_fill_flags(fa, flags);

	return 0;
}

/*
 * ntfs_fileattr_set - inode_operations::fileattr_set
 */
int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
		      struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 flags = fa->flags;
	unsigned int new_fl = 0;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_COMPR_FL))
		return -EOPNOTSUPP;

	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;

	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;

	/* Changing compression is allowed only for empty files and for directories. */
	if (!is_dedup(ni) && !is_encrypted(ni) &&
	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		/* Change compress state. */
		int err = ni_set_compress(inode, flags & FS_COMPR_FL);
		if (err)
			return err;
	}

	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);

	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);

	return 0;
}

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
	WARN_ON(valid >= new_valid);

	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

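			/*
			 * A sparse run has no backing clusters, so there is
			 * nothing on disk to zero; just advance the valid
			 * size past the hole and continue with the next run.
			 */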
			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
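		/*
		 * Walk every block-sized buffer that intersects [from, to):
		 * map it if needed (skipping holes), make sure it is up to
		 * date, and mark it dirty so the bytes zeroed below reach
		 * the disk on writeback.
		 */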
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

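	/*
	 * For a writable mapping of a sparse file, allocate backing clusters
	 * for the whole mapped range and extend the initialized (valid) size
	 * to cover it before any page can be dirtied through the mapping.
	 */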
	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			inode_lock(inode);
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to a sparse file.
		 * TODO: merge this fragment with fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters may take a long time.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}
		/*
		 * Allocate but not zero new clusters.
		 */
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

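	/*
	 * Clamp the valid (initialized) size to the new file size, rounded
	 * up to a block boundary.
	 */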
	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set,
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process an unaligned punch. */
		err = 0;
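		/*
		 * Zero the unaligned head [vbo, vbo_a) and tail [end_a, end)
		 * through the page cache, and punch only the frame-aligned
		 * middle [vbo_a, end_a) out of the allocation.
		 */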
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write out data that will be shifted, to preserve it
		 * when discarding the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Make sure the file is non-resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may take a long time.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

/*
 * check_read_restriction:
 * common code for ntfs_file_read_iter and ntfs_file_splice_read
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

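	/*
	 * Compressed data is processed one LZNT compression frame at a time:
	 * 1 << NTFS_LZNT_CUNIT (16) clusters, i.e. frame_size bytes.
	 */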
	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					unlock_page(page);
					put_page(page);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						unlock_page(page);
						put_page(page);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
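		/*
		 * copy_page_from_iter_atomic() may copy fewer bytes than
		 * requested; advance by the amount actually copied and bail
		 * out on a short copy so the outer loop can fault the user
		 * pages in and retry.
		 */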
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}

/*
 * check_write_restriction:
 * common code for ntfs_file_write_iter and ntfs_file_splice_write
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret;
	int err;

	err = check_write_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

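	/* Take the inode lock, but do not sleep for IOCB_NOWAIT callers. */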
	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are the last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)
	    /*
	     * MFT is the only file that uses inode->i_fop = &ntfs_file_operations
	     * without init_rwsem(&ni->file.run_lock) being called explicitly,
	     * so add an additional check for it here.
	     */
	    && inode->i_ino != MFT_REC_MFT) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    i_size_read(inode), &ni->i_valid, false,
				    NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

/*
 * ntfs_file_splice_write - file_operations::splice_write
 */
static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *file, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t err;
	struct inode *inode = file_inode(file);

	err = check_write_restriction(inode);
	if (err)
		return err;

	return iter_file_splice_write(pipe, file, ppos, len, flags);
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
	.fileattr_get	= ntfs_fileattr_get,
	.fileattr_set	= ntfs_fileattr_set,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.splice_write	= ntfs_file_splice_write,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
};
#endif
// clang-format on