Lines Matching full:fd
7 * Manage the dynamic fd arrays in the process files_struct.
36 kvfree(fdt->fd); in __free_fdtable()
51 * Copy 'count' fd bits from the old table to the new table and clear the extra
80 memcpy(nfdt->fd, ofdt->fd, cpy); in copy_fdtable()
81 memset((char *)nfdt->fd + cpy, 0, set); in copy_fdtable()
135 fdt->fd = data; in alloc_fdtable()
151 kvfree(fdt->fd); in alloc_fdtable()
160 * This function will allocate a new fdtable and both fd array and fdset, of
245 static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt) in __set_close_on_exec() argument
247 __set_bit(fd, fdt->close_on_exec); in __set_close_on_exec()
250 static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt) in __clear_close_on_exec() argument
252 if (test_bit(fd, fdt->close_on_exec)) in __clear_close_on_exec()
253 __clear_bit(fd, fdt->close_on_exec); in __clear_close_on_exec()
256 static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt) in __set_open_fd() argument
258 __set_bit(fd, fdt->open_fds); in __set_open_fd()
259 fd /= BITS_PER_LONG; in __set_open_fd()
260 if (!~fdt->open_fds[fd]) in __set_open_fd()
261 __set_bit(fd, fdt->full_fds_bits); in __set_open_fd()
264 static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) in __clear_open_fd() argument
266 __clear_bit(fd, fdt->open_fds); in __clear_open_fd()
267 __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits); in __clear_open_fd()
270 static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) in fd_is_open() argument
272 return test_bit(fd, fdt->open_fds); in fd_is_open()
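
The full_fds_bits bitmap updated in __set_open_fd()/__clear_open_fd() keeps one summary bit per word of open_fds, so the allocator can skip words in which every descriptor is taken. A minimal sketch of the lookup this enables, modelled on find_next_fd() (the name find_next_fd_sketch and the exact bounds handling are assumptions):

static unsigned int find_next_fd_sketch(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	/* Skip whole words whose summary bit says "completely full". */
	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;

	/* Only then scan the per-descriptor bitmap for a free slot. */
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}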
326 new_fdt->fd = &newf->fd_array[0]; in dup_fd()
333 * Check whether we need to allocate a larger fd array and fd set. in dup_fd()
355 * Reacquire the oldf lock and a pointer to its fd table in dup_fd()
356 * who knows, it may have a new, bigger fd table. We need in dup_fd()
366 old_fds = old_fdt->fd; in dup_fd()
367 new_fds = new_fdt->fd; in dup_fd()
375 * The fd may be claimed in the fd bitmap but not yet in dup_fd()
378 * fd is available to the new process. in dup_fd()
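
The copy loop these two comment fragments belong to behaves roughly as below (a sketch using dup_fd()'s local variable names, not the verbatim function): each inherited file gets an extra reference, while a slot that was reserved in the bitmap but never populated is released in the child.

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;

		if (f) {
			get_file(f);	/* parent and child now share this struct file */
		} else {
			/*
			 * Reserved in the parent's bitmap but not yet installed:
			 * hand the slot back so it is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}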
401 * It is safe to dereference the fd table without RCU or in close_files()
416 struct file * file = xchg(&fdt->fd[i], NULL); in close_files()
459 .fd = &init_files.fd_array[0],
488 unsigned int fd; in alloc_fd() local
495 fd = start; in alloc_fd()
496 if (fd < files->next_fd) in alloc_fd()
497 fd = files->next_fd; in alloc_fd()
499 if (fd < fdt->max_fds) in alloc_fd()
500 fd = find_next_fd(fdt, fd); in alloc_fd()
507 if (fd >= end) in alloc_fd()
510 error = expand_files(files, fd); in alloc_fd()
522 files->next_fd = fd + 1; in alloc_fd()
524 __set_open_fd(fd, fdt); in alloc_fd()
526 __set_close_on_exec(fd, fdt); in alloc_fd()
528 __clear_close_on_exec(fd, fdt); in alloc_fd()
529 error = fd; in alloc_fd()
532 if (rcu_access_pointer(fdt->fd[fd]) != NULL) { in alloc_fd()
533 printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd); in alloc_fd()
534 rcu_assign_pointer(fdt->fd[fd], NULL); in alloc_fd()
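
Joining the alloc_fd() fragments above, the allocation path is a reserve-or-grow loop; a condensed sketch (the name alloc_fd_sketch and the error labels are illustrative, and the defensive NULL-slot check just above is omitted):

static int alloc_fd_sketch(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	unsigned int fd;
	int error;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;
	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	error = -EMFILE;
	if (fd >= end)
		goto out;

	/*
	 * expand_files() returns <0 on error, 0 if nothing had to grow, and
	 * >0 if the table was reallocated (it may have blocked) - retry.
	 */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
out:
	spin_unlock(&files->file_lock);
	return error;
}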
554 static void __put_unused_fd(struct files_struct *files, unsigned int fd) in __put_unused_fd() argument
557 __clear_open_fd(fd, fdt); in __put_unused_fd()
558 if (fd < files->next_fd) in __put_unused_fd()
559 files->next_fd = fd; in __put_unused_fd()
562 void put_unused_fd(unsigned int fd) in put_unused_fd() argument
566 __put_unused_fd(files, fd); in put_unused_fd()
573 * Install a file pointer in the fd array.
588 void fd_install(unsigned int fd, struct file *file) in fd_install() argument
602 BUG_ON(fdt->fd[fd] != NULL); in fd_install()
603 rcu_assign_pointer(fdt->fd[fd], file); in fd_install()
610 BUG_ON(fdt->fd[fd] != NULL); in fd_install()
611 rcu_assign_pointer(fdt->fd[fd], file); in fd_install()
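
Together, fd allocation, put_unused_fd() and fd_install() give the usual three-step pattern for handing a new file to userspace: reserve a descriptor, create the file, then publish it (or return the slot on failure). A hedged sketch; demo_install_fd, the "[demo]" name and the use of anon_inode_getfile() are illustrative, not taken from this file:

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>

static int demo_install_fd(void *priv, const struct file_operations *fops)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve a slot in the bitmap */
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("[demo]", fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);		/* hand the reserved slot back */
		return PTR_ERR(file);
	}

	fd_install(fd, file);			/* publish: the fd is now visible to userspace */
	return fd;
}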
618 * file_close_fd_locked - return file associated with fd
620 * @fd: file descriptor to retrieve file for
626 * Returns: The file associated with @fd (NULL if @fd is not open)
628 struct file *file_close_fd_locked(struct files_struct *files, unsigned fd) in file_close_fd_locked() argument
635 if (fd >= fdt->max_fds) in file_close_fd_locked()
638 fd = array_index_nospec(fd, fdt->max_fds); in file_close_fd_locked()
639 file = fdt->fd[fd]; in file_close_fd_locked()
641 rcu_assign_pointer(fdt->fd[fd], NULL); in file_close_fd_locked()
642 __put_unused_fd(files, fd); in file_close_fd_locked()
647 int close_fd(unsigned fd) in close_fd() argument
653 file = file_close_fd_locked(files, fd); in close_fd()
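
close_fd() is the in-kernel counterpart of close(2). A hedged sketch of how the syscall sits on top of it (demo_sys_close is an illustrative name): because the fd table slot is already cleared, restart error codes from the final flush cannot be honoured and are folded into -EINTR.

static long demo_sys_close(unsigned int fd)
{
	int retval = close_fd(fd);

	/* Cannot restart the syscall: the file table entry is already gone. */
	if (retval == -ERESTARTSYS || retval == -ERESTARTNOINTR ||
	    retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)
		retval = -EINTR;

	return retval;
}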
663 * last_fd - return last valid index into fd table
676 unsigned int fd, unsigned int max_fd) in __range_cloexec() argument
684 if (fd <= max_fd) in __range_cloexec()
685 bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); in __range_cloexec()
689 static inline void __range_close(struct files_struct *files, unsigned int fd, in __range_close() argument
699 for (; fd <= max_fd; fd++) { in __range_close()
700 file = file_close_fd_locked(files, fd); in __range_close()
718 * @fd: starting file descriptor to close
723 * from @fd up to and including @max_fd are closed.
725 int __close_range(unsigned fd, unsigned max_fd, unsigned int flags) in __close_range() argument
733 if (fd > max_fd) in __close_range()
737 struct fd_range range = {fd, max_fd}, *punch_hole = &range; in __close_range()
758 __range_cloexec(cur_fds, fd, max_fd); in __close_range()
760 __range_close(cur_fds, fd, max_fd); in __close_range()
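
From userspace the two branches above correspond to the flags of close_range(2): flags == 0 closes the range via __range_close(), while CLOSE_RANGE_CLOEXEC only marks it via __range_cloexec(). A hedged userspace sketch (assumes a kernel with close_range(2); glibc 2.34+ provides the wrapper, otherwise use syscall(2)):

#define _GNU_SOURCE
#include <unistd.h>
#include <linux/close_range.h>
#include <stdio.h>

int main(void)
{
	/* Mark fds 3..UINT_MAX close-on-exec instead of closing them now. */
	if (close_range(3, ~0U, CLOSE_RANGE_CLOEXEC) != 0)
		perror("close_range");

	/* close_range(3, ~0U, 0) would close them immediately instead. */
	return 0;
}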
777 * file_close_fd - return file associated with fd
778 * @fd: file descriptor to retrieve file for
782 * Returns: The file associated with @fd (NULL if @fd is not open)
784 struct file *file_close_fd(unsigned int fd) in file_close_fd() argument
790 file = file_close_fd_locked(files, fd); in file_close_fd()
805 unsigned fd = i * BITS_PER_LONG; in do_close_on_exec() local
807 if (fd >= fdt->max_fds) in do_close_on_exec()
813 for ( ; set ; fd++, set >>= 1) { in do_close_on_exec()
817 file = fdt->fd[fd]; in do_close_on_exec()
820 rcu_assign_pointer(fdt->fd[fd], NULL); in do_close_on_exec()
821 __put_unused_fd(files, fd); in do_close_on_exec()
924 unsigned int fd, fmode_t mask) in __fget_files_rcu() argument
932 /* Mask is a 0 for invalid fd's, ~0 for valid ones */ in __fget_files_rcu()
933 nospec_mask = array_index_mask_nospec(fd, fdt->max_fds); in __fget_files_rcu()
936 * fdentry points to the 'fd' offset, or fdt->fd[0]. in __fget_files_rcu()
937 * Loading from fdt->fd[0] is always safe, because the in __fget_files_rcu()
940 fdentry = fdt->fd + (fd & nospec_mask); in __fget_files_rcu()
970 * Note that we don't need to re-check the 'fdt->fd' in __fget_files_rcu()
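
The two masking steps in __fget_files_rcu() distil to the following pattern (a sketch; nospec_fd_slot is an illustrative name): the index mask keeps the speculative load inside the array, and masking the loaded pointer turns the bogus slot-0 value for an out-of-range fd into NULL.

#include <linux/nospec.h>

static struct file *nospec_fd_slot(struct fdtable *fdt, unsigned int fd)
{
	/* ~0UL when fd < max_fds, 0 otherwise */
	unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
	/* An out-of-range fd degrades to &fdt->fd[0], never past the array. */
	struct file __rcu **fdentry = fdt->fd + (fd & mask);
	struct file *file = rcu_dereference_raw(*fdentry);

	/* ...and the result mask turns that slot-0 load into NULL. */
	return (struct file *)(mask & (unsigned long)file);
}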
999 static struct file *__fget_files(struct files_struct *files, unsigned int fd, in __fget_files() argument
1005 file = __fget_files_rcu(files, fd, mask); in __fget_files()
1011 static inline struct file *__fget(unsigned int fd, fmode_t mask) in __fget() argument
1013 return __fget_files(current->files, fd, mask); in __fget()
1016 struct file *fget(unsigned int fd) in fget() argument
1018 return __fget(fd, FMODE_PATH); in fget()
1022 struct file *fget_raw(unsigned int fd) in fget_raw() argument
1024 return __fget(fd, 0); in fget_raw()
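
fget() and fget_raw() both pin the file with a reference that must be dropped with fput(); the FMODE_PATH mask passed by fget() makes it reject O_PATH files, which fget_raw() accepts. A hedged usage sketch (demo_use_fd and the vfs_fsync() call are illustrative):

#include <linux/file.h>
#include <linux/fs.h>

static long demo_use_fd(unsigned int fd)
{
	struct file *file = fget(fd);	/* NULL for bad fds and O_PATH files */
	long ret;

	if (!file)
		return -EBADF;

	ret = vfs_fsync(file, 0);	/* any operation on the pinned file */

	fput(file);			/* drop the reference taken by fget() */
	return ret;
}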
1028 struct file *fget_task(struct task_struct *task, unsigned int fd) in fget_task() argument
1034 file = __fget_files(task->files, fd, 0); in fget_task()
1040 struct file *lookup_fdget_rcu(unsigned int fd) in lookup_fdget_rcu() argument
1042 return __fget_files_rcu(current->files, fd, 0); in lookup_fdget_rcu()
1047 struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd) in task_lookup_fdget_rcu() argument
1056 file = __fget_files_rcu(files, fd, 0); in task_lookup_fdget_rcu()
1066 unsigned int fd = *ret_fd; in task_lookup_next_fdget_rcu() local
1072 for (; fd < files_fdtable(files)->max_fds; fd++) { in task_lookup_next_fdget_rcu()
1073 file = __fget_files_rcu(files, fd, 0); in task_lookup_next_fdget_rcu()
1079 *ret_fd = fd; in task_lookup_next_fdget_rcu()
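
task_lookup_next_fdget_rcu() advances *ret_fd to the next populated descriptor of another task and returns the file with a reference held, which is how /proc/<pid>/fd-style iteration works. A hedged sketch (demo_dump_task_fds is an illustrative name):

static void demo_dump_task_fds(struct task_struct *task)
{
	unsigned int fd = 0;
	struct file *file;

	rcu_read_lock();
	while ((file = task_lookup_next_fdget_rcu(task, &fd))) {
		rcu_read_unlock();

		pr_info("fd %u -> %pD\n", fd, file);	/* reference held by the helper */
		fput(file);

		rcu_read_lock();
		fd++;					/* resume after the fd just reported */
	}
	rcu_read_unlock();
}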
1085 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
1100 static inline struct fd __fget_light(unsigned int fd, fmode_t mask) in __fget_light() argument
1115 file = files_lookup_fd_raw(files, fd); in __fget_light()
1120 file = __fget_files(files, fd, mask); in __fget_light()
1126 struct fd fdget(unsigned int fd) in fdget() argument
1128 return __fget_light(fd, FMODE_PATH); in fdget()
1132 struct fd fdget_raw(unsigned int fd) in fdget_raw() argument
1134 return __fget_light(fd, 0); in fdget_raw()
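
fdget()/fdget_raw() return a struct fd that only carries a real reference when the fd table is shared; callers must finish with fdput() before returning to userspace. A hedged sketch that assumes this generation's struct fd with a .file member (newer kernels wrap the access in fd_file()); demo_fd_size is an illustrative name:

static long demo_fd_size(unsigned int fd)
{
	struct fd f = fdget(fd);
	long ret;

	if (!f.file)
		return -EBADF;

	ret = i_size_read(file_inode(f.file));	/* use confined to the current syscall */

	fdput(f);	/* drops a reference only if fdget() actually took one */
	return ret;
}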
1153 struct fd fdget_pos(unsigned int fd) in fdget_pos() argument
1155 struct fd f = fdget(fd); in fdget_pos()
1176 void set_close_on_exec(unsigned int fd, int flag) in set_close_on_exec() argument
1183 __set_close_on_exec(fd, fdt); in set_close_on_exec()
1185 __clear_close_on_exec(fd, fdt); in set_close_on_exec()
1189 bool get_close_on_exec(unsigned int fd) in get_close_on_exec() argument
1193 res = close_on_exec(fd, current->files); in get_close_on_exec()
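
These two helpers back the FD_CLOEXEC handling of fcntl(2); roughly as below (a sketch, the function shape and name demo_fcntl_cloexec are illustrative):

static long demo_fcntl_cloexec(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F_GETFD:
		return get_close_on_exec(fd) ? FD_CLOEXEC : 0;
	case F_SETFD:
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		return 0;
	}
	return -EINVAL;
}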
1199 struct file *file, unsigned fd, unsigned flags) in do_dup2() argument
1220 fd = array_index_nospec(fd, fdt->max_fds); in do_dup2()
1221 tofree = fdt->fd[fd]; in do_dup2()
1222 if (!tofree && fd_is_open(fd, fdt)) in do_dup2()
1225 rcu_assign_pointer(fdt->fd[fd], file); in do_dup2()
1226 __set_open_fd(fd, fdt); in do_dup2()
1228 __set_close_on_exec(fd, fdt); in do_dup2()
1230 __clear_close_on_exec(fd, fdt); in do_dup2()
1236 return fd; in do_dup2()
1243 int replace_fd(unsigned fd, struct file *file, unsigned flags) in replace_fd() argument
1249 return close_fd(fd); in replace_fd()
1251 if (fd >= rlimit(RLIMIT_NOFILE)) in replace_fd()
1255 err = expand_files(files, fd); in replace_fd()
1258 return do_dup2(files, file, fd, flags); in replace_fd()
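
replace_fd() is the in-kernel dup2(): it installs the given file over an existing descriptor number, taking its own reference through the fd table. A hedged sketch in the style of the usermode-helper stdin setup (demo_wire_stdin is an illustrative name):

static int demo_wire_stdin(struct file *pipe_read_end)
{
	int err = replace_fd(0, pipe_read_end, 0);

	/* On success the fd table holds its own reference; ours is no longer needed. */
	fput(pipe_read_end);
	return err < 0 ? err : 0;
}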
1268 * @ufd: __user pointer to write new fd number to
1269 * @o_flags: the O_* flags to apply to the new fd entry
1272 * checks and count updates. Optionally writes the fd number to userspace, if
1278 * Returns newly installed fd or -ve on error.
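
A hedged usage sketch, assuming the three-parameter receive_fd() form this kernel-doc block appears to describe (@ufd may be NULL when the caller does not need the number copied out); demo_adopt_file is an illustrative name:

static int demo_adopt_file(struct file *file)
{
	/* receive_fd() takes its own reference on @file and never consumes ours. */
	return receive_fd(file, NULL, O_CLOEXEC);
}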
1419 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); in iterate_fd()