
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O intensive applications.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * but in Solaris, no easy way exists for applications to request
 * uncached I/O from the kernel.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 */
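/*
 * Illustration (not part of direct.c): what these semantics mean for an
 * application.  A minimal userspace sketch, assuming a file on an NFS
 * mount at the hypothetical path below.  O_DIRECT requests uncached I/O;
 * buffer, offset and length are kept aligned because the client does not
 * correct unaligned requests; and pwrite() returns only once the data is
 * held on permanent storage.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/nfs/scratch.bin", O_RDWR | O_CREAT | O_DIRECT, 0644);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
		return 1;
	memset(buf, 'x', 4096);

	/* aligned offset and length; no client-side page-cache copy */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		perror("pwrite");
	if (pread(fd, buf, 4096, 0) != 4096)
		perror("pread");

	free(buf);
	close(fd);
	return 0;
}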
/* in get_dreq() */
	atomic_inc(&dreq->io_count);

/* in put_dreq() */
	return atomic_dec_and_test(&dreq->io_count);
/* in nfs_direct_handle_truncated() */
	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
		return;
	if (dreq->max_count >= dreq_len) {
		dreq->max_count = dreq_len;
		if (dreq->count > dreq_len)
			dreq->count = dreq_len;
	}

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
		dreq->error = hdr->error;
/* in nfs_direct_count_bytes() */
	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;

	nfs_direct_handle_truncated(dreq, hdr, dreq_len);

	if (dreq_len > dreq->max_count)
		dreq_len = dreq->max_count;

	if (dreq->count < dreq_len)
		dreq->count = dreq_len;
/* in nfs_direct_truncate_request() */
	size_t req_start = (size_t)(offs - dreq->io_start);

	if (req_start < dreq->max_count)
		dreq->max_count = req_start;
	if (req_start < dreq->count)
		dreq->count = req_start;
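/*
 * Illustration (not part of direct.c): a userspace sketch of the byte
 * accounting done by the three helpers above, with made-up numbers.  A
 * reply covering [io_start, io_start + good_bytes) only ever raises
 * dreq->count, and never past dreq->max_count.
 */
#include <stdio.h>

int main(void)
{
	long long io_start = 4096;	/* start of the direct request */
	long long max_count = 16384;	/* bytes the caller asked for */
	long long count = 0;		/* bytes confirmed so far */

	/* server reply: 8192 good bytes starting at offset 4096 */
	long long hdr_io_start = 4096, good_bytes = 8192;
	long long hdr_end = hdr_io_start + good_bytes;
	long long dreq_len = hdr_end > io_start ? hdr_end - io_start : 0;

	if (dreq_len > max_count)
		dreq_len = max_count;
	if (count < dreq_len)
		count = dreq_len;

	printf("count=%lld max_count=%lld\n", count, max_count); /* 8192 16384 */
	return 0;
}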
/**
 * nfs_swap_rw - NFS address space operation for swap I/O
 *
 * Perform IO to the swap-file.  This is much like direct IO.
 */
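/*
 * The function body is elided in this excerpt.  A plausible sketch,
 * assuming nfs_swap_rw() simply dispatches to the direct read/write
 * paths according to the iterator's direction (the real body may
 * differ in detail):
 */
int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;

	if (iov_iter_rw(iter) == READ)
		ret = nfs_file_direct_read(iocb, iter, true);
	else
		ret = nfs_file_direct_write(iocb, iter, true);
	if (ret < 0)
		return ret;
	return 0;
}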
/* in nfs_init_cinfo_from_dreq() */
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
/* in nfs_direct_req_alloc() */
	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);
/* in nfs_direct_req_free() */
	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);

/* in nfs_direct_req_release() */
	kref_put(&dreq->kref, nfs_direct_req_free);
/* in nfs_dreq_bytes_left() */
	loff_t start = offset - dreq->io_start;
	return dreq->max_count - start;
/*
 * Collects and returns the final error value/byte-count.
 */
/* in nfs_direct_wait() */
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;
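/*
 * Illustration (not from this file): the completion pairing used by
 * nfs_direct_wait() above and nfs_direct_complete() below.  The names
 * here are hypothetical; the submitter blocks killably until the I/O
 * side calls complete().
 */
#include <linux/completion.h>

struct my_io {
	struct completion done;
	ssize_t count;
};

static ssize_t my_io_wait(struct my_io *io)
{
	/* returns -ERESTARTSYS if a fatal signal arrives while waiting */
	if (wait_for_completion_killable(&io->done))
		return -ERESTARTSYS;
	return io->count;
}

static void my_io_finish(struct my_io *io, ssize_t count)
{
	io->count = count;
	complete(&io->done);	/* wakes my_io_wait() */
}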
/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
/* in nfs_direct_complete() */
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res);
	}

	complete(&dreq->completion);
/* in nfs_direct_read_completion() */
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		/* ... */
	}
	/* ... */
	hdr->release(hdr);
/* in nfs_read_sync_pgio_error() */
	req = nfs_list_entry(head->next);

/* in nfs_direct_pgio_init() */
	get_dreq(hdr->dreq);
/*
 * ...  Otherwise, if no requests have been sent, just return an error.
 */
/* in nfs_direct_read_schedule_iovec() */
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	/* ... */
	npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
	/* ... for each of those pages: */
	unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

	req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
					pgbase, pos, req_len);
	/* ... */
	bytes -= req_len;
	/* ... */
	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	return result < 0 ? result : -EIO;
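/*
 * Illustration (not part of direct.c): the npages computation above is a
 * round-up that accounts for a page-unaligned head.  A userspace check
 * with sample numbers:
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pgbase = 512;	/* offset of the data in the first page */
	unsigned long result = 10000;	/* bytes pinned by the iterator */
	unsigned long npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;

	/* bytes 512..10511 touch pages 0, 1 and 2 -> prints 3 */
	printf("npages = %lu\n", npages);
	return 0;
}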
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.
 */
/* in nfs_file_direct_read() */
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	/* ... */
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		 file, count, (long long) iocb->ki_pos);
	/* ... */
	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	/* ... */
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (user_backed_iter(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
	/* ... */
	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
	/* ... */
	if (result > 0) {
		requested -= result;
		iocb->ki_pos += result;
	}
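/*
 * Illustration (not part of direct.c): the asynchronous path above, where
 * dreq->iocb is set and the syscall returns -EIOCBQUEUED rather than
 * blocking in nfs_direct_wait().  A minimal Linux AIO sketch; the path is
 * hypothetical and the program needs -laio to link.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd;

	fd = open("/mnt/nfs/data.bin", O_RDONLY | O_DIRECT);
	if (fd < 0 || io_setup(1, &ctx) < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	io_prep_pread(&cb, fd, buf, 4096, 0);	/* 4 KiB at offset 0 */
	if (io_submit(ctx, 1, cbs) != 1)	/* returns once the I/O is queued */
		return 1;
	if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)
		printf("read %ld bytes\n", (long)ev.res);

	io_destroy(ctx);
	close(fd);
	free(buf);
	return 0;
}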
/* in nfs_direct_add_page_head() */
	struct nfs_page *head = req->wb_head;

	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
		return;
	if (!list_empty(&head->wb_list)) {
		nfs_unlock_request(head);
		return;
	}
	list_add(&head->wb_list, list);
	kref_get(&head->wb_kref);
	kref_get(&head->wb_kref);
/* in nfs_direct_join_group() */
	if (req->wb_head != req) {
		nfs_direct_add_page_head(&req->wb_list, req);
		continue;
	}

	subreq = req->wb_this_page;
	/* ... */
	do {
		if (!list_empty(&subreq->wb_list)) {
			nfs_list_remove_request(subreq);
			nfs_release_request(subreq);
		}
	} while ((subreq = subreq->wb_this_page) != req);
/* in nfs_direct_write_scan_commit_list() */
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	pnfs_recover_commit_reqs(list, cinfo);
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
/* in nfs_direct_write_reschedule() */
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);

	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	/* ... requeue each request, bumping its transmission count: */
	req->wb_nio++;
	/* ... on failure to re-add a request: */
	spin_lock(&dreq->lock);
	if (dreq->error < 0) {
		desc.pg_error = dreq->error;
	} else if (desc.pg_error != -EAGAIN) {
		dreq->flags = 0;
		if (!desc.pg_error)
			desc.pg_error = -EIO;
		dreq->error = desc.pg_error;
	} else
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	/* ... then, for each request still on the list: */
	if (desc.pg_error == -EAGAIN) {
		nfs_mark_request_commit(req, NULL, &cinfo, 0);
	} else {
		spin_lock(&dreq->lock);
		nfs_direct_truncate_request(dreq, req);
		spin_unlock(&dreq->lock);
	}
/* in nfs_direct_commit_complete() */
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_direct_req *dreq = data->dreq;
	int status = data->task.tk_status;

	spin_lock(&dreq->lock);
	if (status < 0) {
		/* errors in commit are fatal */
		dreq->error = status;
		dreq->flags = NFS_ODIRECT_DONE;
	} else {
		status = dreq->error;
	}
	spin_unlock(&dreq->lock);
	/* ... */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (status < 0) {
			spin_lock(&dreq->lock);
			nfs_direct_truncate_request(dreq, req);
			spin_unlock(&dreq->lock);
			nfs_release_request(req);
		} else if (!nfs_write_match_verf(verf, req)) {
			spin_lock(&dreq->lock);
			if (dreq->flags == 0)
				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			spin_unlock(&dreq->lock);
			/* despite the reboot, the write succeeded; reset wb_nio */
			req->wb_nio = 0;
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		}
		/* ... */
	}
/* in nfs_direct_resched_write() */
	struct nfs_direct_req *dreq = cinfo->dreq;

	spin_lock(&dreq->lock);
	if (dreq->flags != NFS_ODIRECT_DONE)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
/* in nfs_direct_commit_schedule() */
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) { /* res == -ENOMEM */
		spin_lock(&dreq->lock);
		if (dreq->flags == 0)
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		spin_unlock(&dreq->lock);
	}
/* in nfs_direct_write_clear_reqs() */
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
/* in nfs_direct_write_schedule_work() */
	int flags = dreq->flags;

	dreq->flags = 0;
	/* ... in the default case (no commit or reschedule pending): */
	nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
/* in nfs_direct_write_complete() */
	queue_work(nfsiod_workqueue, &dreq->work); /* calls nfs_direct_write_schedule_work */
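/*
 * Illustration (not from this file): the INIT_WORK()/queue_work() pairing
 * used by nfs_direct_req_alloc() and the line above.  Names here are
 * hypothetical; the point is that completion work is finished later in
 * process context, where the handler may sleep.
 */
#include <linux/workqueue.h>

struct my_req {
	struct work_struct work;
	/* ... request state ... */
};

static void my_complete_work(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	/* runs in worker-thread context; free to sleep or take mutexes */
	(void)req;
}

static void my_req_init(struct my_req *req)
{
	INIT_WORK(&req->work, my_complete_work);	/* as nfs_direct_req_alloc() does */
}

static void my_req_defer(struct my_req *req)
{
	queue_work(system_wq, &req->work);	/* as nfs_direct_write_complete() does */
}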
/* in nfs_direct_write_completion() */
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
	int flags = NFS_ODIRECT_DONE;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		if (!dreq->flags)
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
		flags = dreq->flags;
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (flags == NFS_ODIRECT_DO_COMMIT) {
			kref_get(&req->wb_kref);
			memcpy(&req->wb_verf, &hdr->verf.verifier,
			       sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->ds_commit_idx);
		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		}
		/* ... */
	}
	/* ... */
	hdr->release(hdr);
/* in nfs_write_sync_pgio_error() */
	req = nfs_list_entry(head->next);

/* in nfs_direct_write_reschedule_io() */
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	spin_unlock(&dreq->lock);
	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		/* ... */
	}
/*
 * ...  Otherwise, if no requests have been sent, just return an error.
 */
/* in nfs_direct_write_schedule_iovec() */
	struct inode *inode = dreq->inode;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
	/* ... */
	NFS_I(inode)->write_io += iov_iter_count(iter);
	/* ... */
	npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
	/* ... for each of those pages: */
	unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

	req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
					pgbase, pos, req_len);
	/* ... */
	bytes -= req_len;
	/* ... */
	if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
		/* ... */
	}
	/* ... */
	spin_lock(&dreq->lock);
	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	return result < 0 ? result : -EIO;
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
/* in nfs_file_direct_write() */
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		 file, iov_iter_count(iter), (long long) iocb->ki_pos);
	/* ... */
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
	/* ... */
	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	/* ... */
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;
	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
	/* ... */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}
	/* ... */
	requested -= result;
	iocb->ki_pos = pos + result;
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 */
/* in nfs_init_directcache() */
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;
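/*
 * The kmem_cache_create() call is elided in this excerpt.  A plausible
 * sketch of the allocation that precedes the check above (the exact
 * flags are a guess):
 */
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
					      sizeof(struct nfs_direct_req),
					      0, SLAB_RECLAIM_ACCOUNT, NULL);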
/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 */