Lines Matching +full:buffered +full:- +full:positive

1 // SPDX-License-Identifier: GPL-2.0-only
20 #define BUFFERED 0x08 /* The pagecache needs cleaning up */ macro
32 struct netfs_inode *ictx = netfs_inode(folio->mapping->host); in netfs_folio_written_back()
43 fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len; in netfs_folio_written_back()
44 if (fend > ictx->zero_point) in netfs_folio_written_back()
45 ictx->zero_point = fend; in netfs_folio_written_back()
48 group = finfo->netfs_group; in netfs_folio_written_back()
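
The netfs_folio_written_back() fragments above move the inode's zero_point forward past the end of any dirty region that has just been written back, and only ever forward. A minimal, self-contained sketch of that idea, using hypothetical names rather than the netfs structures:

    /* Sketch only (hypothetical types, not the kernel code): a
     * monotonically advancing watermark updated as dirty data is
     * written back.
     */
    #include <stdio.h>

    struct demo_inode_state {
            unsigned long long zero_point; /* advanced as written-back data extends the file */
    };

    void demo_note_written_back(struct demo_inode_state *st,
                                unsigned long long dirty_start,
                                unsigned long long dirty_len)
    {
            unsigned long long fend = dirty_start + dirty_len;

            /* Only ever move forward, mirroring
             * "if (fend > ictx->zero_point) ictx->zero_point = fend;".
             */
            if (fend > st->zero_point)
                    st->zero_point = fend;
    }

    int main(void)
    {
            struct demo_inode_state st = { .zero_point = 4096 };

            demo_note_written_back(&st, 8192, 1024); /* advances to 9216 */
            demo_note_written_back(&st, 0, 512);     /* no change */
            printf("zero_point=%llu\n", st.zero_point);
            return 0;
    }
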
86 struct folio_queue *folioq = wreq->buffer; in netfs_writeback_unlock_folios()
87 unsigned long long collected_to = wreq->collected_to; in netfs_writeback_unlock_folios()
88 unsigned int slot = wreq->buffer_head_slot; in netfs_writeback_unlock_folios()
90 if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) { in netfs_writeback_unlock_folios()
110 wreq->debug_id, folio->index)) in netfs_writeback_unlock_folios()
116 flen = finfo ? finfo->dirty_offset + finfo->dirty_len : fsize; in netfs_writeback_unlock_folios()
118 fend = min_t(unsigned long long, fpos + flen, wreq->i_size); in netfs_writeback_unlock_folios()
126 wreq->nr_group_rel += netfs_folio_written_back(folio); in netfs_writeback_unlock_folios()
127 wreq->cleaned_to = fpos + fsize; in netfs_writeback_unlock_folios()
137 if (READ_ONCE(wreq->buffer_tail) == folioq) in netfs_writeback_unlock_folios()
147 wreq->buffer = folioq; in netfs_writeback_unlock_folios()
148 wreq->buffer_head_slot = slot; in netfs_writeback_unlock_folios()
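
netfs_writeback_unlock_folios() walks the request's folio queue, clamping each folio's written extent to i_size and recording cleaned_to as folios come off writeback. A standalone sketch of that walk (hypothetical types; the stop condition is an assumption based on the clamping and cleaned_to updates shown above):

    /* Sketch only, not the netfs structures. */
    struct demo_folio { unsigned long long pos, len; };

    unsigned long long demo_unlock_written(const struct demo_folio *q, int n,
                                           unsigned long long collected_to,
                                           unsigned long long i_size)
    {
            unsigned long long cleaned_to = 0;

            for (int i = 0; i < n; i++) {
                    unsigned long long fpos = q[i].pos;
                    unsigned long long fend = fpos + q[i].len;

                    /* Clamp to EOF, cf. min_t(..., fpos + flen, wreq->i_size). */
                    if (fend > i_size)
                            fend = i_size;

                    /* Assume a folio only comes off writeback once the
                     * collection point has passed its whole extent. */
                    if (collected_to < fend)
                            break;

                    cleaned_to = fpos + q[i].len; /* cf. wreq->cleaned_to = fpos + fsize */
            }
            return cleaned_to;
    }
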
159 _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); in netfs_retry_write_stream()
161 if (list_empty(&stream->subrequests)) in netfs_retry_write_stream()
164 if (stream->source == NETFS_UPLOAD_TO_SERVER && in netfs_retry_write_stream()
165 wreq->netfs_ops->retry_request) in netfs_retry_write_stream()
166 wreq->netfs_ops->retry_request(wreq, stream); in netfs_retry_write_stream()
168 if (unlikely(stream->failed)) in netfs_retry_write_stream()
172 if (!stream->prepare_write) { in netfs_retry_write_stream()
175 list_for_each_entry(subreq, &stream->subrequests, rreq_link) { in netfs_retry_write_stream()
176 if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) in netfs_retry_write_stream()
178 if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { in netfs_retry_write_stream()
179 struct iov_iter source = subreq->io_iter; in netfs_retry_write_stream()
181 iov_iter_revert(&source, subreq->len - source.count); in netfs_retry_write_stream()
182 __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); in netfs_retry_write_stream()
190 next = stream->subrequests.next; in netfs_retry_write_stream()
205 start = from->start + from->transferred; in netfs_retry_write_stream()
206 len = from->len - from->transferred; in netfs_retry_write_stream()
208 if (test_bit(NETFS_SREQ_FAILED, &from->flags) || in netfs_retry_write_stream()
209 !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) in netfs_retry_write_stream()
212 list_for_each_continue(next, &stream->subrequests) { in netfs_retry_write_stream()
214 if (subreq->start + subreq->transferred != start + len || in netfs_retry_write_stream()
215 test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) || in netfs_retry_write_stream()
216 !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) in netfs_retry_write_stream()
219 len += to->len; in netfs_retry_write_stream()
226 source = from->io_iter; in netfs_retry_write_stream()
231 list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { in netfs_retry_write_stream()
236 __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); in netfs_retry_write_stream()
237 __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); in netfs_retry_write_stream()
238 stream->prepare_write(subreq); in netfs_retry_write_stream()
240 part = min(len, stream->sreq_max_len); in netfs_retry_write_stream()
241 subreq->len = part; in netfs_retry_write_stream()
242 subreq->start = start; in netfs_retry_write_stream()
243 subreq->transferred = 0; in netfs_retry_write_stream()
244 len -= part; in netfs_retry_write_stream()
247 __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags)) in netfs_retry_write_stream()
263 &stream->subrequests, rreq_link) { in netfs_retry_write_stream()
265 list_del(&subreq->rreq_link); in netfs_retry_write_stream()
278 subreq->source = to->source; in netfs_retry_write_stream()
279 subreq->start = start; in netfs_retry_write_stream()
280 subreq->debug_index = atomic_inc_return(&wreq->subreq_counter); in netfs_retry_write_stream()
281 subreq->stream_nr = to->stream_nr; in netfs_retry_write_stream()
282 __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); in netfs_retry_write_stream()
284 trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, in netfs_retry_write_stream()
285 refcount_read(&subreq->ref), in netfs_retry_write_stream()
289 list_add(&subreq->rreq_link, &to->rreq_link); in netfs_retry_write_stream()
293 stream->sreq_max_len = len; in netfs_retry_write_stream()
294 stream->sreq_max_segs = INT_MAX; in netfs_retry_write_stream()
295 switch (stream->source) { in netfs_retry_write_stream()
298 stream->sreq_max_len = umin(len, wreq->wsize); in netfs_retry_write_stream()
307 stream->prepare_write(subreq); in netfs_retry_write_stream()
309 part = umin(len, stream->sreq_max_len); in netfs_retry_write_stream()
310 subreq->len = subreq->transferred + part; in netfs_retry_write_stream()
311 len -= part; in netfs_retry_write_stream()
314 __set_bit(NETFS_SREQ_BOUNDARY, &to->flags); in netfs_retry_write_stream()
324 } while (!list_is_head(next, &stream->subrequests)); in netfs_retry_write_stream()
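
The netfs_retry_write_stream() fragments implement a coalesce-and-resplit scheme: the unwritten tails of contiguous subrequests flagged NETFS_SREQ_NEED_RETRY are merged into one span, which is then carved back into chunks no larger than the stream's maximum subrequest length. A simplified, self-contained sketch of that replanning (hypothetical types, no locking; allocating extra subrequests for any remainder, as the real code does, is omitted):

    /* Sketch only, not the netfs structures. */
    struct demo_subreq {
            unsigned long long start, len, transferred;
            int need_retry;
    };

    void demo_replan_retry(struct demo_subreq *s, int n, unsigned long long max_len)
    {
            int i = 0;

            while (i < n) {
                    if (!s[i].need_retry) { i++; continue; }

                    /* Coalesce: start at the first unwritten byte and pull in
                     * every contiguous subrequest that also needs a retry. */
                    unsigned long long start = s[i].start + s[i].transferred;
                    unsigned long long len = s[i].len - s[i].transferred;
                    int j = i + 1;

                    while (j < n && s[j].need_retry &&
                           s[j].start + s[j].transferred == start + len) {
                            len += s[j].len;
                            j++;
                    }

                    /* Re-split: hand each existing slot a chunk of at most max_len. */
                    for (int k = i; k < j && len; k++) {
                            unsigned long long part = len < max_len ? len : max_len;

                            s[k].start = start;
                            s[k].len = part;
                            s[k].transferred = 0;
                            start += part;
                            len -= part;
                    }
                    /* Any leftover len would need freshly allocated subrequests
                     * (omitted here). */
                    i = j;
            }
    }
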
329 * encryption and the server copy changed due to a third-party write, we may
342 stream = &wreq->io_streams[s]; in netfs_retry_writes()
343 if (!stream->active) in netfs_retry_writes()
346 list_for_each_entry(subreq, &stream->subrequests, rreq_link) { in netfs_retry_writes()
347 wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS, in netfs_retry_writes()
358 stream = &wreq->io_streams[s]; in netfs_retry_writes()
359 if (stream->need_retry) { in netfs_retry_writes()
360 stream->need_retry = false; in netfs_retry_writes()
368 * retry some of the results - or even do an RMW cycle for content crypto.
383 _enter("%llx-%llx", wreq->start, wreq->start + wreq->len); in netfs_collect_write_results()
388 issued_to = atomic64_read(&wreq->issued_to); in netfs_collect_write_results()
391 if (wreq->origin == NETFS_WRITEBACK || in netfs_collect_write_results()
392 wreq->origin == NETFS_WRITETHROUGH || in netfs_collect_write_results()
393 wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) in netfs_collect_write_results()
394 notes = BUFFERED; in netfs_collect_write_results()
404 stream = &wreq->io_streams[s]; in netfs_collect_write_results()
406 if (!smp_load_acquire(&stream->active)) in netfs_collect_write_results()
409 front = stream->front; in netfs_collect_write_results()
413 // front->debug_index, front->start, front->transferred, front->len); in netfs_collect_write_results()
415 if (stream->collected_to < front->start) { in netfs_collect_write_results()
417 stream->collected_to = front->start; in netfs_collect_write_results()
421 if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) { in netfs_collect_write_results()
425 smp_rmb(); /* Read counters after I-P flag. */ in netfs_collect_write_results()
427 if (stream->failed) { in netfs_collect_write_results()
428 stream->collected_to = front->start + front->len; in netfs_collect_write_results()
432 if (front->start + front->transferred > stream->collected_to) { in netfs_collect_write_results()
433 stream->collected_to = front->start + front->transferred; in netfs_collect_write_results()
434 stream->transferred = stream->collected_to - wreq->start; in netfs_collect_write_results()
437 if (test_bit(NETFS_SREQ_FAILED, &front->flags)) { in netfs_collect_write_results()
438 stream->failed = true; in netfs_collect_write_results()
439 stream->error = front->error; in netfs_collect_write_results()
440 if (stream->source == NETFS_UPLOAD_TO_SERVER) in netfs_collect_write_results()
441 mapping_set_error(wreq->mapping, front->error); in netfs_collect_write_results()
445 if (front->transferred < front->len) { in netfs_collect_write_results()
446 stream->need_retry = true; in netfs_collect_write_results()
453 spin_lock_bh(&wreq->lock); in netfs_collect_write_results()
456 list_del_init(&front->rreq_link); in netfs_collect_write_results()
457 front = list_first_entry_or_null(&stream->subrequests, in netfs_collect_write_results()
459 stream->front = front; in netfs_collect_write_results()
460 spin_unlock_bh(&wreq->lock); in netfs_collect_write_results()
470 if (!front && issued_to > stream->collected_to) { in netfs_collect_write_results()
472 stream->collected_to = issued_to; in netfs_collect_write_results()
475 if (stream->collected_to < collected_to) in netfs_collect_write_results()
476 collected_to = stream->collected_to; in netfs_collect_write_results()
479 if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to) in netfs_collect_write_results()
480 wreq->collected_to = collected_to; in netfs_collect_write_results()
483 stream = &wreq->io_streams[s]; in netfs_collect_write_results()
484 if (stream->active) in netfs_collect_write_results()
488 trace_netfs_collect_state(wreq, wreq->collected_to, notes); in netfs_collect_write_results()
491 if (notes & BUFFERED) { in netfs_collect_write_results()
492 if (wreq->cleaned_to < wreq->collected_to) in netfs_collect_write_results()
495 wreq->cleaned_to = wreq->collected_to; in netfs_collect_write_results()
502 if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) { in netfs_collect_write_results()
504 clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags); in netfs_collect_write_results()
505 wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE); in netfs_collect_write_results()
518 netfs_put_group_many(wreq->group, wreq->nr_group_rel); in netfs_collect_write_results()
519 wreq->nr_group_rel = 0; in netfs_collect_write_results()
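
netfs_collect_write_results() derives the request-wide collected_to as the lowest collected_to of any active stream, so buffered pages are only cleaned once every stream (upload and cache copy alike) has passed them. A standalone sketch of that minimum calculation (hypothetical types):

    /* Sketch only, not the netfs structures. */
    #include <limits.h>

    struct demo_stream { int active; unsigned long long collected_to; };

    unsigned long long demo_overall_collected_to(const struct demo_stream *st, int nr,
                                                 unsigned long long current)
    {
            unsigned long long collected_to = ULLONG_MAX;

            for (int s = 0; s < nr; s++) {
                    if (!st[s].active)
                            continue;
                    if (st[s].collected_to < collected_to)
                            collected_to = st[s].collected_to;
            }

            /* Only ever move forward, as with wreq->collected_to. */
            if (collected_to != ULLONG_MAX && collected_to > current)
                    current = collected_to;
            return current;
    }
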
539 struct netfs_inode *ictx = netfs_inode(wreq->inode); in netfs_write_collection_worker()
543 _enter("R=%x", wreq->debug_id); in netfs_write_collection_worker()
546 if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) { in netfs_write_collection_worker()
556 if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) { in netfs_write_collection_worker()
564 struct netfs_io_stream *stream = &wreq->io_streams[s]; in netfs_write_collection_worker()
565 if (!stream->active) in netfs_write_collection_worker()
567 if (!list_empty(&stream->subrequests)) { in netfs_write_collection_worker()
571 if (stream->transferred < transferred) in netfs_write_collection_worker()
572 transferred = stream->transferred; in netfs_write_collection_worker()
576 wreq->transferred = transferred; in netfs_write_collection_worker()
579 if (wreq->io_streams[1].active && in netfs_write_collection_worker()
580 wreq->io_streams[1].failed) { in netfs_write_collection_worker()
584 ictx->ops->invalidate_cache(wreq); in netfs_write_collection_worker()
587 if (wreq->cleanup) in netfs_write_collection_worker()
588 wreq->cleanup(wreq); in netfs_write_collection_worker()
590 if (wreq->origin == NETFS_DIO_WRITE && in netfs_write_collection_worker()
591 wreq->mapping->nrpages) { in netfs_write_collection_worker()
595 * ->write_iter() is prevented from interfering by the DIO in netfs_write_collection_worker()
598 pgoff_t first = wreq->start >> PAGE_SHIFT; in netfs_write_collection_worker()
599 pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT; in netfs_write_collection_worker()
600 invalidate_inode_pages2_range(wreq->mapping, first, last); in netfs_write_collection_worker()
603 if (wreq->origin == NETFS_DIO_WRITE) in netfs_write_collection_worker()
604 inode_dio_end(wreq->inode); in netfs_write_collection_worker()
608 clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags); in netfs_write_collection_worker()
609 wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS); in netfs_write_collection_worker()
611 if (wreq->iocb) { in netfs_write_collection_worker()
612 size_t written = min(wreq->transferred, wreq->len); in netfs_write_collection_worker()
613 wreq->iocb->ki_pos += written; in netfs_write_collection_worker()
614 if (wreq->iocb->ki_complete) in netfs_write_collection_worker()
615 wreq->iocb->ki_complete( in netfs_write_collection_worker()
616 wreq->iocb, wreq->error ? wreq->error : written); in netfs_write_collection_worker()
617 wreq->iocb = VFS_PTR_POISON; in netfs_write_collection_worker()
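
netfs_write_collection_worker() takes the smallest per-stream transferred count as the request's result and, for an iocb-driven write, advances ki_pos by min(transferred, len) and completes with either the error or the byte count. A sketch of that completion convention (simplified, hypothetical types):

    /* Sketch only, not the netfs structures. */
    #include <stddef.h>

    struct demo_request {
            size_t transferred, len;
            int error;        /* 0 or a negative errno-style value */
            long long ki_pos; /* caller's file position */
    };

    long long demo_complete_write(struct demo_request *req)
    {
            size_t written = req->transferred < req->len ? req->transferred : req->len;

            req->ki_pos += written;
            /* Mirrors "wreq->error ? wreq->error : written". */
            return req->error ? req->error : (long long)written;
    }
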
629 if (!work_pending(&wreq->work)) { in netfs_wake_write_collector()
631 if (!queue_work(system_unbound_wq, &wreq->work)) in netfs_wake_write_collector()
637 * netfs_write_subrequest_terminated - Note the termination of a write operation.
646 * supplying a positive value to indicate the number of bytes transferred or a
663 struct netfs_io_request *wreq = subreq->rreq; in netfs_write_subrequest_terminated()
664 struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr]; in netfs_write_subrequest_terminated()
666 _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error); in netfs_write_subrequest_terminated()
668 switch (subreq->source) { in netfs_write_subrequest_terminated()
682 subreq->error = transferred_or_error; in netfs_write_subrequest_terminated()
683 if (subreq->error == -EAGAIN) in netfs_write_subrequest_terminated()
684 set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); in netfs_write_subrequest_terminated()
686 set_bit(NETFS_SREQ_FAILED, &subreq->flags); in netfs_write_subrequest_terminated()
689 switch (subreq->source) { in netfs_write_subrequest_terminated()
700 set_bit(NETFS_RREQ_PAUSE, &wreq->flags); in netfs_write_subrequest_terminated()
702 if (WARN(transferred_or_error > subreq->len - subreq->transferred, in netfs_write_subrequest_terminated()
703 "Subreq excess write: R=%x[%x] %zd > %zu - %zu", in netfs_write_subrequest_terminated()
704 wreq->debug_id, subreq->debug_index, in netfs_write_subrequest_terminated()
705 transferred_or_error, subreq->len, subreq->transferred)) in netfs_write_subrequest_terminated()
706 transferred_or_error = subreq->len - subreq->transferred; in netfs_write_subrequest_terminated()
708 subreq->error = 0; in netfs_write_subrequest_terminated()
709 subreq->transferred += transferred_or_error; in netfs_write_subrequest_terminated()
711 if (subreq->transferred < subreq->len) in netfs_write_subrequest_terminated()
712 set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); in netfs_write_subrequest_terminated()
717 clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags); in netfs_write_subrequest_terminated()
718 wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS); in netfs_write_subrequest_terminated()
723 if (list_is_first(&subreq->rreq_link, &stream->subrequests)) in netfs_write_subrequest_terminated()
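
netfs_write_subrequest_terminated() is the hook a backend calls with a positive byte count or a negative error: -EAGAIN marks the subrequest retryable, other errors mark it failed, an over-large count is clamped to what was still outstanding, and a short transfer flags a retry. A standalone sketch of that accounting (hypothetical types, not the kernel API):

    /* Sketch only, not the netfs structures. */
    #include <errno.h>
    #include <stddef.h>

    struct demo_subreq {
            size_t len, transferred;
            int error;
            int need_retry, failed;
    };

    void demo_subreq_terminated(struct demo_subreq *sr, long result)
    {
            if (result < 0) {
                    sr->error = (int)result;
                    if (result == -EAGAIN)
                            sr->need_retry = 1; /* transient: try again later */
                    else
                            sr->failed = 1;     /* hard failure */
                    return;
            }

            /* Clamp an over-report to what was actually outstanding. */
            if ((size_t)result > sr->len - sr->transferred)
                    result = (long)(sr->len - sr->transferred);

            sr->transferred += (size_t)result;
            if (sr->transferred < sr->len)
                    sr->need_retry = 1;         /* short write: send the rest again */
    }
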