
Searched refs:folioq (Results 1 – 18 of 18) sorted by relevance

/linux-6.12.1/include/linux/
folio_queue.h
49 static inline void folioq_init(struct folio_queue *folioq) in folioq_init() argument
51 folio_batch_init(&folioq->vec); in folioq_init()
52 folioq->next = NULL; in folioq_init()
53 folioq->prev = NULL; in folioq_init()
54 folioq->marks = 0; in folioq_init()
55 folioq->marks2 = 0; in folioq_init()
56 folioq->marks3 = 0; in folioq_init()
66 static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq) in folioq_nr_slots() argument
78 static inline unsigned int folioq_count(struct folio_queue *folioq) in folioq_count() argument
80 return folio_batch_count(&folioq->vec); in folioq_count()
[all …]
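
The lines above show the segment initialiser and the capacity/occupancy accessors. A minimal sketch of how they combine, assuming only the folio_queue.h API excerpted here; the helper name and GFP parameter are illustrative, not from the source:

#include <linux/folio_queue.h>
#include <linux/slab.h>
#include <linux/printk.h>

/* Hypothetical helper: allocate one queue segment and initialise it.
 * folioq_init() clears the folio batch, the next/prev links and all
 * three mark bitmaps, as lines 49-56 above show. */
static struct folio_queue *example_folioq_alloc(gfp_t gfp)
{
	struct folio_queue *fq = kmalloc(sizeof(*fq), gfp);

	if (fq) {
		folioq_init(fq);
		pr_debug("capacity=%u occupied=%u\n",
			 folioq_nr_slots(fq), folioq_count(fq));
	}
	return fq;
}
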
iov_iter.h
151 const struct folio_queue *folioq = iter->folioq; in iterate_folioq() local
155 if (slot == folioq_nr_slots(folioq)) { in iterate_folioq()
157 folioq = folioq->next; in iterate_folioq()
162 struct folio *folio = folioq_folio(folioq, slot); in iterate_folioq()
170 fsize = folioq_folio_size(folioq, slot); in iterate_folioq()
182 if (slot == folioq_nr_slots(folioq) && folioq->next) { in iterate_folioq()
183 folioq = folioq->next; in iterate_folioq()
192 iter->folioq = folioq; in iterate_folioq()
uio.h
71 const struct folio_queue *folioq; member
292 const struct folio_queue *folioq,
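
iterate_folioq() above steps through the slots of one segment and hops to ->next when the segment is exhausted, while uio.h shows the iterator carrying a folioq cursor as a member. The same traversal shape in isolation, as a hypothetical helper:

#include <linux/folio_queue.h>

/* Illustrative only: total the bytes held across a chain of segments,
 * following ->next exactly as iterate_folioq() does. */
static size_t example_folioq_bytes(struct folio_queue *folioq)
{
	size_t total = 0;

	for (; folioq; folioq = folioq->next)
		for (unsigned int slot = 0; slot < folioq_count(folioq); slot++)
			if (folioq_folio(folioq, slot))
				total += folioq_folio_size(folioq, slot);
	return total;
}
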
/linux-6.12.1/fs/netfs/
read_pgpriv2.c
23 struct folio_queue *folioq, in netfs_pgpriv2_mark_copy_to_cache() argument
26 struct folio *folio = folioq_folio(folioq, slot); in netfs_pgpriv2_mark_copy_to_cache()
30 folioq_mark3(folioq, slot); in netfs_pgpriv2_mark_copy_to_cache()
37 static void netfs_pgpriv2_cancel(struct folio_queue *folioq) in netfs_pgpriv2_cancel() argument
42 while (folioq) { in netfs_pgpriv2_cancel()
43 if (!folioq->marks3) { in netfs_pgpriv2_cancel()
44 folioq = folioq->next; in netfs_pgpriv2_cancel()
48 slot = __ffs(folioq->marks3); in netfs_pgpriv2_cancel()
49 folio = folioq_folio(folioq, slot); in netfs_pgpriv2_cancel()
53 folioq_unmark3(folioq, slot); in netfs_pgpriv2_cancel()
[all …]
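
netfs_pgpriv2_cancel() above scans the third mark set with __ffs() to find the lowest marked slot, acts on it, then clears the mark. That idiom on its own, with the per-folio action stubbed out (the helper name is made up):

#include <linux/folio_queue.h>
#include <linux/bitops.h>

/* Sketch of the mark-scanning loop: segments with no marks3 bits set
 * are skipped; otherwise the lowest marked slot is processed and
 * unmarked until the whole chain is drained. */
static void example_drain_marks3(struct folio_queue *folioq)
{
	while (folioq) {
		unsigned int slot;
		struct folio *folio;

		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}
		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
		/* ... act on the marked folio here ... */
		folioq_unmark3(folioq, slot);
	}
}
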
buffered_read.c
70 struct folio_queue *folioq, in netfs_load_buffer_from_ra() argument
76 nr = __readahead_batch(rreq->ractl, (struct page **)folioq->vec.folios, in netfs_load_buffer_from_ra()
77 ARRAY_SIZE(folioq->vec.folios)); in netfs_load_buffer_from_ra()
78 folioq->vec.nr = nr; in netfs_load_buffer_from_ra()
80 struct folio *folio = folioq_folio(folioq, i); in netfs_load_buffer_from_ra()
84 folioq->orders[i] = order; in netfs_load_buffer_from_ra()
91 for (int i = nr; i < folioq_nr_slots(folioq); i++) in netfs_load_buffer_from_ra()
92 folioq_clear(folioq, i); in netfs_load_buffer_from_ra()
163 if (subreq->io_iter.folioq_slot >= folioq_nr_slots(subreq->io_iter.folioq)) { in netfs_prepare_read_iterator()
164 subreq->io_iter.folioq = subreq->io_iter.folioq->next; in netfs_prepare_read_iterator()
[all …]
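
netfs_load_buffer_from_ra() above fills the segment's folio batch from readahead, records each folio's order in ->orders[], and clears the unused tail slots. A reduced sketch of the bookkeeping half, assuming the folios are already in place (the helper name is illustrative):

#include <linux/folio_queue.h>
#include <linux/mm.h>

/* Sketch: record per-slot orders so folioq_folio_size() can report
 * sizes later, then clear every slot beyond the occupied ones. */
static void example_record_orders(struct folio_queue *folioq)
{
	unsigned int nr = folioq_count(folioq);

	for (unsigned int i = 0; i < nr; i++)
		folioq->orders[i] = folio_order(folioq_folio(folioq, i));
	for (unsigned int i = nr; i < folioq_nr_slots(folioq); i++)
		folioq_clear(folioq, i);
}
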
read_collect.c
36 struct folio_queue *folioq, in netfs_unlock_read_folio() argument
40 struct folio *folio = folioq_folio(folioq, slot); in netfs_unlock_read_folio()
68 netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot); in netfs_unlock_read_folio()
81 folioq_clear(folioq, slot); in netfs_unlock_read_folio()
92 struct folio_queue *folioq = subreq->curr_folioq; in netfs_consume_read_data() local
109 if (WARN_ON_ONCE(!folioq) || in netfs_consume_read_data()
110 WARN_ON_ONCE(!folioq_folio(folioq, slot)) || in netfs_consume_read_data()
111 WARN_ON_ONCE(folioq_folio(folioq, slot)->index != fpos / PAGE_SIZE)) { in netfs_consume_read_data()
117 if (folioq) { in netfs_consume_read_data()
118 struct folio *folio = folioq_folio(folioq, slot); in netfs_consume_read_data()
[all …]
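
netfs_consume_read_data() above guards itself with WARN_ON_ONCE() before consuming a slot: the segment must exist, the slot must hold a folio, and that folio must sit at the expected file position. The same checks as a hypothetical standalone predicate:

#include <linux/folio_queue.h>
#include <linux/mm.h>

/* Illustrative: return true only if (folioq, slot) holds the folio
 * expected at file position fpos, mirroring the checks above. */
static bool example_slot_matches(struct folio_queue *folioq,
				 unsigned int slot, loff_t fpos)
{
	struct folio *folio;

	if (WARN_ON_ONCE(!folioq))
		return false;
	folio = folioq_folio(folioq, slot);
	if (WARN_ON_ONCE(!folio) ||
	    WARN_ON_ONCE(folio->index != fpos / PAGE_SIZE))
		return false;
	return true;
}
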
iterator.c
199 const struct folio_queue *folioq = iter->folioq; in netfs_limit_folioq() local
210 if (slot >= folioq_nr_slots(folioq)) { in netfs_limit_folioq()
211 folioq = folioq->next; in netfs_limit_folioq()
217 size_t flen = folioq_folio_size(folioq, slot); in netfs_limit_folioq()
230 if (slot >= folioq_nr_slots(folioq)) { in netfs_limit_folioq()
231 folioq = folioq->next; in netfs_limit_folioq()
234 } while (folioq); in netfs_limit_folioq()
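
netfs_limit_folioq() caps a span by accumulating per-slot sizes until a limit is reached, crossing into ->next as needed. A simplified sketch of that accumulation (the name and parameters are illustrative; the real function works on an iov_iter):

#include <linux/folio_queue.h>

/* Sketch: sum slot sizes from (folioq, slot) onwards, clamping the
 * result at max_len, in the same walk order as the excerpt above. */
static size_t example_limit_span(const struct folio_queue *folioq,
				 unsigned int slot, size_t max_len)
{
	size_t len = 0;

	do {
		for (; slot < folioq_nr_slots(folioq); slot++) {
			if (!folioq_folio(folioq, slot))
				continue;
			len += folioq_folio_size(folioq, slot);
			if (len >= max_len)
				return max_len;
		}
		folioq = folioq->next;
		slot = 0;
	} while (folioq);
	return len;
}
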
write_collect.c
86 struct folio_queue *folioq = wreq->buffer; in netfs_writeback_unlock_folios() local
96 if (slot >= folioq_nr_slots(folioq)) { in netfs_writeback_unlock_folios()
97 folioq = netfs_delete_buffer_head(wreq); in netfs_writeback_unlock_folios()
107 folio = folioq_folio(folioq, slot); in netfs_writeback_unlock_folios()
134 folioq_clear(folioq, slot); in netfs_writeback_unlock_folios()
136 if (slot >= folioq_nr_slots(folioq)) { in netfs_writeback_unlock_folios()
137 if (READ_ONCE(wreq->buffer_tail) == folioq) in netfs_writeback_unlock_folios()
139 folioq = netfs_delete_buffer_head(wreq); in netfs_writeback_unlock_folios()
147 wreq->buffer = folioq; in netfs_writeback_unlock_folios()
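
netfs_writeback_unlock_folios() retires the head segment via netfs_delete_buffer_head() once all its slots are consumed. An illustrative pop-the-head helper showing just the list surgery (the real netfs function also updates request state):

#include <linux/folio_queue.h>
#include <linux/slab.h>

/* Hypothetical: detach and free the head segment of a chain,
 * returning the new head. */
static struct folio_queue *example_pop_head(struct folio_queue *head)
{
	struct folio_queue *next = head->next;

	if (next)
		next->prev = NULL;
	kfree(head);
	return next;
}
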
misc.c
50 if (rreq->io_iter.folioq == prev && in netfs_buffer_make_space()
52 rreq->io_iter.folioq = tail; in netfs_buffer_make_space()
read_retry.c
18 subreq->curr_folioq = (struct folio_queue *)io_iter->folioq; in netfs_reissue_read()
internal.h
98 struct folio_queue *folioq,
write_issue.c
163 wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) { in netfs_prepare_write()
/linux-6.12.1/Documentation/core-api/
folio_queue.rst
64 void folioq_init(struct folio_queue *folioq);
77 unsigned int folioq_append(struct folio_queue *folioq,
80 unsigned int folioq_append_mark(struct folio_queue *folioq,
91 void folioq_clear(struct folio_queue *folioq, unsigned int slot);
104 struct folio *folioq_folio(const struct folio_queue *folioq,
110 unsigned int folioq_folio_order(const struct folio_queue *folioq,
113 size_t folioq_folio_size(const struct folio_queue *folioq,
126 unsigned int folioq_nr_slots(const struct folio_queue *folioq);
128 unsigned int folioq_count(struct folio_queue *folioq);
130 bool folioq_full(struct folio_queue *folioq);
[all …]
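
A short sketch exercising the append/mark interface documented above; the helper name is invented, the calls are the documented ones:

#include <linux/folio_queue.h>
#include <linux/bug.h>

/* Append folios until the segment fills, setting the first mark on
 * every slot used; returns how many folios were taken. */
static unsigned int example_fill(struct folio_queue *folioq,
				 struct folio **folios, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr && !folioq_full(folioq); i++) {
		unsigned int slot = folioq_append_mark(folioq, folios[i]);

		WARN_ON_ONCE(!folioq_is_marked(folioq, slot));
	}
	return i;
}
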
/linux-6.12.1/lib/
iov_iter.c
534 const struct folio_queue *folioq = i->folioq; in iov_iter_folioq_advance() local
541 if (slot >= folioq_nr_slots(folioq)) { in iov_iter_folioq_advance()
542 folioq = folioq->next; in iov_iter_folioq_advance()
548 size_t fsize = folioq_folio_size(folioq, slot); in iov_iter_folioq_advance()
554 if (slot >= folioq_nr_slots(folioq) && folioq->next) { in iov_iter_folioq_advance()
555 folioq = folioq->next; in iov_iter_folioq_advance()
562 i->folioq = folioq; in iov_iter_folioq_advance()
587 const struct folio_queue *folioq = i->folioq; in iov_iter_folioq_revert() local
594 folioq = folioq->prev; in iov_iter_folioq_revert()
595 slot = folioq_nr_slots(folioq); in iov_iter_folioq_revert()
[all …]
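
iov_iter_folioq_advance() and iov_iter_folioq_revert() above maintain the (folioq, slot) cursor inside an iov_iter. A sketch of constructing such an iterator and moving it, using the iov_iter_folio_queue() constructor visible in the uio.h and kunit results; treating the chain as a data source here is an assumption:

#include <linux/uio.h>
#include <linux/folio_queue.h>

/* Sketch: wrap a chain in an ITER_FOLIOQ iterator spanning 'size'
 * bytes, then step past the first page -- the advance lands in the
 * iov_iter_folioq_advance() logic excerpted above. */
static void example_iterate(const struct folio_queue *folioq, size_t size)
{
	struct iov_iter iter;

	iov_iter_folio_queue(&iter, ITER_SOURCE, folioq, 0, 0, size);
	iov_iter_advance(&iter, PAGE_SIZE);
}
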
kunit_iov_iter.c
371 struct folio_queue *folioq, *next; in iov_kunit_destroy_folioq() local
373 for (folioq = data; folioq; folioq = next) { in iov_kunit_destroy_folioq()
374 next = folioq->next; in iov_kunit_destroy_folioq()
375 for (int i = 0; i < folioq_nr_slots(folioq); i++) in iov_kunit_destroy_folioq()
376 if (folioq_folio(folioq, i)) in iov_kunit_destroy_folioq()
377 folio_put(folioq_folio(folioq, i)); in iov_kunit_destroy_folioq()
378 kfree(folioq); in iov_kunit_destroy_folioq()
384 struct folio_queue *folioq, in iov_kunit_load_folioq() argument
387 struct folio_queue *p = folioq; in iov_kunit_load_folioq()
402 iov_iter_folio_queue(iter, dir, folioq, 0, 0, size); in iov_kunit_load_folioq()
[all …]
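
iov_kunit_destroy_folioq() above is the canonical teardown: drop a reference on every folio still held in any slot, then free each segment. The same idiom as a standalone sketch:

#include <linux/folio_queue.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch of the teardown loop: walk the chain, put each remaining
 * folio, and free the segments one by one. */
static void example_free_chain(struct folio_queue *folioq)
{
	struct folio_queue *next;

	for (; folioq; folioq = next) {
		next = folioq->next;
		for (int i = 0; i < folioq_nr_slots(folioq); i++)
			if (folioq_folio(folioq, i))
				folio_put(folioq_folio(folioq, i));
		kfree(folioq);
	}
}
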
scatterlist.c
1275 const struct folio_queue *folioq = iter->folioq; in extract_folioq_to_sg() local
1281 BUG_ON(!folioq); in extract_folioq_to_sg()
1283 if (slot >= folioq_nr_slots(folioq)) { in extract_folioq_to_sg()
1284 folioq = folioq->next; in extract_folioq_to_sg()
1285 if (WARN_ON_ONCE(!folioq)) in extract_folioq_to_sg()
1291 struct folio *folio = folioq_folio(folioq, slot); in extract_folioq_to_sg()
1292 size_t fsize = folioq_folio_size(folioq, slot); in extract_folioq_to_sg()
1308 if (slot >= folioq_nr_slots(folioq)) { in extract_folioq_to_sg()
1309 if (!folioq->next) { in extract_folioq_to_sg()
1313 folioq = folioq->next; in extract_folioq_to_sg()
[all …]
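
extract_folioq_to_sg() above carries a byte offset within the current slot across iterations, moving to ->next when a segment's slots run out. A simplified sketch of that cursor arithmetic with the scatterlist work stubbed out (names and parameters are illustrative):

#include <linux/folio_queue.h>
#include <linux/minmax.h>

/* Sketch: consume up to maxsize bytes starting at (folioq, slot,
 * offset), returning how much was covered. */
static size_t example_extract(const struct folio_queue *folioq,
			      unsigned int slot, size_t offset,
			      size_t maxsize)
{
	size_t extracted = 0;

	while (folioq && extracted < maxsize) {
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = folioq->next;
			slot = 0;
			continue;
		}
		if (folioq_folio(folioq, slot)) {
			size_t fsize = folioq_folio_size(folioq, slot);
			size_t part = min_t(size_t, fsize - offset,
					    maxsize - extracted);

			/* ... map folio + offset into the sg list here ... */
			extracted += part;
			offset += part;
			if (offset < fsize)
				continue;
		}
		offset = 0;
		slot++;
	}
	return extracted;
}
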
/linux-6.12.1/fs/smb/client/
smbdirect.c
2536 const struct folio_queue *folioq = iter->folioq; in smb_extract_folioq_to_rdma() local
2541 BUG_ON(!folioq); in smb_extract_folioq_to_rdma()
2543 if (slot >= folioq_nr_slots(folioq)) { in smb_extract_folioq_to_rdma()
2544 folioq = folioq->next; in smb_extract_folioq_to_rdma()
2545 if (WARN_ON_ONCE(!folioq)) in smb_extract_folioq_to_rdma()
2551 struct folio *folio = folioq_folio(folioq, slot); in smb_extract_folioq_to_rdma()
2552 size_t fsize = folioq_folio_size(folioq, slot); in smb_extract_folioq_to_rdma()
2567 if (slot >= folioq_nr_slots(folioq)) { in smb_extract_folioq_to_rdma()
2568 if (!folioq->next) { in smb_extract_folioq_to_rdma()
2572 folioq = folioq->next; in smb_extract_folioq_to_rdma()
[all …]
smb2ops.c
4393 struct folio_queue *folioq; in cifs_clear_folioq_buffer() local
4395 while ((folioq = buffer)) { in cifs_clear_folioq_buffer()
4396 for (int s = 0; s < folioq_count(folioq); s++) in cifs_clear_folioq_buffer()
4397 if (folioq_is_marked(folioq, s)) in cifs_clear_folioq_buffer()
4398 folio_put(folioq_folio(folioq, s)); in cifs_clear_folioq_buffer()
4399 buffer = folioq->next; in cifs_clear_folioq_buffer()
4400 kfree(folioq); in cifs_clear_folioq_buffer()
4601 cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size, in cifs_copy_folioq_to_iter() argument
4604 for (; folioq; folioq = folioq->next) { in cifs_copy_folioq_to_iter()
4605 for (int s = 0; s < folioq_count(folioq); s++) { in cifs_copy_folioq_to_iter()
[all …]
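
cifs_clear_folioq_buffer() above frees a whole chain, dropping a folio reference only for slots carrying the first mark; in this scheme a marked slot owns a reference. As a standalone sketch:

#include <linux/folio_queue.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch of the marked-slot cleanup: put only marked folios, then
 * free each segment as the chain is walked. */
static void example_clear_marked(struct folio_queue *buffer)
{
	struct folio_queue *folioq;

	while ((folioq = buffer)) {
		for (int s = 0; s < folioq_count(folioq); s++)
			if (folioq_is_marked(folioq, s))
				folio_put(folioq_folio(folioq, s));
		buffer = folioq->next;
		kfree(folioq);
	}
}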