Lines Matching +full:ext +full:- +full:32 +full:k (all matches below are from kernel/power/swap.c, the hibernation swap I/O code)
1 // SPDX-License-Identifier: GPL-2.0-only
8 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
10 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
61 #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
68 return nr_free_pages() - nr_free_highpages(); in low_free_pages()
92 * a file-alike way
100 unsigned int k; member
106 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
107 sizeof(u32) - sizeof(u32)];
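The two size expressions above are page-packing arithmetic: a swap map page holds PAGE_SIZE/sizeof(sector_t) slots with the last one reserved for the link to the next map page, and the header's reserved[] pad fills whatever is left of the page after its named fields. A minimal userspace sketch of the map-page side of that arithmetic, assuming 4 KiB pages and an 8-byte sector_t (assumptions, not stated in the listing):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096              /* assumed page size */
    typedef uint64_t sector_t;          /* assumed 64-bit sector_t */

    /* All but one slot holds a data sector; the last slot links to the next map page. */
    #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)

    struct swap_map_page {
        sector_t entries[MAP_PAGE_ENTRIES];
        sector_t next_swap;
    };

    int main(void)
    {
        /* 511 entries per map page under the assumptions above. */
        printf("entries per map page: %zu\n", (size_t)MAP_PAGE_ENTRIES);
        printf("sizeof(struct swap_map_page): %zu (PAGE_SIZE = %d)\n",
               sizeof(struct swap_map_page), PAGE_SIZE);
        return 0;
    }

With those assumptions the struct fills the page exactly, which is what lets each map page be written and read with a single page of I/O.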
135 struct swsusp_extent *ext; in swsusp_extents_insert() local
139 ext = rb_entry(*new, struct swsusp_extent, node); in swsusp_extents_insert()
141 if (swap_offset < ext->start) { in swsusp_extents_insert()
143 if (swap_offset == ext->start - 1) { in swsusp_extents_insert()
144 ext->start--; in swsusp_extents_insert()
147 new = &((*new)->rb_left); in swsusp_extents_insert()
148 } else if (swap_offset > ext->end) { in swsusp_extents_insert()
150 if (swap_offset == ext->end + 1) { in swsusp_extents_insert()
151 ext->end++; in swsusp_extents_insert()
154 new = &((*new)->rb_right); in swsusp_extents_insert()
157 return -EINVAL; in swsusp_extents_insert()
161 ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); in swsusp_extents_insert()
162 if (!ext) in swsusp_extents_insert()
163 return -ENOMEM; in swsusp_extents_insert()
165 ext->start = swap_offset; in swsusp_extents_insert()
166 ext->end = swap_offset; in swsusp_extents_insert()
167 rb_link_node(&ext->node, parent, new); in swsusp_extents_insert()
168 rb_insert_color(&ext->node, &swsusp_extents); in swsusp_extents_insert()
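swsusp_extents_insert() tracks allocated swap offsets as an rbtree of [start, end] extents: an offset adjacent to an existing extent widens it by one, an offset already inside an extent is treated as a bug (-EINVAL), and anything else becomes a new single-offset extent that free_all_swap_pages() later releases in one go. A self-contained userspace sketch of that merge rule against a single extent (the rbtree walk itself is left out):

    #include <errno.h>
    #include <stdio.h>

    struct extent { unsigned long start, end; };

    /*
     * Try to account for one swap offset against an existing extent:
     *   0       merged, the extent grew by one
     *   1       not adjacent, caller must create a new extent
     *   -EINVAL offset already covered, which the caller treats as a bug
     */
    static int extent_merge(struct extent *ext, unsigned long offset)
    {
        if (offset + 1 == ext->start) {      /* adjacent below: grow downwards */
            ext->start--;
            return 0;
        }
        if (offset == ext->end + 1) {        /* adjacent above: grow upwards */
            ext->end++;
            return 0;
        }
        if (offset >= ext->start && offset <= ext->end)
            return -EINVAL;
        return 1;
    }

    int main(void)
    {
        struct extent e = { 10, 10 };

        extent_merge(&e, 11);                /* e becomes [10, 11] */
        extent_merge(&e, 9);                 /* e becomes [9, 11] */
        printf("extent: [%lu, %lu]\n", e.start, e.end);
        return 0;
    }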
173 * alloc_swapdev_block - allocate a swap page and register that it has
192 * free_all_swap_pages - free swap pages allocated for saving image data.
202 struct swsusp_extent *ext; in free_all_swap_pages() local
204 ext = rb_entry(node, struct swsusp_extent, node); in free_all_swap_pages()
206 swap_free_nr(swp_entry(swap, ext->start), in free_all_swap_pages()
207 ext->end - ext->start + 1); in free_all_swap_pages()
209 kfree(ext); in free_all_swap_pages()
234 atomic_set(&hb->count, 0); in hib_init_batch()
235 init_waitqueue_head(&hb->wait); in hib_init_batch()
236 hb->error = BLK_STS_OK; in hib_init_batch()
237 blk_start_plug(&hb->plug); in hib_init_batch()
242 blk_finish_plug(&hb->plug); in hib_finish_batch()
247 struct hib_bio_batch *hb = bio->bi_private; in hib_end_io()
250 if (bio->bi_status) { in hib_end_io()
251 pr_alert("Read-error on swap-device (%u:%u:%Lu)\n", in hib_end_io()
253 (unsigned long long)bio->bi_iter.bi_sector); in hib_end_io()
262 if (bio->bi_status && !hb->error) in hib_end_io()
263 hb->error = bio->bi_status; in hib_end_io()
264 if (atomic_dec_and_test(&hb->count)) in hib_end_io()
265 wake_up(&hb->wait); in hib_end_io()
279 bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); in hib_submit_io()
283 (unsigned long long)bio->bi_iter.bi_sector); in hib_submit_io()
285 return -EFAULT; in hib_submit_io()
289 bio->bi_end_io = hib_end_io; in hib_submit_io()
290 bio->bi_private = hb; in hib_submit_io()
291 atomic_inc(&hb->count); in hib_submit_io()
307 wait_event(hb->wait, atomic_read(&hb->count) == 0); in hib_wait_io()
308 return blk_status_to_errno(hb->error); in hib_wait_io()
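hib_submit_io(), hib_end_io() and hib_wait_io() form a small completion-counting batch: each submitted bio increments hb->count, each completion records the first error and decrements the count, and the waiter sleeps until the count drops to zero. A condensed userspace model of that pattern with pthreads (names are illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    struct batch {
        pthread_mutex_t lock;
        pthread_cond_t  done;
        int             inflight;
        int             error;               /* first error wins, later ones are ignored */
    };

    static void batch_submit(struct batch *b)             /* called once per submitted I/O */
    {
        pthread_mutex_lock(&b->lock);
        b->inflight++;
        pthread_mutex_unlock(&b->lock);
    }

    static void batch_complete(struct batch *b, int status)   /* runs from the completion path */
    {
        pthread_mutex_lock(&b->lock);
        if (status && !b->error)
            b->error = status;
        if (--b->inflight == 0)
            pthread_cond_signal(&b->done);
        pthread_mutex_unlock(&b->lock);
    }

    static int batch_wait(struct batch *b)                 /* returns the first recorded error */
    {
        pthread_mutex_lock(&b->lock);
        while (b->inflight)
            pthread_cond_wait(&b->done, &b->lock);
        pthread_mutex_unlock(&b->lock);
        return b->error;
    }

    int main(void)
    {
        struct batch b = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 };

        batch_submit(&b);
        batch_complete(&b, 0);
        printf("first error in batch: %d\n", batch_wait(&b));
        return 0;
    }

Keeping only the first error mirrors the hb->error handling above: once one bio in the batch fails, later statuses cannot overwrite the original cause.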
319 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || in mark_swapfiles()
320 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { in mark_swapfiles()
321 memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); in mark_swapfiles()
322 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); in mark_swapfiles()
323 swsusp_header->image = handle->first_sector; in mark_swapfiles()
325 swsusp_header->hw_sig = swsusp_hardware_signature; in mark_swapfiles()
328 swsusp_header->flags = flags; in mark_swapfiles()
330 swsusp_header->crc32 = handle->crc32; in mark_swapfiles()
335 error = -ENODEV; in mark_swapfiles()
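mark_swapfiles() saves the normal swap signature ("SWAP-SPACE" or "SWAPSPACE2") in orig_sig, replaces it with the hibernation signature, and records the sector of the first map page plus the flags and CRC so the resume kernel can locate and verify the image; swsusp_check() and swsusp_unmark() later put the original signature back. A schematic userspace sketch of that swap (the field layout and the "S1SUSPEND" literal are illustrative assumptions, not taken from the listing):

    #include <string.h>
    #include <stdint.h>

    struct header {
        char         orig_sig[10];   /* saved swap signature, restored on resume/unmark */
        char         sig[10];        /* swap signature, overwritten with the hibernation one */
        uint64_t     image;          /* sector of the first swap map page */
        unsigned int flags;
        uint32_t     crc32;
    };

    static int mark(struct header *h, uint64_t first_sector, unsigned int flags, uint32_t crc)
    {
        if (memcmp(h->sig, "SWAP-SPACE", 10) && memcmp(h->sig, "SWAPSPACE2", 10))
            return -1;                       /* no swap signature: refuse to mark */

        memcpy(h->orig_sig, h->sig, 10);     /* remember what to restore later */
        memcpy(h->sig, "S1SUSPEND", 10);     /* hibernation signature (illustrative value) */
        h->image = first_sector;
        h->flags = flags;
        h->crc32 = crc;
        return 0;
    }

    int main(void)
    {
        struct header h;

        memcpy(h.sig, "SWAPSPACE2", 10);
        return mark(&h, 123, 0, 0) ? 1 : 0;
    }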
348 * swsusp_swap_check - check if the resume device is a swap device
374 * write_page - Write one page to given swap location.
386 return -ENOSPC; in write_page()
416 if (handle->cur) in release_swap_writer()
417 free_page((unsigned long)handle->cur); in release_swap_writer()
418 handle->cur = NULL; in release_swap_writer()
427 if (ret != -ENOSPC) in get_swap_writer()
428 pr_err("Cannot find swap device, try swapon -a\n"); in get_swap_writer()
431 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); in get_swap_writer()
432 if (!handle->cur) { in get_swap_writer()
433 ret = -ENOMEM; in get_swap_writer()
436 handle->cur_swap = alloc_swapdev_block(root_swap); in get_swap_writer()
437 if (!handle->cur_swap) { in get_swap_writer()
438 ret = -ENOSPC; in get_swap_writer()
441 handle->k = 0; in get_swap_writer()
442 handle->reqd_free_pages = reqd_free_pages(); in get_swap_writer()
443 handle->first_sector = handle->cur_swap; in get_swap_writer()
458 if (!handle->cur) in swap_write_page()
459 return -EINVAL; in swap_write_page()
464 handle->cur->entries[handle->k++] = offset; in swap_write_page()
465 if (handle->k >= MAP_PAGE_ENTRIES) { in swap_write_page()
468 return -ENOSPC; in swap_write_page()
469 handle->cur->next_swap = offset; in swap_write_page()
470 error = write_page(handle->cur, handle->cur_swap, hb); in swap_write_page()
473 clear_page(handle->cur); in swap_write_page()
474 handle->cur_swap = offset; in swap_write_page()
475 handle->k = 0; in swap_write_page()
477 if (hb && low_free_pages() <= handle->reqd_free_pages) { in swap_write_page()
485 handle->reqd_free_pages = reqd_free_pages(); in swap_write_page()
494 if (handle->cur && handle->cur_swap) in flush_swap_writer()
495 return write_page(handle->cur, handle->cur_swap, NULL); in flush_swap_writer()
497 return -EINVAL; in flush_swap_writer()
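swap_write_page() stores one swap offset per data page in the current map page; once all MAP_PAGE_ENTRIES slots are used it allocates the sector for the next map page, records it in next_swap, writes the full map page out, and starts over, so the image ends up as a chain of map pages. flush_swap_writer() writes out the final, partially filled map page. A compact userspace model of that chaining (alloc_sector() and write_sector() are hypothetical stand-ins for alloc_swapdev_block() and write_page()):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define ENTRIES 511                       /* stands in for MAP_PAGE_ENTRIES */
    typedef uint64_t sector_t;

    struct map_page {
        sector_t entries[ENTRIES];
        sector_t next_swap;
    };

    struct writer {
        struct map_page cur;                  /* map page being filled */
        sector_t        cur_swap;             /* sector it will be written to */
        unsigned int    k;                    /* next free slot in cur.entries[] */
    };

    static sector_t next_free = 2;
    static sector_t alloc_sector(void) { return next_free++; }                     /* stand-in allocator */
    static void write_sector(sector_t s, const void *page) { (void)s; (void)page; } /* stand-in I/O */

    /* Record that one data page went to 'offset'; flush and chain the map page when it fills up. */
    static void map_add(struct writer *w, sector_t offset)
    {
        w->cur.entries[w->k++] = offset;
        if (w->k >= ENTRIES) {
            sector_t next = alloc_sector();

            w->cur.next_swap = next;          /* link to the map page that will follow */
            write_sector(w->cur_swap, &w->cur);
            memset(&w->cur, 0, sizeof(w->cur));
            w->cur_swap = next;
            w->k = 0;
        }
    }

    int main(void)
    {
        struct writer w = { .cur_swap = 1 };
        sector_t i;

        for (i = 0; i < 1000; i++)            /* enough data pages to force one chained flush */
            map_add(&w, alloc_sector());
        write_sector(w.cur_swap, &w.cur);     /* flush the final, partial map page */
        printf("final map page holds %u entries\n", w.k);
        return 0;
    }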
528 #define UNC_PAGES 32
544 * save_image - save the suspend image data
615 wait_event(d->go, atomic_read_acquire(&d->ready) || in crc32_threadfn()
618 d->thr = NULL; in crc32_threadfn()
619 atomic_set_release(&d->stop, 1); in crc32_threadfn()
620 wake_up(&d->done); in crc32_threadfn()
623 atomic_set(&d->ready, 0); in crc32_threadfn()
625 for (i = 0; i < d->run_threads; i++) in crc32_threadfn()
626 *d->crc32 = crc32_le(*d->crc32, in crc32_threadfn()
627 d->unc[i], *d->unc_len[i]); in crc32_threadfn()
628 atomic_set_release(&d->stop, 1); in crc32_threadfn()
629 wake_up(&d->done); in crc32_threadfn()
662 wait_event(d->go, atomic_read_acquire(&d->ready) || in compress_threadfn()
665 d->thr = NULL; in compress_threadfn()
666 d->ret = -1; in compress_threadfn()
667 atomic_set_release(&d->stop, 1); in compress_threadfn()
668 wake_up(&d->done); in compress_threadfn()
671 atomic_set(&d->ready, 0); in compress_threadfn()
673 cmp_len = CMP_SIZE - CMP_HEADER; in compress_threadfn()
674 d->ret = crypto_comp_compress(d->cc, d->unc, d->unc_len, in compress_threadfn()
675 d->cmp + CMP_HEADER, in compress_threadfn()
677 d->cmp_len = cmp_len; in compress_threadfn()
679 atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len); in compress_threadfn()
680 atomic_set_release(&d->stop, 1); in compress_threadfn()
681 wake_up(&d->done); in compress_threadfn()
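The crc32 and compress workers share one handshake with the main thread: the main thread fills a per-thread buffer, sets ->ready and wakes the worker's ->go queue; the worker does one unit of work, sets ->stop and wakes ->done; the main thread waits on ->done and clears ->stop before the next round (in the kernel a kthread shutdown check takes the place of the 'exiting' flag used below). A userspace pthread model of one round of that handshake (structure and names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct work {
        pthread_mutex_t lock;
        pthread_cond_t  go, done;
        int ready, stop, exiting;
        int input, output;
    };

    static void *worker(void *arg)
    {
        struct work *w = arg;

        pthread_mutex_lock(&w->lock);
        for (;;) {
            while (!w->ready && !w->exiting)   /* wait for data or for shutdown */
                pthread_cond_wait(&w->go, &w->lock);
            if (w->exiting)
                break;
            w->ready = 0;
            w->output = w->input * 2;          /* stands in for compress/crc32 work */
            w->stop = 1;                       /* this round is finished */
            pthread_cond_signal(&w->done);
        }
        pthread_mutex_unlock(&w->lock);
        return NULL;
    }

    int main(void)
    {
        struct work w = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                          PTHREAD_COND_INITIALIZER, 0, 0, 0, 21, 0 };
        pthread_t thr;

        pthread_create(&thr, NULL, worker, &w);

        pthread_mutex_lock(&w.lock);
        w.ready = 1;                           /* hand the buffer to the worker */
        pthread_cond_signal(&w.go);
        while (!w.stop)                        /* wait for the round to complete */
            pthread_cond_wait(&w.done, &w.lock);
        w.stop = 0;
        printf("worker produced %d\n", w.output);
        w.exiting = 1;                         /* ask the worker to exit */
        pthread_cond_signal(&w.go);
        pthread_mutex_unlock(&w.lock);

        pthread_join(thr, NULL);
        return 0;
    }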
687 * save_compressed_image - Save the suspend image data after compression.
717 nr_threads = num_online_cpus() - 1; in save_compressed_image()
723 ret = -ENOMEM; in save_compressed_image()
730 ret = -ENOMEM; in save_compressed_image()
737 ret = -ENOMEM; in save_compressed_image()
751 ret = -EFAULT; in save_compressed_image()
761 ret = -ENOMEM; in save_compressed_image()
769 init_waitqueue_head(&crc->go); in save_compressed_image()
770 init_waitqueue_head(&crc->done); in save_compressed_image()
772 handle->crc32 = 0; in save_compressed_image()
773 crc->crc32 = &handle->crc32; in save_compressed_image()
775 crc->unc[thr] = data[thr].unc; in save_compressed_image()
776 crc->unc_len[thr] = &data[thr].unc_len; in save_compressed_image()
779 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); in save_compressed_image()
780 if (IS_ERR(crc->thr)) { in save_compressed_image()
781 crc->thr = NULL; in save_compressed_image()
783 ret = -ENOMEM; in save_compressed_image()
791 handle->reqd_free_pages = reqd_free_pages(); in save_compressed_image()
831 crc->run_threads = thr; in save_compressed_image()
832 atomic_set_release(&crc->ready, 1); in save_compressed_image()
833 wake_up(&crc->go); in save_compressed_image()
851 ret = -1; in save_compressed_image()
861 * OK - we saved the length of the compressed data, so in save_compressed_image()
876 wait_event(crc->done, atomic_read_acquire(&crc->stop)); in save_compressed_image()
877 atomic_set(&crc->stop, 0); in save_compressed_image()
894 if (crc->thr) in save_compressed_image()
895 kthread_stop(crc->thr); in save_compressed_image()
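Each compressed chunk goes to swap as its length in the first CMP_HEADER bytes of the buffer followed by the compressed bytes, and the chunk is written out a page at a time, so it always occupies a whole number of swap pages; on resume the length header tells the loader how many compressed pages to read back before decompressing. A sketch of that framing arithmetic (the PAGE_SIZE and CMP_HEADER values here are assumptions):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE  4096                   /* assumed page size */
    #define CMP_HEADER sizeof(size_t)         /* assumed length-header size */

    /* Lay one chunk out as [length][compressed bytes] and return the pages it occupies on swap. */
    static size_t frame_chunk(unsigned char *out, const unsigned char *cmp, size_t cmp_len)
    {
        size_t total = CMP_HEADER + cmp_len;

        memcpy(out, &cmp_len, CMP_HEADER);        /* length header, read back by the loader */
        memcpy(out + CMP_HEADER, cmp, cmp_len);   /* compressed payload */
        return (total + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
        unsigned char out[2 * PAGE_SIZE];
        unsigned char cmp[100] = { 0 };

        printf("a 100-byte chunk occupies %zu page(s)\n", frame_chunk(out, cmp, sizeof(cmp)));
        return 0;
    }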
913 * enough_swap - Make sure we have enough swap to save the image.
931 * swsusp_write - Write entire image and metadata.
957 error = -ENOSPC; in swsusp_write()
965 error = -EFAULT; in swsusp_write()
973 save_image(&handle, &snapshot, pages - 1) : in swsusp_write()
974 save_compressed_image(&handle, &snapshot, pages - 1); in swsusp_write()
983 * in a file-like way.
990 while (handle->maps) { in release_swap_reader()
991 if (handle->maps->map) in release_swap_reader()
992 free_page((unsigned long)handle->maps->map); in release_swap_reader()
993 tmp = handle->maps; in release_swap_reader()
994 handle->maps = handle->maps->next; in release_swap_reader()
997 handle->cur = NULL; in release_swap_reader()
1007 *flags_p = swsusp_header->flags; in get_swap_reader()
1009 if (!swsusp_header->image) /* how can this happen? */ in get_swap_reader()
1010 return -EINVAL; in get_swap_reader()
1012 handle->cur = NULL; in get_swap_reader()
1013 last = handle->maps = NULL; in get_swap_reader()
1014 offset = swsusp_header->image; in get_swap_reader()
1016 tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL); in get_swap_reader()
1019 return -ENOMEM; in get_swap_reader()
1021 if (!handle->maps) in get_swap_reader()
1022 handle->maps = tmp; in get_swap_reader()
1024 last->next = tmp; in get_swap_reader()
1027 tmp->map = (struct swap_map_page *) in get_swap_reader()
1029 if (!tmp->map) { in get_swap_reader()
1031 return -ENOMEM; in get_swap_reader()
1034 error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL); in get_swap_reader()
1039 offset = tmp->map->next_swap; in get_swap_reader()
1041 handle->k = 0; in get_swap_reader()
1042 handle->cur = handle->maps->map; in get_swap_reader()
1053 if (!handle->cur) in swap_read_page()
1054 return -EINVAL; in swap_read_page()
1055 offset = handle->cur->entries[handle->k]; in swap_read_page()
1057 return -EFAULT; in swap_read_page()
1061 if (++handle->k >= MAP_PAGE_ENTRIES) { in swap_read_page()
1062 handle->k = 0; in swap_read_page()
1063 free_page((unsigned long)handle->maps->map); in swap_read_page()
1064 tmp = handle->maps; in swap_read_page()
1065 handle->maps = handle->maps->next; in swap_read_page()
1067 if (!handle->maps) in swap_read_page()
1070 handle->cur = handle->maps->map; in swap_read_page()
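On the read side, get_swap_reader() follows next_swap from swsusp_header->image and pulls every map page into a linked list up front; swap_read_page() then hands out entries from the head map page and frees each map page once its last entry has been consumed. A simplified userspace sketch of walking that chain (read_sector() stands in for hib_submit_io()):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define ENTRIES 511
    typedef uint64_t sector_t;

    struct map_page {
        sector_t entries[ENTRIES];
        sector_t next_swap;                   /* 0 terminates the chain */
    };

    struct map_node {
        struct map_page *map;
        struct map_node *next;
    };

    static int read_sector(sector_t s, struct map_page *page)   /* stand-in for hib_submit_io() */
    {
        (void)s;
        memset(page, 0, sizeof(*page));       /* pretend the read returned a terminating map page */
        return 0;
    }

    /* Read the whole chain of map pages, starting from the sector stored in the header. */
    static struct map_node *load_maps(sector_t first)
    {
        struct map_node *head = NULL, **link = &head;
        sector_t offset = first;

        while (offset) {
            struct map_node *n = calloc(1, sizeof(*n));

            if (!n || !(n->map = malloc(sizeof(*n->map))) || read_sector(offset, n->map)) {
                if (n)
                    free(n->map);
                free(n);
                return NULL;                  /* freeing earlier nodes is left out of this sketch */
            }
            *link = n;
            link = &n->next;
            offset = n->map->next_swap;       /* follow the link written by the image writer */
        }
        return head;
    }

    int main(void)
    {
        return load_maps(1) ? 0 : 1;
    }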
1083 * load_image - load the image using the swap map handle
1116 if (snapshot->sync_read) in load_image()
1134 ret = -ENODATA; in load_image()
1166 wait_event(d->go, atomic_read_acquire(&d->ready) || in decompress_threadfn()
1169 d->thr = NULL; in decompress_threadfn()
1170 d->ret = -1; in decompress_threadfn()
1171 atomic_set_release(&d->stop, 1); in decompress_threadfn()
1172 wake_up(&d->done); in decompress_threadfn()
1175 atomic_set(&d->ready, 0); in decompress_threadfn()
1178 d->ret = crypto_comp_decompress(d->cc, d->cmp + CMP_HEADER, d->cmp_len, in decompress_threadfn()
1179 d->unc, &unc_len); in decompress_threadfn()
1180 d->unc_len = unc_len; in decompress_threadfn()
1183 flush_icache_range((unsigned long)d->unc, in decompress_threadfn()
1184 (unsigned long)d->unc + d->unc_len); in decompress_threadfn()
1186 atomic_set_release(&d->stop, 1); in decompress_threadfn()
1187 wake_up(&d->done); in decompress_threadfn()
1193 * load_compressed_image - Load compressed image data and decompress it.
1224 nr_threads = num_online_cpus() - 1; in load_compressed_image()
1230 ret = -ENOMEM; in load_compressed_image()
1237 ret = -ENOMEM; in load_compressed_image()
1244 ret = -ENOMEM; in load_compressed_image()
1260 ret = -EFAULT; in load_compressed_image()
1270 ret = -ENOMEM; in load_compressed_image()
1278 init_waitqueue_head(&crc->go); in load_compressed_image()
1279 init_waitqueue_head(&crc->done); in load_compressed_image()
1281 handle->crc32 = 0; in load_compressed_image()
1282 crc->crc32 = &handle->crc32; in load_compressed_image()
1284 crc->unc[thr] = data[thr].unc; in load_compressed_image()
1285 crc->unc_len[thr] = &data[thr].unc_len; in load_compressed_image()
1288 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); in load_compressed_image()
1289 if (IS_ERR(crc->thr)) { in load_compressed_image()
1290 crc->thr = NULL; in load_compressed_image()
1292 ret = -ENOMEM; in load_compressed_image()
1304 read_pages = (low_free_pages() - snapshot_get_image_size()) / 2; in load_compressed_image()
1317 ret = -ENOMEM; in load_compressed_image()
1347 if (handle->cur && in load_compressed_image()
1348 handle->cur->entries[handle->k]) { in load_compressed_image()
1359 want -= i; in load_compressed_image()
1377 if (crc->run_threads) { in load_compressed_image()
1378 wait_event(crc->done, atomic_read_acquire(&crc->stop)); in load_compressed_image()
1379 atomic_set(&crc->stop, 0); in load_compressed_image()
1380 crc->run_threads = 0; in load_compressed_image()
1389 ret = -1; in load_compressed_image()
1397 ret = -1; in load_compressed_image()
1408 have--; in load_compressed_image()
1445 data[thr].unc_len & (PAGE_SIZE - 1))) { in load_compressed_image()
1447 ret = -1; in load_compressed_image()
1463 crc->run_threads = thr + 1; in load_compressed_image()
1464 atomic_set_release(&crc->ready, 1); in load_compressed_image()
1465 wake_up(&crc->go); in load_compressed_image()
1471 crc->run_threads = thr; in load_compressed_image()
1472 atomic_set_release(&crc->ready, 1); in load_compressed_image()
1473 wake_up(&crc->go); in load_compressed_image()
1477 if (crc->run_threads) { in load_compressed_image()
1478 wait_event(crc->done, atomic_read_acquire(&crc->stop)); in load_compressed_image()
1479 atomic_set(&crc->stop, 0); in load_compressed_image()
1486 ret = -ENODATA; in load_compressed_image()
1488 if (swsusp_header->flags & SF_CRC32_MODE) { in load_compressed_image()
1489 if(handle->crc32 != swsusp_header->crc32) { in load_compressed_image()
1491 ret = -ENODATA; in load_compressed_image()
1502 if (crc->thr) in load_compressed_image()
1503 kthread_stop(crc->thr); in load_compressed_image()
1521 * swsusp_read - read the hibernation image.
1536 return error < 0 ? error : -EFAULT; in swsusp_read()
1545 load_image(&handle, &snapshot, header->pages - 1) : in swsusp_read()
1546 load_compressed_image(&handle, &snapshot, header->pages - 1); in swsusp_read()
1560 * swsusp_check - Open the resume device and check for the swsusp signature.
1578 if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { in swsusp_check()
1579 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); in swsusp_check()
1580 swsusp_header_flags = swsusp_header->flags; in swsusp_check()
1586 error = -EINVAL; in swsusp_check()
1588 if (!error && swsusp_header->flags & SF_HW_SIG && in swsusp_check()
1589 swsusp_header->hw_sig != swsusp_hardware_signature) { in swsusp_check()
1591 swsusp_header->hw_sig, swsusp_hardware_signature); in swsusp_check()
1592 error = -EINVAL; in swsusp_check()
1611 * swsusp_close - close resume device.
1625 * swsusp_unmark - Unmark swsusp signature in the resume device
1635 if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { in swsusp_unmark()
1636 memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); in swsusp_unmark()
1642 error = -ENODEV; in swsusp_unmark()