Lines matching refs: journal

48 static inline struct journal_lock * __must_check get_lock(struct slab_journal *journal,  in get_lock()  argument
51 return &journal->locks[sequence_number % journal->size]; in get_lock()
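The lock array indexed here is a ring: sequence numbers grow without bound, and get_lock() folds them onto journal->size slots, so a slot is recycled once the block that last used it has been reaped. A minimal standalone sketch of the indexing (journal_sketch and get_lock_sketch are invented stand-ins, not the kernel's types):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t sequence_number_t;
    struct journal_lock { uint32_t count; };

    struct journal_sketch {
        size_t size;                 /* number of on-disk journal blocks */
        struct journal_lock *locks;  /* one lock slot per ring position */
    };

    /* Sequence numbers increase forever; the slot for a block is its
     * sequence number reduced modulo the ring size, so each slot is
     * reused once the block a full ring back has been reaped. */
    static struct journal_lock *get_lock_sketch(struct journal_sketch *j,
                                                sequence_number_t sn)
    {
        return &j->locks[sn % j->size];
    }
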
66 static inline bool __must_check must_make_entries_to_flush(struct slab_journal *journal) in must_make_entries_to_flush() argument
68 return ((journal->slab->status != VDO_SLAB_REBUILDING) && in must_make_entries_to_flush()
69 vdo_waitq_has_waiters(&journal->entry_waiters)); in must_make_entries_to_flush()
78 static inline bool __must_check is_reaping(struct slab_journal *journal) in is_reaping() argument
80 return (journal->head != journal->unreapable); in is_reaping()
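is_reaping() compares two of the three sequence-number cursors this listing keeps adjusting: head (oldest block not yet reaped), unreapable (oldest block whose lock count is still nonzero), and tail (the open block). finish_reaping() at line 401 sets head = unreapable, so reaping is in flight exactly while the two differ. A hedged sketch of the relationship (struct cursors is invented):

    #include <stdbool.h>
    #include <stdint.h>

    struct cursors { uint64_t head, unreapable, tail; };

    /* Invariant implied by the fragments: head <= unreapable <= tail.
     * Blocks in [head, unreapable) have been released but await the
     * reap flush before head may advance over them. */
    static bool is_reaping_sketch(const struct cursors *c)
    {
        return c->head != c->unreapable;
    }
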
87 static void initialize_tail_block(struct slab_journal *journal) in initialize_tail_block() argument
89 struct slab_journal_block_header *header = &journal->tail_header; in initialize_tail_block()
91 header->sequence_number = journal->tail; in initialize_tail_block()
100 static void initialize_journal_state(struct slab_journal *journal) in initialize_journal_state() argument
102 journal->unreapable = journal->head; in initialize_journal_state()
103 journal->reap_lock = get_lock(journal, journal->unreapable); in initialize_journal_state()
104 journal->next_commit = journal->tail; in initialize_journal_state()
105 journal->summarized = journal->last_summarized = journal->tail; in initialize_journal_state()
106 initialize_tail_block(journal); in initialize_journal_state()
115 static bool __must_check block_is_full(struct slab_journal *journal) in block_is_full() argument
117 journal_entry_count_t count = journal->tail_header.entry_count; in block_is_full()
119 return (journal->tail_header.has_block_map_increments ? in block_is_full()
120 (journal->full_entries_per_block == count) : in block_is_full()
121 (journal->entries_per_block == count)); in block_is_full()
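block_is_full() picks between two capacities: the fragments suggest a block containing any block-map increment tops out at the smaller full_entries_per_block (the assert at lines 923-924 enforces the same bound), presumably because such entries carry an extra per-entry flag. A compilable sketch with invented stand-in types:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint16_t journal_entry_count_t;

    struct tail_header_sketch {
        journal_entry_count_t entry_count;
        bool has_block_map_increments;
    };

    /* A block holding any block-map increment is capped at the smaller
     * "full entry" capacity; ordinary blocks use the larger one. */
    static bool block_is_full_sketch(const struct tail_header_sketch *h,
                                     journal_entry_count_t entries_per_block,
                                     journal_entry_count_t full_entries_per_block)
    {
        journal_entry_count_t cap = h->has_block_map_increments ?
                                    full_entries_per_block : entries_per_block;
        return h->entry_count == cap;
    }
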
124 static void add_entries(struct slab_journal *journal);
125 static void update_tail_block_location(struct slab_journal *journal);
137 return ((slab->journal.tail == 1) && in is_slab_journal_blank()
138 (slab->journal.tail_header.entry_count == 0)); in is_slab_journal_blank()
147 static void mark_slab_journal_dirty(struct slab_journal *journal, sequence_number_t lock) in mark_slab_journal_dirty() argument
150 struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals; in mark_slab_journal_dirty()
152 VDO_ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean"); in mark_slab_journal_dirty()
154 journal->recovery_lock = lock; in mark_slab_journal_dirty()
156 if (dirty_journal->recovery_lock <= journal->recovery_lock) in mark_slab_journal_dirty()
160 list_move_tail(&journal->dirty_entry, dirty_journal->dirty_entry.next); in mark_slab_journal_dirty()
163 static void mark_slab_journal_clean(struct slab_journal *journal) in mark_slab_journal_clean() argument
165 journal->recovery_lock = 0; in mark_slab_journal_clean()
166 list_del_init(&journal->dirty_entry); in mark_slab_journal_clean()
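mark_slab_journal_dirty() keeps the allocator's dirty_slab_journals list ascending by recovery_lock: line 156 is the break test of a reverse scan for the last journal whose lock is no greater than ours, and line 160 splices this journal in right after it. A standalone sketch of the same sorted insert using plain pointers instead of list_head (and, unlike list_move_tail, assuming the node is not already linked):

    #include <stdint.h>

    struct dirty_node {
        uint64_t recovery_lock;
        struct dirty_node *prev, *next;  /* circular list; the sentinel
                                          * head starts with prev = next
                                          * = itself */
    };

    /* Walk backward from the sentinel until a node with a lock no
     * greater than ours is found, then link j immediately after it,
     * keeping the list sorted ascending by recovery_lock. */
    static void mark_dirty_sketch(struct dirty_node *head,
                                  struct dirty_node *j, uint64_t lock)
    {
        struct dirty_node *pos = head->prev;

        j->recovery_lock = lock;
        while (pos != head && pos->recovery_lock > lock)
            pos = pos->prev;

        j->prev = pos;
        j->next = pos->next;
        pos->next->prev = j;
        pos->next = j;
    }
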
172 struct slab_journal *journal = &slab->journal; in check_if_slab_drained() local
176 must_make_entries_to_flush(journal) || in check_if_slab_drained()
177 is_reaping(journal) || in check_if_slab_drained()
178 journal->waiting_to_commit || in check_if_slab_drained()
179 !list_empty(&journal->uncommitted_blocks) || in check_if_slab_drained()
180 journal->updating_slab_summary || in check_if_slab_drained()
399 static void finish_reaping(struct slab_journal *journal) in finish_reaping() argument
401 journal->head = journal->unreapable; in finish_reaping()
402 add_entries(journal); in finish_reaping()
403 check_if_slab_drained(journal->slab); in finish_reaping()
406 static void reap_slab_journal(struct slab_journal *journal);
415 struct slab_journal *journal = completion->parent; in complete_reaping() local
417 return_vio_to_pool(journal->slab->allocator->vio_pool, in complete_reaping()
419 finish_reaping(journal); in complete_reaping()
420 reap_slab_journal(journal); in complete_reaping()
437 struct slab_journal *journal = vio->completion.parent; in flush_endio() local
440 journal->slab->allocator->thread_id); in flush_endio()
451 struct slab_journal *journal = in flush_for_reaping() local
456 vio->completion.parent = journal; in flush_for_reaping()
464 static void reap_slab_journal(struct slab_journal *journal) in reap_slab_journal() argument
468 if (is_reaping(journal)) { in reap_slab_journal()
473 if ((journal->slab->status != VDO_SLAB_REBUILT) || in reap_slab_journal()
474 !vdo_is_state_normal(&journal->slab->state) || in reap_slab_journal()
475 vdo_is_read_only(journal->slab->allocator->depot->vdo)) { in reap_slab_journal()
487 while ((journal->unreapable < journal->tail) && (journal->reap_lock->count == 0)) { in reap_slab_journal()
489 journal->unreapable++; in reap_slab_journal()
490 journal->reap_lock++; in reap_slab_journal()
491 if (journal->reap_lock == &journal->locks[journal->size]) in reap_slab_journal()
492 journal->reap_lock = &journal->locks[0]; in reap_slab_journal()
508 journal->flush_waiter.callback = flush_for_reaping; in reap_slab_journal()
509 acquire_vio_from_pool(journal->slab->allocator->vio_pool, in reap_slab_journal()
510 &journal->flush_waiter); in reap_slab_journal()
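reap_slab_journal() first advances unreapable past every block whose lock count has reached zero, wrapping reap_lock around the ring (lines 487-492), and only then queues flush_for_reaping (lines 508-510); finish_reaping() moves head afterward. A sketch of just the advance-and-wrap loop, with invented stand-in types:

    #include <stddef.h>
    #include <stdint.h>

    struct lock_sketch { uint32_t count; };

    struct reap_sketch {
        uint64_t unreapable, tail;
        size_t size;
        struct lock_sketch *locks;      /* ring of `size` lock slots */
        struct lock_sketch *reap_lock;  /* cursor into `locks` */
    };

    /* Advance past every block whose lock count has dropped to zero,
     * wrapping the cursor when it runs off the end of the ring. */
    static void advance_reap_cursor(struct reap_sketch *j)
    {
        while (j->unreapable < j->tail && j->reap_lock->count == 0) {
            j->unreapable++;
            j->reap_lock++;
            if (j->reap_lock == &j->locks[j->size])
                j->reap_lock = &j->locks[0];
        }
    }
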
521 static void adjust_slab_journal_block_reference(struct slab_journal *journal, in adjust_slab_journal_block_reference() argument
530 if (journal->slab->status == VDO_SLAB_REPLAYING) { in adjust_slab_journal_block_reference()
536 lock = get_lock(journal, sequence_number); in adjust_slab_journal_block_reference()
546 reap_slab_journal(journal); in adjust_slab_journal_block_reference()
561 struct slab_journal *journal = in release_journal_locks() local
572 (unsigned long long) journal->summarized); in release_journal_locks()
575 journal->updating_slab_summary = false; in release_journal_locks()
576 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in release_journal_locks()
577 check_if_slab_drained(journal->slab); in release_journal_locks()
581 if (journal->partial_write_in_progress && (journal->summarized == journal->tail)) { in release_journal_locks()
582 journal->partial_write_in_progress = false; in release_journal_locks()
583 add_entries(journal); in release_journal_locks()
586 first = journal->last_summarized; in release_journal_locks()
587 journal->last_summarized = journal->summarized; in release_journal_locks()
588 for (i = journal->summarized - 1; i >= first; i--) { in release_journal_locks()
593 if (journal->recovery_journal != NULL) { in release_journal_locks()
594 zone_count_t zone_number = journal->slab->allocator->zone_number; in release_journal_locks()
595 struct journal_lock *lock = get_lock(journal, i); in release_journal_locks()
597 vdo_release_recovery_journal_block_reference(journal->recovery_journal, in release_journal_locks()
607 adjust_slab_journal_block_reference(journal, i, -1); in release_journal_locks()
610 journal->updating_slab_summary = false; in release_journal_locks()
612 reap_slab_journal(journal); in release_journal_locks()
615 update_tail_block_location(journal); in release_journal_locks()
622 static void update_tail_block_location(struct slab_journal *journal) in update_tail_block_location() argument
625 struct vdo_slab *slab = journal->slab; in update_tail_block_location()
627 if (journal->updating_slab_summary || in update_tail_block_location()
628 vdo_is_read_only(journal->slab->allocator->depot->vdo) || in update_tail_block_location()
629 (journal->last_summarized >= journal->next_commit)) { in update_tail_block_location()
642 journal->summarized = journal->next_commit; in update_tail_block_location()
643 journal->updating_slab_summary = true; in update_tail_block_location()
651 update_slab_summary_entry(slab, &journal->slab_summary_waiter, in update_tail_block_location()
652 journal->summarized % journal->size, in update_tail_block_location()
653 (journal->head > 1), false, free_block_count); in update_tail_block_location()
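update_tail_block_location() throttles itself: it returns early when a summary update is already in flight, the VDO is read-only, or nothing new has been committed since the last summary write (lines 627-629); otherwise it marks next_commit as summarized and posts a single slab-summary update. A sketch of the gating predicate (read_only is a stand-in for the vdo_is_read_only() call):

    #include <stdbool.h>
    #include <stdint.h>

    struct summary_gate {
        bool updating_slab_summary;
        bool read_only;              /* stand-in for vdo_is_read_only() */
        uint64_t last_summarized, next_commit;
    };

    /* True when a slab-summary update should be issued now: no update
     * in flight, device writable, and a newer committed block to
     * record. */
    static bool should_update_summary(const struct summary_gate *g)
    {
        return !g->updating_slab_summary &&
               !g->read_only &&
               g->last_summarized < g->next_commit;
    }
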
661 struct slab_journal *journal = &slab->journal; in reopen_slab_journal() local
664 VDO_ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0, in reopen_slab_journal()
666 journal->head = journal->tail; in reopen_slab_journal()
667 initialize_journal_state(journal); in reopen_slab_journal()
670 for (block = 1; block <= journal->size; block++) { in reopen_slab_journal()
671 VDO_ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0), in reopen_slab_journal()
676 add_entries(journal); in reopen_slab_journal()
697 struct slab_journal *journal = completion->parent; in complete_write() local
701 return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled)); in complete_write()
707 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in complete_write()
708 check_if_slab_drained(journal->slab); in complete_write()
712 WRITE_ONCE(journal->events->blocks_written, journal->events->blocks_written + 1); in complete_write()
714 if (list_empty(&journal->uncommitted_blocks)) { in complete_write()
716 journal->next_commit = journal->tail; in complete_write()
719 pooled = container_of(journal->uncommitted_blocks.next, in complete_write()
721 journal->next_commit = get_committing_sequence_number(pooled); in complete_write()
724 update_tail_block_location(journal); in complete_write()
730 struct slab_journal *journal = vio->completion.parent; in write_slab_journal_endio() local
732 continue_vio_after_io(vio, complete_write, journal->slab->allocator->thread_id); in write_slab_journal_endio()
746 struct slab_journal *journal = in write_slab_journal_block() local
748 struct slab_journal_block_header *header = &journal->tail_header; in write_slab_journal_block()
749 int unused_entries = journal->entries_per_block - header->entry_count; in write_slab_journal_block()
753 header->head = journal->head; in write_slab_journal_block()
754 list_add_tail(&pooled->list_entry, &journal->uncommitted_blocks); in write_slab_journal_block()
755 vdo_pack_slab_journal_block_header(header, &journal->block->header); in write_slab_journal_block()
758 memcpy(pooled->vio.data, journal->block, VDO_BLOCK_SIZE); in write_slab_journal_block()
766 adjust_slab_journal_block_reference(journal, header->sequence_number, in write_slab_journal_block()
768 journal->partial_write_in_progress = !block_is_full(journal); in write_slab_journal_block()
771 block_number = journal->slab->journal_origin + in write_slab_journal_block()
772 (header->sequence_number % journal->size); in write_slab_journal_block()
773 vio->completion.parent = journal; in write_slab_journal_block()
784 journal->tail++; in write_slab_journal_block()
785 initialize_tail_block(journal); in write_slab_journal_block()
786 journal->waiting_to_commit = false; in write_slab_journal_block()
788 operation = vdo_get_admin_state_code(&journal->slab->state); in write_slab_journal_block()
790 vdo_finish_operation(&journal->slab->state, in write_slab_journal_block()
791 (vdo_is_read_only(journal->slab->allocator->depot->vdo) ? in write_slab_journal_block()
796 add_entries(journal); in write_slab_journal_block()
803 static void commit_tail(struct slab_journal *journal) in commit_tail() argument
805 if ((journal->tail_header.entry_count == 0) && must_make_entries_to_flush(journal)) { in commit_tail()
813 if (vdo_is_read_only(journal->slab->allocator->depot->vdo) || in commit_tail()
814 journal->waiting_to_commit || in commit_tail()
815 (journal->tail_header.entry_count == 0)) { in commit_tail()
827 mark_slab_journal_clean(journal); in commit_tail()
829 journal->waiting_to_commit = true; in commit_tail()
831 journal->resource_waiter.callback = write_slab_journal_block; in commit_tail()
832 acquire_vio_from_pool(journal->slab->allocator->vio_pool, in commit_tail()
833 &journal->resource_waiter); in commit_tail()
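commit_tail() is similarly guarded: it writes nothing when the device is read-only, a commit is already waiting, or the tail block is empty (lines 813-815), and an empty tail that still owes entries (line 805) is left for add_entries() to fill first. A compact sketch of that decision, with invented field names:

    #include <stdbool.h>
    #include <stdint.h>

    struct commit_gate {
        uint16_t tail_entry_count;
        bool read_only;           /* stand-in for vdo_is_read_only() */
        bool waiting_to_commit;
    };

    /* True when the tail block should actually be handed to a VIO for
     * writing; otherwise this commit_tail() call is a no-op. */
    static bool should_commit_tail(const struct commit_gate *g)
    {
        return !g->read_only && !g->waiting_to_commit &&
               g->tail_entry_count > 0;
    }
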
903 static void add_entry(struct slab_journal *journal, physical_block_number_t pbn, in add_entry() argument
907 struct packed_slab_journal_block *block = journal->block; in add_entry()
910 result = VDO_ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point, in add_entry()
915 (unsigned long long) journal->tail_header.recovery_point.sequence_number, in add_entry()
916 journal->tail_header.recovery_point.entry_count); in add_entry()
918 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in add_entry()
923 result = VDO_ASSERT((journal->tail_header.entry_count < in add_entry()
924 journal->full_entries_per_block), in add_entry()
927 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, in add_entry()
933 encode_slab_journal_entry(&journal->tail_header, &block->payload, in add_entry()
934 pbn - journal->slab->start, operation, increment); in add_entry()
935 journal->tail_header.recovery_point = recovery_point; in add_entry()
936 if (block_is_full(journal)) in add_entry()
937 commit_tail(journal); in add_entry()
940 static inline block_count_t journal_length(const struct slab_journal *journal) in journal_length() argument
942 return journal->tail - journal->head; in journal_length()
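journal_length() is plain sequence arithmetic: head and tail are monotonically increasing 64-bit counters, so tail - head is the number of journal blocks currently in use. A runnable sketch with a worked case:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t sequence_number_t;

    /* Blocks from the oldest unreaped block up to, but not including,
     * the open tail block. */
    static sequence_number_t journal_length_sketch(sequence_number_t head,
                                                   sequence_number_t tail)
    {
        return tail - head;
    }

    int main(void)
    {
        assert(journal_length_sketch(7, 10) == 3);  /* blocks 7, 8, 9 live */
        return 0;
    }
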
962 struct slab_journal *journal = &slab->journal; in vdo_attempt_replay_into_slab() local
963 struct slab_journal_block_header *header = &journal->tail_header; in vdo_attempt_replay_into_slab()
967 if (!vdo_before_journal_point(&journal->tail_header.recovery_point, &expanded)) in vdo_attempt_replay_into_slab()
970 if ((header->entry_count >= journal->full_entries_per_block) && in vdo_attempt_replay_into_slab()
976 commit_tail(journal); in vdo_attempt_replay_into_slab()
979 if (journal->waiting_to_commit) { in vdo_attempt_replay_into_slab()
980 vdo_start_operation_with_waiter(&journal->slab->state, in vdo_attempt_replay_into_slab()
986 if (journal_length(journal) >= journal->size) { in vdo_attempt_replay_into_slab()
992 journal->head++; in vdo_attempt_replay_into_slab()
993 journal->unreapable++; in vdo_attempt_replay_into_slab()
996 if (journal->slab->status == VDO_SLAB_REBUILT) in vdo_attempt_replay_into_slab()
997 journal->slab->status = VDO_SLAB_REPLAYING; in vdo_attempt_replay_into_slab()
999 add_entry(journal, pbn, operation, increment, expanded); in vdo_attempt_replay_into_slab()
1009 static bool requires_reaping(const struct slab_journal *journal) in requires_reaping() argument
1011 return (journal_length(journal) >= journal->blocking_threshold); in requires_reaping()
1077 adjust_slab_journal_block_reference(&slab->journal, in finish_reference_block_write()
1220 static void reclaim_journal_space(struct slab_journal *journal) in reclaim_journal_space() argument
1222 block_count_t length = journal_length(journal); in reclaim_journal_space()
1223 struct vdo_slab *slab = journal->slab; in reclaim_journal_space()
1227 if ((length < journal->flushing_threshold) || (write_count == 0)) in reclaim_journal_space()
1231 WRITE_ONCE(journal->events->flush_count, journal->events->flush_count + 1); in reclaim_journal_space()
1232 if (length < journal->flushing_deadline) { in reclaim_journal_space()
1234 write_count /= journal->flushing_deadline - length + 1; in reclaim_journal_space()
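reclaim_journal_space() ramps up reference-block writeback as the journal approaches its deadline: below flushing_threshold nothing is written, between the threshold and flushing_deadline the dirty count is divided by (deadline - length + 1), and at the deadline everything goes. A worked sketch (writes_to_issue is an invented wrapper around the logic at lines 1227-1234):

    #include <stdint.h>
    #include <stdio.h>

    /* How many dirty reference blocks to write for a given journal
     * length: none below the flushing threshold, all at the deadline,
     * and a 1/(deadline - length + 1) fraction in between. */
    static uint64_t writes_to_issue(uint64_t length, uint64_t flushing_threshold,
                                    uint64_t flushing_deadline, uint64_t write_count)
    {
        if (length < flushing_threshold || write_count == 0)
            return 0;
        if (length < flushing_deadline)
            write_count /= flushing_deadline - length + 1;
        return write_count;
    }

    int main(void)
    {
        /* threshold 4, deadline 8, 12 dirty blocks: */
        printf("%llu\n", (unsigned long long) writes_to_issue(4, 4, 8, 12)); /* 12/5 = 2 */
        printf("%llu\n", (unsigned long long) writes_to_issue(7, 4, 8, 12)); /* 12/2 = 6 */
        printf("%llu\n", (unsigned long long) writes_to_issue(8, 4, 8, 12)); /* all 12 */
        return 0;
    }
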
1663 adjust_slab_journal_block_reference(&slab->journal, entry_lock, -1); in adjust_reference_count()
1695 struct slab_journal *journal = context; in add_entry_from_waiter() local
1696 struct slab_journal_block_header *header = &journal->tail_header; in add_entry_from_waiter()
1708 get_lock(journal, header->sequence_number)->recovery_start = recovery_block; in add_entry_from_waiter()
1709 if (journal->recovery_journal != NULL) { in add_entry_from_waiter()
1710 zone_count_t zone_number = journal->slab->allocator->zone_number; in add_entry_from_waiter()
1712 vdo_acquire_recovery_journal_block_reference(journal->recovery_journal, in add_entry_from_waiter()
1718 mark_slab_journal_dirty(journal, recovery_block); in add_entry_from_waiter()
1719 reclaim_journal_space(journal); in add_entry_from_waiter()
1722 add_entry(journal, updater->zpbn.pbn, updater->operation, updater->increment, in add_entry_from_waiter()
1726 if (journal->slab->status != VDO_SLAB_REBUILT) { in add_entry_from_waiter()
1731 adjust_slab_journal_block_reference(journal, in add_entry_from_waiter()
1736 result = adjust_reference_count(journal->slab, updater, in add_entry_from_waiter()
1753 static inline bool is_next_entry_a_block_map_increment(struct slab_journal *journal) in is_next_entry_a_block_map_increment() argument
1755 struct vdo_waiter *waiter = vdo_waitq_get_first_waiter(&journal->entry_waiters); in is_next_entry_a_block_map_increment()
1769 static void add_entries(struct slab_journal *journal) in add_entries() argument
1771 if (journal->adding_entries) { in add_entries()
1776 journal->adding_entries = true; in add_entries()
1777 while (vdo_waitq_has_waiters(&journal->entry_waiters)) { in add_entries()
1778 struct slab_journal_block_header *header = &journal->tail_header; in add_entries()
1780 if (journal->partial_write_in_progress || in add_entries()
1781 (journal->slab->status == VDO_SLAB_REBUILDING)) { in add_entries()
1789 if (journal->waiting_to_commit) { in add_entries()
1794 WRITE_ONCE(journal->events->tail_busy_count, in add_entries()
1795 journal->events->tail_busy_count + 1); in add_entries()
1797 } else if (is_next_entry_a_block_map_increment(journal) && in add_entries()
1798 (header->entry_count >= journal->full_entries_per_block)) { in add_entries()
1803 commit_tail(journal); in add_entries()
1804 if (journal->waiting_to_commit) { in add_entries()
1805 WRITE_ONCE(journal->events->tail_busy_count, in add_entries()
1806 journal->events->tail_busy_count + 1); in add_entries()
1812 if (requires_reaping(journal)) { in add_entries()
1813 WRITE_ONCE(journal->events->blocked_count, in add_entries()
1814 journal->events->blocked_count + 1); in add_entries()
1815 save_dirty_reference_blocks(journal->slab); in add_entries()
1821 get_lock(journal, header->sequence_number); in add_entries()
1828 VDO_ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail, in add_entries()
1836 VDO_ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size), in add_entries()
1839 WRITE_ONCE(journal->events->disk_full_count, in add_entries()
1840 journal->events->disk_full_count + 1); in add_entries()
1841 save_dirty_reference_blocks(journal->slab); in add_entries()
1850 lock->count = journal->entries_per_block + 1; in add_entries()
1853 struct vdo_slab *slab = journal->slab; in add_entries()
1869 adjust_slab_journal_block_reference(journal, 1, in add_entries()
1874 vdo_waitq_notify_next_waiter(&journal->entry_waiters, in add_entries()
1875 add_entry_from_waiter, journal); in add_entries()
1878 journal->adding_entries = false; in add_entries()
1881 if (vdo_is_state_draining(&journal->slab->state) && in add_entries()
1882 !vdo_is_state_suspending(&journal->slab->state) && in add_entries()
1883 !vdo_waitq_has_waiters(&journal->entry_waiters)) in add_entries()
1884 commit_tail(journal); in add_entries()
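The assertion at line 1828 pins down what "completely full" means for this ring: sequence numbers never wrap, so the journal is out of space exactly when tail has pulled a full ring-size ahead of head. A one-line sketch with two checks:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* The ring holds `size` blocks over an unbounded sequence space,
     * so "full" is tail - head == size, i.e. head + size == tail. */
    static bool ring_full(uint64_t head, uint64_t tail, uint64_t size)
    {
        return (head + size) == tail;
    }

    int main(void)
    {
        assert(ring_full(1, 9, 8));    /* 8 blocks live: full */
        assert(!ring_full(2, 9, 8));   /* 7 blocks live: one slot free */
        return 0;
    }
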
2318 commit_tail(&slab->journal); in drain_slab()
2411 struct slab_journal *journal = completion->parent; in finish_loading_journal() local
2412 struct vdo_slab *slab = journal->slab; in finish_loading_journal()
2421 journal->tail = header.sequence_number + 1; in finish_loading_journal()
2427 journal->head = (slab->allocator->summary_entries[slab->slab_number].is_dirty ? in finish_loading_journal()
2428 header.head : journal->tail); in finish_loading_journal()
2429 journal->tail_header = header; in finish_loading_journal()
2430 initialize_journal_state(journal); in finish_loading_journal()
2440 struct slab_journal *journal = vio->completion.parent; in read_slab_journal_tail_endio() local
2443 journal->slab->allocator->thread_id); in read_slab_journal_tail_endio()
2449 struct slab_journal *journal = completion->parent; in handle_load_error() local
2453 return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in handle_load_error()
2454 vdo_finish_loading_with_result(&journal->slab->state, result); in handle_load_error()
2467 struct slab_journal *journal = in read_slab_journal_tail() local
2469 struct vdo_slab *slab = journal->slab; in read_slab_journal_tail()
2480 (tail_block_offset_t)(journal->size - 1) : in read_slab_journal_tail()
2483 vio->completion.parent = journal; in read_slab_journal_tail()
2495 struct slab_journal *journal = &slab->journal; in load_slab_journal() local
2506 VDO_ASSERT_LOG_ONLY(((journal->size < 16) || in load_slab_journal()
2507 (journal->scrubbing_threshold < (journal->size - 1))), in load_slab_journal()
2514 journal->resource_waiter.callback = read_slab_journal_tail; in load_slab_journal()
2515 acquire_vio_from_pool(slab->allocator->vio_pool, &journal->resource_waiter); in load_slab_journal()
2828 struct slab_journal *journal = &slab->journal; in apply_journal_entries() local
2831 sequence_number_t tail = journal->tail; in apply_journal_entries()
2832 tail_block_offset_t end_index = (tail - 1) % journal->size; in apply_journal_entries()
2838 tail_block_offset_t head_index = head % journal->size; in apply_journal_entries()
2856 (header.entry_count > journal->entries_per_block) || in apply_journal_entries()
2858 (header.entry_count > journal->full_entries_per_block))) { in apply_journal_entries()
2875 if (index == journal->size) in apply_journal_entries()
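apply_journal_entries() walks the on-disk ring from the head block's slot to the tail's (lines 2831-2838), validating each header's entry count against the per-block capacities (lines 2856-2858) and wrapping the index to zero at the end of the ring (line 2875). A sketch of the wrap-around walk, with the per-block replay left as a stub:

    #include <stddef.h>
    #include <stdio.h>

    /* Visit ring slots from head_index through end_index inclusive,
     * wrapping past the last slot back to slot zero. */
    static void walk_ring(size_t head_index, size_t end_index, size_t size)
    {
        size_t index = head_index;

        for (;;) {
            printf("replay block at slot %zu\n", index);  /* stub for per-block work */
            if (index == end_index)
                break;
            index++;
            if (index == size)
                index = 0;
        }
    }

    int main(void)
    {
        walk_ring(6, 1, 8);  /* visits slots 6, 7, 0, 1 */
        return 0;
    }
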
3089 vdo_waitq_notify_all_waiters(&slab->journal.entry_waiters, in notify_block_allocator_of_read_only_mode()
3090 abort_waiter, &slab->journal); in notify_block_allocator_of_read_only_mode()
3254 vdo_waitq_enqueue_waiter(&slab->journal.entry_waiters, &updater->waiter); in vdo_modify_reference_count()
3255 if ((slab->status != VDO_SLAB_REBUILT) && requires_reaping(&slab->journal)) in vdo_modify_reference_count()
3258 add_entries(&slab->journal); in vdo_modify_reference_count()
3534 struct slab_journal *journal; in vdo_prepare_slabs_for_allocation() local
3548 journal = &slab->journal; in vdo_prepare_slabs_for_allocation()
3551 (journal_length(journal) >= journal->scrubbing_threshold)); in vdo_prepare_slabs_for_allocation()
3586 struct slab_journal *journal = &slab->journal; in vdo_dump_block_allocator() local
3599 vdo_waitq_num_waiters(&journal->entry_waiters), in vdo_dump_block_allocator()
3600 vdo_bool_to_string(journal->waiting_to_commit), in vdo_dump_block_allocator()
3601 vdo_bool_to_string(journal->updating_slab_summary), in vdo_dump_block_allocator()
3602 (unsigned long long) journal->head, in vdo_dump_block_allocator()
3603 (unsigned long long) journal->unreapable, in vdo_dump_block_allocator()
3604 (unsigned long long) journal->tail, in vdo_dump_block_allocator()
3605 (unsigned long long) journal->next_commit, in vdo_dump_block_allocator()
3606 (unsigned long long) journal->summarized, in vdo_dump_block_allocator()
3607 (unsigned long long) journal->last_summarized, in vdo_dump_block_allocator()
3608 (unsigned long long) journal->recovery_lock, in vdo_dump_block_allocator()
3609 vdo_bool_to_string(journal->recovery_lock != 0)); in vdo_dump_block_allocator()
3651 vdo_free(vdo_forget(slab->journal.block)); in free_slab()
3652 vdo_free(vdo_forget(slab->journal.locks)); in free_slab()
3660 struct slab_journal *journal = &slab->journal; in initialize_slab_journal() local
3665 __func__, &journal->locks); in initialize_slab_journal()
3670 (char **) &journal->block); in initialize_slab_journal()
3674 journal->slab = slab; in initialize_slab_journal()
3675 journal->size = slab_config->slab_journal_blocks; in initialize_slab_journal()
3676 journal->flushing_threshold = slab_config->slab_journal_flushing_threshold; in initialize_slab_journal()
3677 journal->blocking_threshold = slab_config->slab_journal_blocking_threshold; in initialize_slab_journal()
3678 journal->scrubbing_threshold = slab_config->slab_journal_scrubbing_threshold; in initialize_slab_journal()
3679 journal->entries_per_block = VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK; in initialize_slab_journal()
3680 journal->full_entries_per_block = VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK; in initialize_slab_journal()
3681 journal->events = &slab->allocator->slab_journal_statistics; in initialize_slab_journal()
3682 journal->recovery_journal = slab->allocator->depot->vdo->recovery_journal; in initialize_slab_journal()
3683 journal->tail = 1; in initialize_slab_journal()
3684 journal->head = 1; in initialize_slab_journal()
3686 journal->flushing_deadline = journal->flushing_threshold; in initialize_slab_journal()
3691 if ((journal->blocking_threshold - journal->flushing_threshold) > 5) in initialize_slab_journal()
3692 journal->flushing_deadline = journal->blocking_threshold - 5; in initialize_slab_journal()
3694 journal->slab_summary_waiter.callback = release_journal_locks; in initialize_slab_journal()
3696 INIT_LIST_HEAD(&journal->dirty_entry); in initialize_slab_journal()
3697 INIT_LIST_HEAD(&journal->uncommitted_blocks); in initialize_slab_journal()
3699 journal->tail_header.nonce = slab->allocator->nonce; in initialize_slab_journal()
3700 journal->tail_header.metadata_type = VDO_METADATA_SLAB_JOURNAL; in initialize_slab_journal()
3701 initialize_journal_state(journal); in initialize_slab_journal()
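initialize_slab_journal() takes its three thresholds straight from the slab config and then nudges flushing_deadline toward blocking_threshold: the deadline defaults to the flushing threshold, but when the two are more than 5 blocks apart it becomes blocking_threshold - 5 (lines 3686-3692), presumably so flushing still has a few blocks of headroom before writers block. A worked sketch of just that computation:

    #include <assert.h>
    #include <stdint.h>

    /* Mirror of the deadline setup at lines 3686-3692: keep the
     * deadline 5 blocks shy of blocking when there is room, else at
     * the flushing threshold itself. */
    static uint64_t compute_flushing_deadline(uint64_t flushing_threshold,
                                              uint64_t blocking_threshold)
    {
        uint64_t deadline = flushing_threshold;

        if ((blocking_threshold - flushing_threshold) > 5)
            deadline = blocking_threshold - 5;
        return deadline;
    }

    int main(void)
    {
        assert(compute_flushing_deadline(10, 20) == 15);  /* gap 10 > 5 */
        assert(compute_flushing_deadline(10, 14) == 10);  /* gap 4 <= 5 */
        return 0;
    }
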
3849 static bool __must_check release_recovery_journal_lock(struct slab_journal *journal, in release_recovery_journal_lock() argument
3852 if (recovery_lock > journal->recovery_lock) { in release_recovery_journal_lock()
3853 VDO_ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock), in release_recovery_journal_lock()
3858 if ((recovery_lock < journal->recovery_lock) || in release_recovery_journal_lock()
3859 vdo_is_read_only(journal->slab->allocator->depot->vdo)) in release_recovery_journal_lock()
3863 commit_tail(journal); in release_recovery_journal_lock()
3876 struct slab_journal *journal, *tmp; in release_tail_block_locks() local
3880 list_for_each_entry_safe(journal, tmp, list, dirty_entry) { in release_tail_block_locks()
3881 if (!release_recovery_journal_lock(journal, in release_tail_block_locks()