Lines matching full:mm in linux/mm/mmu_notifier.c
3 * linux/mm/mmu_notifier.c
13 #include <linux/mm.h>
19 #include <linux/sched/mm.h>
35 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
40 /* all mmu notifiers registered in this mm are queued in this list */
56 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
59 * Note that the core mm creates nested invalidate_range_start()/end() regions
62 * progress on the mm side.
69 * - mm->active_invalidate_ranges != 0
75 * - mm->active_invalidate_ranges != 0
175 * subscription. If the mm invokes invalidation during the critical section
191 interval_sub->mm->notifier_subscriptions; in mmu_interval_read_begin()
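The collision-retry scheme above (invalidate_seq held odd while an invalidation is in progress) is what mmu_interval_read_begin() exposes to drivers. A minimal sketch of the driver-side loop it enables, following the pattern documented in Documentation/mm/hmm.rst; driver_lock and my_prog_device_ptes() are hypothetical placeholders:

    #include <linux/mmu_notifier.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(driver_lock);   /* hypothetical device-PTE lock */

    static int my_fault(struct mmu_interval_notifier *interval_sub,
                        unsigned long start, unsigned long end)
    {
            unsigned long seq;

    again:
            seq = mmu_interval_read_begin(interval_sub);
            /* ... fault/snapshot CPU pages for [start, end); may sleep ... */
            mutex_lock(&driver_lock);
            if (mmu_interval_read_retry(interval_sub, seq)) {
                    /* an invalidation ran concurrently; redo the snapshot */
                    mutex_unlock(&driver_lock);
                    goto again;
            }
            my_prog_device_ptes(start, end);    /* hypothetical */
            mutex_unlock(&driver_lock);
            return 0;
    }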
264 struct mm_struct *mm) in mn_itree_release() argument
269 .mm = mm, in mn_itree_release()
291 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
293 * in parallel despite there being no task using this mm any more,
302 struct mm_struct *mm) in mn_hlist_release() argument
318 * sptes before all the pages in the mm are freed. in mn_hlist_release()
321 subscription->ops->release(subscription, mm); in mn_hlist_release()
340 * exit_mmap (which would proceed with freeing all pages in the mm) in mn_hlist_release()
350 void __mmu_notifier_release(struct mm_struct *mm) in __mmu_notifier_release() argument
353 mm->notifier_subscriptions; in __mmu_notifier_release()
356 mn_itree_release(subscriptions, mm); in __mmu_notifier_release()
359 mn_hlist_release(subscriptions, mm); in __mmu_notifier_release()
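mn_hlist_release() runs each subscription's ->release exactly once before exit_mmap() frees the pages, so secondary MMUs can drop their sptes first. A hedged sketch of a driver-side ->release; struct my_dev and my_dev_unmap_all() are hypothetical:

    struct my_dev {
            struct mmu_notifier mn;
            /* ... device page-table state ... */
    };

    /* Last chance to drop sptes; the mm's pages are freed right after. */
    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            struct my_dev *dev = container_of(mn, struct my_dev, mn);

            my_dev_unmap_all(dev);              /* hypothetical teardown */
    }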
367 int __mmu_notifier_clear_flush_young(struct mm_struct *mm, in __mmu_notifier_clear_flush_young() argument
376 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_clear_flush_young()
380 subscription, mm, start, end); in __mmu_notifier_clear_flush_young()
387 int __mmu_notifier_clear_young(struct mm_struct *mm, in __mmu_notifier_clear_young() argument
396 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_clear_young()
400 mm, start, end); in __mmu_notifier_clear_young()
407 int __mmu_notifier_test_young(struct mm_struct *mm, in __mmu_notifier_test_young() argument
415 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_test_young()
418 young = subscription->ops->test_young(subscription, mm, in __mmu_notifier_test_young()
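All three young-bit helpers walk the subscription hlist under SRCU and OR the callbacks' return values together, so any one subscription reporting a young secondary PTE makes the whole call report young. A sketch of a matching driver op, reusing the hypothetical struct my_dev; my_dev_age_range() is also hypothetical:

    /* Return nonzero if any secondary PTE in [start, end) was accessed;
     * clear the accessed bits as a side effect (clear_flush_young semantics).
     */
    static int my_clear_flush_young(struct mmu_notifier *mn, struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
    {
            struct my_dev *dev = container_of(mn, struct my_dev, mn);

            return my_dev_age_range(dev, start, end);   /* hypothetical */
    }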
524 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_start()
562 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_end()
573 void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, in __mmu_notifier_arch_invalidate_secondary_tlbs() argument
581 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_arch_invalidate_secondary_tlbs()
585 subscription, mm, in __mmu_notifier_arch_invalidate_secondary_tlbs()
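invalidate_range_start()/end() bracket every change to the range's PTEs, while __mmu_notifier_arch_invalidate_secondary_tlbs() serves hardware that shares the CPU page tables and only needs TLB shootdowns. A sketch of the start/end pair on the driver side; my_dev_unmap() is hypothetical. Note that returning -EAGAIN from start is only legal when the range is non-blockable:

    static int my_inv_start(struct mmu_notifier *mn,
                            const struct mmu_notifier_range *range)
    {
            struct my_dev *dev = container_of(mn, struct my_dev, mn);

            /* Non-blockable contexts (e.g. the OOM reaper) may be refused. */
            if (!mmu_notifier_range_blockable(range))
                    return -EAGAIN;

            /* Unmap device PTEs covering [range->start, range->end). */
            my_dev_unmap(dev, range->start, range->end);   /* hypothetical */
            return 0;
    }

    static void my_inv_end(struct mmu_notifier *mn,
                           const struct mmu_notifier_range *range)
    {
            /* Re-allow device faults held off since my_inv_start(). */
    }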
597 struct mm_struct *mm) in __mmu_notifier_register() argument
602 mmap_assert_write_locked(mm); in __mmu_notifier_register()
603 BUG_ON(atomic_read(&mm->mm_users) <= 0); in __mmu_notifier_register()
615 if (!mm->notifier_subscriptions) { in __mmu_notifier_register()
618 * know that mm->notifier_subscriptions can't change while we in __mmu_notifier_register()
634 ret = mm_take_all_locks(mm); in __mmu_notifier_register()
642 * current->mm or explicitly with get_task_mm() or similar). in __mmu_notifier_register()
650 * mmu_notifier_subscriptions is not freed until the mm is destroyed. in __mmu_notifier_register()
655 smp_store_release(&mm->notifier_subscriptions, subscriptions); in __mmu_notifier_register()
659 mmgrab(mm); in __mmu_notifier_register()
660 subscription->mm = mm; in __mmu_notifier_register()
663 spin_lock(&mm->notifier_subscriptions->lock); in __mmu_notifier_register()
665 &mm->notifier_subscriptions->list); in __mmu_notifier_register()
666 spin_unlock(&mm->notifier_subscriptions->lock); in __mmu_notifier_register()
668 mm->notifier_subscriptions->has_itree = true; in __mmu_notifier_register()
670 mm_drop_all_locks(mm); in __mmu_notifier_register()
671 BUG_ON(atomic_read(&mm->mm_users) <= 0); in __mmu_notifier_register()
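Note the ordering in the registration path: line 655 publishes the freshly allocated structure with smp_store_release(), which pairs with the smp_load_acquire() of mm->notifier_subscriptions at line 982, so lockless readers see either NULL or a fully initialized structure. The shape of that pairing as a generic sketch; subs_ptr and init_subscriptions() are hypothetical stand-ins:

    static struct mmu_notifier_subscriptions *subs_ptr;    /* hypothetical */

    static void publish(struct mmu_notifier_subscriptions *s)
    {
            init_subscriptions(s);              /* hypothetical: init fully first */
            smp_store_release(&subs_ptr, s);    /* then make it visible */
    }

    static struct mmu_notifier_subscriptions *consume(void)
    {
            /* Either NULL or an object whose initialization is visible. */
            return smp_load_acquire(&subs_ptr);
    }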
681 * mmu_notifier_register - Register a notifier on a mm
683 * @mm: The mm to attach the notifier to
688 * so mm has to be current->mm or the mm should be pinned safely such
689 * as with get_task_mm(). If the mm is not current->mm, the mm_users
696 * While the caller holds a mmu_notifier get, the subscription->mm pointer will remain
697 * valid, and can be converted to an active mm pointer via mmget_not_zero().
700 struct mm_struct *mm) in mmu_notifier_register() argument
704 mmap_write_lock(mm); in mmu_notifier_register()
705 ret = __mmu_notifier_register(subscription, mm); in mmu_notifier_register()
706 mmap_write_unlock(mm); in mmu_notifier_register()
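mmu_notifier_register() is simply __mmu_notifier_register() wrapped in mmap_write_lock(), so callers need no locking of their own. A hedged end-to-end sketch combining the hypothetical ops above:

    static const struct mmu_notifier_ops my_ops = {
            .release                = my_release,
            .clear_flush_young      = my_clear_flush_young,
            .invalidate_range_start = my_inv_start,
            .invalidate_range_end   = my_inv_end,
    };

    static int my_attach(struct my_dev *dev)
    {
            dev->mn.ops = &my_ops;
            /* Takes and drops mmap_write_lock(current->mm) internally. */
            return mmu_notifier_register(&dev->mn, current->mm);
    }

    static void my_detach(struct my_dev *dev)
    {
            /* Synchronizes against running callbacks; free dev afterwards. */
            mmu_notifier_unregister(&dev->mn, dev->mn.mm);
    }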
712 find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops) in find_get_mmu_notifier() argument
716 spin_lock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
718 &mm->notifier_subscriptions->list, hlist, in find_get_mmu_notifier()
719 lockdep_is_held(&mm->notifier_subscriptions->lock)) { in find_get_mmu_notifier()
727 spin_unlock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
730 spin_unlock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
736 * the mm & ops
738 * @mm: The mm to attach notifiers to
746 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
748 * While the caller holds a mmu_notifier get, the mm pointer will remain valid,
749 * and can be converted to an active mm pointer via mmget_not_zero().
752 struct mm_struct *mm) in mmu_notifier_get_locked() argument
757 mmap_assert_write_locked(mm); in mmu_notifier_get_locked()
759 if (mm->notifier_subscriptions) { in mmu_notifier_get_locked()
760 subscription = find_get_mmu_notifier(mm, ops); in mmu_notifier_get_locked()
765 subscription = ops->alloc_notifier(mm); in mmu_notifier_get_locked()
769 ret = __mmu_notifier_register(subscription, mm); in mmu_notifier_get_locked()
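mmu_notifier_get_locked() deduplicates registrations: if a subscription with the same ops already exists on the mm, find_get_mmu_notifier() takes a reference to it; otherwise one is allocated through ops->alloc_notifier(). The get/put API requires both .alloc_notifier and .free_notifier in the ops. A sketch of the allocation side, again using the hypothetical struct my_dev:

    #include <linux/slab.h>

    static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
    {
            struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (!dev)
                    return ERR_PTR(-ENOMEM);
            /* The core fills in ->ops and ->mm and sets the refcount. */
            return &dev->mn;
    }

    static const struct mmu_notifier_ops my_get_ops = {
            .release        = my_release,
            .alloc_notifier = my_alloc_notifier,
            .free_notifier  = my_free_notifier,    /* sketched below */
    };

    static int my_get(void)
    {
            struct mmu_notifier *mn;

            mmap_write_lock(current->mm);
            mn = mmu_notifier_get_locked(&my_get_ops, current->mm);
            mmap_write_unlock(current->mm);
            if (IS_ERR(mn))
                    return PTR_ERR(mn);
            /* A second caller with the same ops receives the same mn. */
            return 0;
    }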
780 void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm) in __mmu_notifier_subscriptions_destroy() argument
782 BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list)); in __mmu_notifier_subscriptions_destroy()
783 kfree(mm->notifier_subscriptions); in __mmu_notifier_subscriptions_destroy()
784 mm->notifier_subscriptions = LIST_POISON1; /* debug */ in __mmu_notifier_subscriptions_destroy()
788 * This releases the mm_count pin automatically and frees the mm
798 struct mm_struct *mm) in mmu_notifier_unregister() argument
800 BUG_ON(atomic_read(&mm->mm_count) <= 0); in mmu_notifier_unregister()
815 subscription->ops->release(subscription, mm); in mmu_notifier_unregister()
818 spin_lock(&mm->notifier_subscriptions->lock); in mmu_notifier_unregister()
824 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_unregister()
833 BUG_ON(atomic_read(&mm->mm_count) <= 0); in mmu_notifier_unregister()
835 mmdrop(mm); in mmu_notifier_unregister()
843 struct mm_struct *mm = subscription->mm; in mmu_notifier_free_rcu() local
847 mmdrop(mm); in mmu_notifier_free_rcu()
874 struct mm_struct *mm = subscription->mm; in mmu_notifier_put() local
876 spin_lock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
880 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
886 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
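mmu_notifier_put() drops the reference under the subscription lock; the final put unhooks the subscription and defers ops->free_notifier() through SRCU (mmu_notifier_free_rcu()), so walkers still inside the hlist can finish before the memory goes away, and the mm_count pin is dropped only after the free. The matching free side of the get sketch above:

    /* Runs after an SRCU grace period; no callback can still be executing. */
    static void my_free_notifier(struct mmu_notifier *mn)
    {
            kfree(container_of(mn, struct my_dev, mn));
    }

    static void my_put(struct mmu_notifier *mn)
    {
            /* Returns immediately, unlike mmu_notifier_unregister();
             * freeing happens later via ->free_notifier.
             */
            mmu_notifier_put(mn);
    }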
891 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, in __mmu_interval_notifier_insert() argument
895 interval_sub->mm = mm; in __mmu_interval_notifier_insert()
909 if (WARN_ON(atomic_read(&mm->mm_users) <= 0)) in __mmu_interval_notifier_insert()
913 mmgrab(mm); in __mmu_interval_notifier_insert()
961 * @mm: mm_struct to attach to
965 * mm. Upon return the ops related to mmu_interval_notifier will be called
973 struct mm_struct *mm, unsigned long start, in mmu_interval_notifier_insert() argument
980 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
982 subscriptions = smp_load_acquire(&mm->notifier_subscriptions); in mmu_interval_notifier_insert()
984 ret = mmu_notifier_register(NULL, mm); in mmu_interval_notifier_insert()
987 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert()
989 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert()
995 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, in mmu_interval_notifier_insert_locked() argument
1000 mm->notifier_subscriptions; in mmu_interval_notifier_insert_locked()
1003 mmap_assert_write_locked(mm); in mmu_interval_notifier_insert_locked()
1006 ret = __mmu_notifier_register(NULL, mm); in mmu_interval_notifier_insert_locked()
1009 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert_locked()
1011 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert_locked()
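Both insert variants attach a notifier to an address range in the interval tree, first registering an empty (NULL) hlist notifier if needed to create the subscriptions structure. A hedged sketch tying the pieces together: the ->invalidate callback must advance the sequence with mmu_interval_set_seq() under the same driver_lock the read-side loop above takes, so mmu_interval_read_retry() observes the collision. my_dev is hypothetical as before:

    static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
    {
            if (mmu_notifier_range_blockable(range))
                    mutex_lock(&driver_lock);
            else if (!mutex_trylock(&driver_lock))
                    return false;   /* only legal for non-blockable ranges */

            mmu_interval_set_seq(interval_sub, cur_seq);   /* mark the collision */
            /* invalidate device PTEs overlapping [range->start, range->end) */
            mutex_unlock(&driver_lock);
            return true;
    }

    static const struct mmu_interval_notifier_ops my_interval_ops = {
            .invalidate = my_invalidate,
    };

    static int my_interval_attach(struct mmu_interval_notifier *interval_sub,
                                  unsigned long start, unsigned long length)
    {
            /* May take mmap_lock and allocate; see might_lock() above. */
            return mmu_interval_notifier_insert(interval_sub, current->mm,
                                                start, length, &my_interval_ops);
    }

On teardown, mmu_interval_notifier_remove() waits for any in-flight invalidation covering the subscription to finish before dropping the mm_count pin with mmdrop(), as the final lines below show.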
1040 struct mm_struct *mm = interval_sub->mm; in mmu_interval_notifier_remove() local
1042 mm->notifier_subscriptions; in mmu_interval_notifier_remove()
1078 mmdrop(mm); in mmu_interval_notifier_remove()