Lines matching full:gpc (KVM's gfn_to_pfn_cache code, virt/kvm/pfncache.c)
28 struct gfn_to_pfn_cache *gpc; in gfn_to_pfn_cache_invalidate_start() local
31 list_for_each_entry(gpc, &kvm->gpc_list, list) { in gfn_to_pfn_cache_invalidate_start()
32 read_lock_irq(&gpc->lock); in gfn_to_pfn_cache_invalidate_start()
35 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && in gfn_to_pfn_cache_invalidate_start()
36 gpc->uhva >= start && gpc->uhva < end) { in gfn_to_pfn_cache_invalidate_start()
37 read_unlock_irq(&gpc->lock); in gfn_to_pfn_cache_invalidate_start()
47 write_lock_irq(&gpc->lock); in gfn_to_pfn_cache_invalidate_start()
48 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && in gfn_to_pfn_cache_invalidate_start()
49 gpc->uhva >= start && gpc->uhva < end) in gfn_to_pfn_cache_invalidate_start()
50 gpc->valid = false; in gfn_to_pfn_cache_invalidate_start()
51 write_unlock_irq(&gpc->lock); in gfn_to_pfn_cache_invalidate_start()
55 read_unlock_irq(&gpc->lock); in gfn_to_pfn_cache_invalidate_start()
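
The lines above are from the MMU-notifier invalidation walk: each cache on kvm->gpc_list is inspected under the read lock, and only when its uhva falls inside the invalidated [start, end) range is the read lock dropped and the write lock taken, with the same predicate rechecked before gpc->valid is cleared, because the cache may have changed while no lock was held. A minimal userspace model of that drop-and-recheck pattern, with a pthread rwlock standing in for the kernel's rwlock_t and made-up names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cache {
	pthread_rwlock_t lock;
	bool valid;
	unsigned long uhva;	/* start of the cached mapping */
};

/* Model of the invalidate-start walk: recheck after retaking the lock. */
static void invalidate_if_in_range(struct cache *c, unsigned long start,
				   unsigned long end)
{
	pthread_rwlock_rdlock(&c->lock);
	if (c->valid && c->uhva >= start && c->uhva < end) {
		/* Can't upgrade in place; drop and retake for writing. */
		pthread_rwlock_unlock(&c->lock);
		pthread_rwlock_wrlock(&c->lock);

		/* State may have changed while unlocked, so recheck. */
		if (c->valid && c->uhva >= start && c->uhva < end)
			c->valid = false;
	}
	pthread_rwlock_unlock(&c->lock);
}

int main(void)
{
	struct cache c = { .valid = true, .uhva = 0x5000 };

	pthread_rwlock_init(&c.lock, NULL);
	invalidate_if_in_range(&c, 0x4000, 0x6000);
	printf("valid after invalidation: %d\n", c.valid);
	return 0;
}
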
73 bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) in kvm_gpc_check() argument
75 struct kvm_memslots *slots = kvm_memslots(gpc->kvm); in kvm_gpc_check()
77 if (!gpc->active) in kvm_gpc_check()
84 if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation) in kvm_gpc_check()
87 if (kvm_is_error_hva(gpc->uhva)) in kvm_gpc_check()
90 if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len)) in kvm_gpc_check()
93 if (!gpc->valid) in kvm_gpc_check()
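
kvm_gpc_check() is the fast-path validity test: the cache must be active, its memslot generation must still match (when it is keyed by a gpa rather than a raw uhva), its uhva must be a real host address, the requested length must fit, and the valid flag must not have been cleared by an invalidation. kvm_gpc_is_valid_len() itself is not shown in the listing; the sketch below assumes it simply rejects accesses that would cross a page boundary, since the cache maps a single page. Names here are illustrative, not the kernel's:

#include <assert.h>
#include <stdbool.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static unsigned long offset_in_page(unsigned long addr)
{
	return addr & ~PAGE_MASK;
}

/* Assumed semantics: the cached mapping covers one page, so the
 * requested range must not spill into the next page. */
static bool range_fits_in_page(unsigned long addr, unsigned long len)
{
	return offset_in_page(addr) + len <= PAGE_SIZE;
}

int main(void)
{
	assert(range_fits_in_page(0x1000, 4096));	/* exactly one page  */
	assert(range_fits_in_page(0x1ff0, 16));		/* ends on the edge  */
	assert(!range_fits_in_page(0x1ff0, 17));	/* crosses into next */
	return 0;
}
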
137 * is not protected by gpc->lock. It is guaranteed to in mmu_notifier_retry_cache()
138 * be elevated before the mmu_notifier acquires gpc->lock, and in mmu_notifier_retry_cache()
155 static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) in hva_to_pfn_retry() argument
158 void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); in hva_to_pfn_retry()
163 lockdep_assert_held(&gpc->refresh_lock); in hva_to_pfn_retry()
165 lockdep_assert_held_write(&gpc->lock); in hva_to_pfn_retry()
168 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva in hva_to_pfn_retry()
172 gpc->valid = false; in hva_to_pfn_retry()
175 mmu_seq = gpc->kvm->mmu_invalidate_seq; in hva_to_pfn_retry()
178 write_unlock_irq(&gpc->lock); in hva_to_pfn_retry()
201 new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL); in hva_to_pfn_retry()
208 * too must be done outside of gpc->lock! in hva_to_pfn_retry()
210 if (new_pfn == gpc->pfn) in hva_to_pfn_retry()
220 write_lock_irq(&gpc->lock); in hva_to_pfn_retry()
226 WARN_ON_ONCE(gpc->valid); in hva_to_pfn_retry()
227 } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq)); in hva_to_pfn_retry()
229 gpc->valid = true; in hva_to_pfn_retry()
230 gpc->pfn = new_pfn; in hva_to_pfn_retry()
231 gpc->khva = new_khva + offset_in_page(gpc->uhva); in hva_to_pfn_retry()
243 write_lock_irq(&gpc->lock); in hva_to_pfn_retry()
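
hva_to_pfn_retry() resolves the host virtual address to a pfn outside gpc->lock (the resolution can fault and sleep), which opens a race with the MMU notifier. It closes that race by snapshotting kvm->mmu_invalidate_seq before dropping the lock and redoing the whole resolution if the sequence has moved (or an invalidation is in flight) by the time the lock is retaken; gpc->valid is cleared up front so concurrent readers treat the cache as unusable for the duration. A self-contained userspace model of that snapshot-and-retry loop, where a plain mutex stands in for the rwlock and resolve_pfn() is a made-up stand-in for hva_to_pfn() that forces one retry:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the mmu_invalidate_seq handshake: an invalidation bumps the
 * sequence count, and any translation resolved against an older snapshot
 * must be thrown away and redone. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long invalidate_seq;
static bool cache_valid;
static unsigned long cached_pfn;

/* Stand-in for hva_to_pfn(); it also simulates a racing invalidation on
 * the first attempt so the retry path is exercised. */
static unsigned long resolve_pfn(unsigned long uhva, int attempt)
{
	if (attempt == 0)
		invalidate_seq++;	/* pretend an invalidation raced */
	return uhva >> 12;
}

static void refresh(unsigned long uhva)
{
	unsigned long seq, pfn;
	int attempt = 0;

	pthread_mutex_lock(&lock);
	cache_valid = false;			/* invalidate before dropping lock */
	do {
		seq = invalidate_seq;		/* snapshot before resolving */
		pthread_mutex_unlock(&lock);	/* resolution may sleep */
		pfn = resolve_pfn(uhva, attempt++);
		pthread_mutex_lock(&lock);
	} while (seq != invalidate_seq);	/* stale snapshot: redo */
	cached_pfn = pfn;
	cache_valid = true;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	refresh(0x7f0000001000UL);
	printf("pfn=0x%lx valid=%d\n", cached_pfn, cache_valid);
	return 0;
}
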
248 static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva) in __kvm_gpc_refresh() argument
262 lockdep_assert_held(&gpc->refresh_lock); in __kvm_gpc_refresh()
264 write_lock_irq(&gpc->lock); in __kvm_gpc_refresh()
266 if (!gpc->active) { in __kvm_gpc_refresh()
271 old_pfn = gpc->pfn; in __kvm_gpc_refresh()
272 old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); in __kvm_gpc_refresh()
273 old_uhva = PAGE_ALIGN_DOWN(gpc->uhva); in __kvm_gpc_refresh()
278 gpc->gpa = INVALID_GPA; in __kvm_gpc_refresh()
279 gpc->memslot = NULL; in __kvm_gpc_refresh()
280 gpc->uhva = PAGE_ALIGN_DOWN(uhva); in __kvm_gpc_refresh()
282 if (gpc->uhva != old_uhva) in __kvm_gpc_refresh()
285 struct kvm_memslots *slots = kvm_memslots(gpc->kvm); in __kvm_gpc_refresh()
289 if (gpc->gpa != gpa || gpc->generation != slots->generation || in __kvm_gpc_refresh()
290 kvm_is_error_hva(gpc->uhva)) { in __kvm_gpc_refresh()
293 gpc->gpa = gpa; in __kvm_gpc_refresh()
294 gpc->generation = slots->generation; in __kvm_gpc_refresh()
295 gpc->memslot = __gfn_to_memslot(slots, gfn); in __kvm_gpc_refresh()
296 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); in __kvm_gpc_refresh()
298 if (kvm_is_error_hva(gpc->uhva)) { in __kvm_gpc_refresh()
307 if (gpc->uhva != old_uhva) in __kvm_gpc_refresh()
310 gpc->uhva = old_uhva; in __kvm_gpc_refresh()
315 gpc->uhva += page_offset; in __kvm_gpc_refresh()
321 if (!gpc->valid || hva_change) { in __kvm_gpc_refresh()
322 ret = hva_to_pfn_retry(gpc); in __kvm_gpc_refresh()
326 * But do update gpc->khva because the offset within the page in __kvm_gpc_refresh()
329 gpc->khva = old_khva + page_offset; in __kvm_gpc_refresh()
341 gpc->valid = false; in __kvm_gpc_refresh()
342 gpc->pfn = KVM_PFN_ERR_FAULT; in __kvm_gpc_refresh()
343 gpc->khva = NULL; in __kvm_gpc_refresh()
347 unmap_old = (old_pfn != gpc->pfn); in __kvm_gpc_refresh()
350 write_unlock_irq(&gpc->lock); in __kvm_gpc_refresh()
358 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len) in kvm_gpc_refresh() argument
362 guard(mutex)(&gpc->refresh_lock); in kvm_gpc_refresh()
364 if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len)) in kvm_gpc_refresh()
372 uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD; in kvm_gpc_refresh()
374 return __kvm_gpc_refresh(gpc, gpc->gpa, uhva); in kvm_gpc_refresh()
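
__kvm_gpc_refresh() recomputes the gpa-to-uhva translation only when it has to: if the cache is keyed by a gpa and either the gpa or the memslot generation has changed, the memslot is looked up again and a fresh uhva derived; otherwise the old page-aligned uhva is kept and only the offset within the page is reapplied. kvm_gpc_refresh() is the thin wrapper that revalidates the length and refreshes in place. A rough userspace model of that generation check, where the memslot lookup is replaced by a made-up translate() function and the field names only loosely follow the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

struct xlate_cache {
	unsigned long gpa;	/* guest physical address backing the cache */
	unsigned long uhva;	/* cached host virtual address               */
	unsigned long generation;
};

static unsigned long memslot_generation = 7;	/* bumped on memslot changes */

/* Made-up stand-in for the __gfn_to_memslot()/gfn_to_hva_memslot() pair. */
static unsigned long translate(unsigned long gpa)
{
	return 0x7f0000000000UL + (gpa & PAGE_MASK);
}

static void refresh(struct xlate_cache *c, unsigned long gpa)
{
	unsigned long page_offset = gpa & ~PAGE_MASK;

	if (c->gpa != gpa || c->generation != memslot_generation) {
		/* gpa or memslot layout changed: redo the translation. */
		c->gpa = gpa;
		c->generation = memslot_generation;
		c->uhva = translate(gpa);
	} else {
		/* Translation still good; keep the page-aligned uhva. */
		c->uhva &= PAGE_MASK;
	}
	c->uhva += page_offset;
}

int main(void)
{
	struct xlate_cache c = { 0 };

	refresh(&c, 0x12340);		/* first use: translates            */
	printf("uhva=0x%lx\n", c.uhva);
	memslot_generation++;		/* memslots changed                 */
	refresh(&c, 0x12340);		/* same gpa: must translate again   */
	printf("uhva=0x%lx\n", c.uhva);
	return 0;
}
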
377 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm) in kvm_gpc_init() argument
379 rwlock_init(&gpc->lock); in kvm_gpc_init()
380 mutex_init(&gpc->refresh_lock); in kvm_gpc_init()
382 gpc->kvm = kvm; in kvm_gpc_init()
383 gpc->pfn = KVM_PFN_ERR_FAULT; in kvm_gpc_init()
384 gpc->gpa = INVALID_GPA; in kvm_gpc_init()
385 gpc->uhva = KVM_HVA_ERR_BAD; in kvm_gpc_init()
386 gpc->active = gpc->valid = false; in kvm_gpc_init()
389 static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva, in __kvm_gpc_activate() argument
392 struct kvm *kvm = gpc->kvm; in __kvm_gpc_activate()
397 guard(mutex)(&gpc->refresh_lock); in __kvm_gpc_activate()
399 if (!gpc->active) { in __kvm_gpc_activate()
400 if (KVM_BUG_ON(gpc->valid, kvm)) in __kvm_gpc_activate()
404 list_add(&gpc->list, &kvm->gpc_list); in __kvm_gpc_activate()
412 write_lock_irq(&gpc->lock); in __kvm_gpc_activate()
413 gpc->active = true; in __kvm_gpc_activate()
414 write_unlock_irq(&gpc->lock); in __kvm_gpc_activate()
416 return __kvm_gpc_refresh(gpc, gpa, uhva); in __kvm_gpc_activate()
419 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) in kvm_gpc_activate() argument
428 return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len); in kvm_gpc_activate()
431 int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len) in kvm_gpc_activate_hva() argument
436 return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len); in kvm_gpc_activate_hva()
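
kvm_gpc_activate() and kvm_gpc_activate_hva() are thin wrappers around the same worker: the first passes KVM_HVA_ERR_BAD so the cache is keyed by a guest physical address, the second passes INVALID_GPA so it is keyed directly by a host virtual address. The same "one worker, sentinel selects the addressing mode" shape in plain C, with illustrative sentinel values and names:

#include <stdio.h>

#define INVALID_GPA	(~0UL)		/* illustrative sentinels */
#define BAD_HVA		(~0UL)

/* One worker; exactly one of gpa/uhva is meaningful per call. */
static int activate(unsigned long gpa, unsigned long uhva, unsigned long len)
{
	if (gpa == INVALID_GPA)
		printf("activating by host VA 0x%lx, len %lu\n", uhva, len);
	else
		printf("activating by guest PA 0x%lx, len %lu\n", gpa, len);
	return 0;
}

static int activate_gpa(unsigned long gpa, unsigned long len)
{
	return activate(gpa, BAD_HVA, len);
}

static int activate_hva(unsigned long uhva, unsigned long len)
{
	return activate(INVALID_GPA, uhva, len);
}

int main(void)
{
	activate_gpa(0x12340, 8);
	activate_hva(0x7f0000012340UL, 8);
	return 0;
}
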
439 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc) in kvm_gpc_deactivate() argument
441 struct kvm *kvm = gpc->kvm; in kvm_gpc_deactivate()
445 guard(mutex)(&gpc->refresh_lock); in kvm_gpc_deactivate()
447 if (gpc->active) { in kvm_gpc_deactivate()
451 * until gpc->lock is dropped and refresh is guaranteed to fail. in kvm_gpc_deactivate()
453 write_lock_irq(&gpc->lock); in kvm_gpc_deactivate()
454 gpc->active = false; in kvm_gpc_deactivate()
455 gpc->valid = false; in kvm_gpc_deactivate()
463 old_khva = gpc->khva - offset_in_page(gpc->khva); in kvm_gpc_deactivate()
464 gpc->khva = NULL; in kvm_gpc_deactivate()
466 old_pfn = gpc->pfn; in kvm_gpc_deactivate()
467 gpc->pfn = KVM_PFN_ERR_FAULT; in kvm_gpc_deactivate()
468 write_unlock_irq(&gpc->lock); in kvm_gpc_deactivate()
471 list_del(&gpc->list); in kvm_gpc_deactivate()
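
kvm_gpc_deactivate() flips active and valid off and snapshots the old pfn/khva while holding the write lock, so no user can observe a half-torn-down cache, then drops the lock before doing the heavier work (releasing the old mapping, unlinking from kvm->gpc_list). A small userspace model of that "detach under the lock, release outside it" shape, where unmap_page() is a made-up stand-in for the real unmap:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cache {
	pthread_rwlock_t lock;
	bool active;
	bool valid;
	void *khva;		/* mapping handed out to readers */
};

/* Made-up stand-in for unmapping/releasing the old page. */
static void unmap_page(void *khva)
{
	printf("releasing mapping %p\n", khva);
	free(khva);
}

static void deactivate(struct cache *c)
{
	void *old_khva;

	pthread_rwlock_wrlock(&c->lock);
	if (!c->active) {
		pthread_rwlock_unlock(&c->lock);
		return;
	}
	/* Anyone taking the lock after this point sees an inactive,
	 * invalid cache and backs off; only then is teardown safe. */
	c->active = false;
	c->valid = false;
	old_khva = c->khva;
	c->khva = NULL;
	pthread_rwlock_unlock(&c->lock);

	/* Heavy work happens with no lock held. */
	if (old_khva)
		unmap_page(old_khva);
}

int main(void)
{
	struct cache c = { .active = true, .valid = true,
			   .khva = malloc(4096) };

	pthread_rwlock_init(&c.lock, NULL);
	deactivate(&c);
	return 0;
}
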