Lines Matching +full:fault +full:- +full:q

1 /* SPDX-License-Identifier: GPL-2.0 */
19 __entry->mmu_valid_gen = sp->mmu_valid_gen; \
20 __entry->gfn = sp->gfn; \
21 __entry->role = sp->role.word; \
22 __entry->root_count = sp->root_count; \
23 __entry->unsync = sp->unsync;
28 "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
32 role.word = __entry->role; \
34 trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
36 __entry->mmu_valid_gen, \
37 __entry->gfn, role.level, \
45 __entry->root_count, \
46 __entry->unsync ? "unsync" : "sync", 0); \
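The fragments above come from the shared KVM_MMU_PAGE_* helper macros and their trace_seq_printf() body, which are reused by the page-oriented events later in the file. For orientation, they slot into the standard TRACE_EVENT() layout; a minimal skeleton in that layout is sketched below (the event name and field are made up for illustration, not taken from mmutrace.h).

/* Minimal TRACE_EVENT skeleton (illustrative name and field only). The helper
 * macros above expand into the TP_STRUCT__entry, TP_fast_assign and TP_printk
 * sections of definitions shaped like this. */
TRACE_EVENT(example_event,
	TP_PROTO(unsigned long gfn),
	TP_ARGS(gfn),

	TP_STRUCT__entry(
		__field(unsigned long, gfn)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
	),

	TP_printk("gfn %lx", __entry->gfn)
);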
79 __entry->addr = addr;
80 __entry->pferr = pferr;
83 TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
84 __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
100 __entry->pte = pte;
101 __entry->level = level;
104 TP_printk("pte %llx level %u", __entry->pte, __entry->level)
118 __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
122 TP_printk("gpa %llx", __entry->gpa)
151 __entry->pferr = pferr;
154 TP_printk("pferr %x %s", __entry->pferr,
155 __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
170 __entry->created = created;
174 __entry->created ? "new" : "existing")
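The two lines above belong to kvm_mmu_get_page, which composes the shared page helpers rather than declaring its own fields. A sketch of the whole definition, reconstructed from that helper-macro pattern and therefore not guaranteed to match the file token for token:

/* Reconstruction sketch of kvm_mmu_get_page: the shared KVM_MMU_PAGE_FIELDS /
 * KVM_MMU_PAGE_ASSIGN / KVM_MMU_PAGE_PRINTK helpers supply everything except
 * the "created" flag. */
TRACE_EVENT(kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
	),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);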
224 __entry->sptep = sptep;
225 __entry->gfn = gfn;
226 __entry->access = spte & ACC_ALL;
227 __entry->gen = get_mmio_spte_generation(spte);
230 TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
231 __entry->gfn, __entry->access, __entry->gen)
246 __entry->addr = addr;
247 __entry->gfn = gfn;
248 __entry->access = access;
251 TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
252 __entry->access)
257 TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
259 TP_ARGS(vcpu, fault, sptep, old_spte, ret),
272 __entry->vcpu_id = vcpu->vcpu_id;
273 __entry->cr2_or_gpa = fault->addr;
274 __entry->error_code = fault->error_code;
275 __entry->sptep = sptep;
276 __entry->old_spte = old_spte;
277 __entry->new_spte = *sptep;
278 __entry->ret = ret;
282 " new %llx spurious %d fixed %d", __entry->vcpu_id,
283 __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
284 kvm_mmu_trace_pferr_flags), __entry->sptep,
285 __entry->old_spte, __entry->new_spte,
286 __entry->ret == RET_PF_SPURIOUS, __entry->ret == RET_PF_FIXED
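The fast_page_fault event is emitted through the trace_fast_page_fault() helper that TRACE_EVENT() generates; the call compiles down to a near no-op while the tracepoint is disabled. The call site below is a sketch of how the arguments line up with TP_ARGS above, not a quote of the handler in mmu.c.

/* Sketch of the emitting call (argument order follows TP_ARGS; exact
 * placement in the fast page-fault handler is assumed, not quoted). */
trace_fast_page_fault(vcpu, fault, sptep, old_spte, ret);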
301 __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
302 __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
305 TP_printk("kvm-mmu-valid-gen %u used_pages %x",
306 __entry->mmu_valid_gen, __entry->mmu_used_pages
323 __entry->kvm_gen = kvm_gen;
324 __entry->spte_gen = spte_gen;
325 __entry->spte = spte;
328 TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
329 __entry->kvm_gen, __entry->spte_gen,
330 __entry->kvm_gen == __entry->spte_gen
351 __entry->gfn = gfn;
352 __entry->spte = *sptep;
353 __entry->sptep = virt_to_phys(sptep);
354 __entry->level = level;
355 __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
356 __entry->x = is_executable_pte(__entry->spte);
357 __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
361 __entry->gfn, __entry->spte,
362 __entry->r ? "r" : "-",
363 __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
364 __entry->x ? "x" : "-",
365 __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
366 __entry->level, __entry->sptep
372 TP_PROTO(struct kvm_page_fault *fault),
373 TP_ARGS(fault),
382 __entry->gfn = fault->gfn;
383 __entry->pfn = fault->pfn | (fault->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
384 __entry->level = fault->goal_level;
388 __entry->gfn, __entry->pfn, __entry->level
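The pfn recorded by kvm_mmu_spte_requested ORs the low bits of the faulting gfn back into fault->pfn; this reads as reconstructing the exact 4KiB frame when an earlier hugepage-adjust step has aligned fault->pfn down to the huge-page boundary (an assumption, since that step is not among these matches). A small standalone example of the arithmetic, with illustrative numbers:

/* Worked example of the reconstruction above. For a 2MiB goal level,
 * KVM_PAGES_PER_HPAGE() is 512, so the low 9 bits of the gfn are OR'd in. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t pages_per_hpage = 512;     /* 2MiB level */
	uint64_t aligned_pfn = 0x123400;    /* fault->pfn, 512-page aligned */
	uint64_t gfn = 0x0567a9;            /* faulting gfn */
	uint64_t pfn = aligned_pfn | (gfn & (pages_per_hpage - 1));

	printf("traced pfn = %#" PRIx64 "\n", pfn);  /* prints 0x1235a9 */
	return 0;
}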
408 __entry->gfn = gfn;
409 __entry->old_spte = old_spte;
410 __entry->new_spte = new_spte;
411 __entry->level = level;
412 __entry->as_id = as_id;
416 __entry->as_id, __entry->gfn, __entry->level,
417 __entry->old_spte, __entry->new_spte
434 __entry->gfn = gfn;
435 __entry->spte = spte;
436 __entry->level = level;
437 __entry->errno = errno;
441 __entry->gfn, __entry->spte, __entry->level, __entry->errno)
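These TRACE_EVENT() definitions only declare the events; in the standard kernel tracepoint pattern, exactly one translation unit instantiates them by defining CREATE_TRACE_POINTS before including the header. Which KVM source file does that is not visible in these matches, so the snippet below shows the generic mechanism rather than the actual include site.

/* Generic instantiation pattern for a trace header (standard tracepoint
 * plumbing; the KVM file that really does this is not shown above). Every
 * other user includes the header without the define. */
#define CREATE_TRACE_POINTS
#include "mmutrace.h"

Once instantiated, the events are grouped under the name set by the header's TRACE_SYSTEM definition (kvmmmu for this file, per the line near the top of mmutrace.h that is not among these matches) and can be enabled per event or per group through tracefs like any other trace event.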