Lines Matching refs:cntx
22 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_vector_reset() local
24 cntx->sstatus &= ~SR_VS; in kvm_riscv_vcpu_vector_reset()
26 cntx->sstatus |= SR_VS_INITIAL; in kvm_riscv_vcpu_vector_reset()
27 WARN_ON(!cntx->vector.datap); in kvm_riscv_vcpu_vector_reset()
28 memset(cntx->vector.datap, 0, riscv_v_vsize); in kvm_riscv_vcpu_vector_reset()
30 cntx->sstatus |= SR_VS_OFF; in kvm_riscv_vcpu_vector_reset()
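The reset hits above all come from kvm_riscv_vcpu_vector_reset(), which appears to live in arch/riscv/kvm/vcpu_vector.c. A minimal sketch of how the matched fragments fit together; the isa pointer and the riscv_isa_extension_available() guard are assumed from the surrounding upstream code rather than visible in the matches:

void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
        unsigned long *isa = vcpu->arch.isa;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

        /* Clear the vector-status (VS) field of the guest sstatus */
        cntx->sstatus &= ~SR_VS;
        if (riscv_isa_extension_available(isa, v)) {
                /* V available: mark state Initial and zero the save area */
                cntx->sstatus |= SR_VS_INITIAL;
                WARN_ON(!cntx->vector.datap);
                memset(cntx->vector.datap, 0, riscv_v_vsize);
        } else {
                /* No V extension for this vCPU: keep the vector unit off */
                cntx->sstatus |= SR_VS_OFF;
        }
}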
34 static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_vector_clean() argument
36 cntx->sstatus &= ~SR_VS; in kvm_riscv_vcpu_vector_clean()
37 cntx->sstatus |= SR_VS_CLEAN; in kvm_riscv_vcpu_vector_clean()
40 void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_vector_save() argument
43 if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) { in kvm_riscv_vcpu_guest_vector_save()
45 __kvm_riscv_vector_save(cntx); in kvm_riscv_vcpu_guest_vector_save()
46 kvm_riscv_vcpu_vector_clean(cntx); in kvm_riscv_vcpu_guest_vector_save()
50 void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_vector_restore() argument
53 if ((cntx->sstatus & SR_VS) != SR_VS_OFF) { in kvm_riscv_vcpu_guest_vector_restore()
55 __kvm_riscv_vector_restore(cntx); in kvm_riscv_vcpu_guest_vector_restore()
56 kvm_riscv_vcpu_vector_clean(cntx); in kvm_riscv_vcpu_guest_vector_restore()
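The guest save/restore hits implement a lazy context-switch protocol: vector state is written back only when the guest has actually dirtied it (SR_VS == SR_VS_DIRTY), it is reloaded whenever the unit is not off, and both paths downgrade the status to Clean via kvm_riscv_vcpu_vector_clean() shown above. A sketch, assuming the low-level save/restore is additionally gated on the V extension being present in the vCPU's ISA as in the upstream file:

void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
                                      unsigned long *isa)
{
        /* Only write back if the guest touched the vector unit */
        if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
                if (riscv_isa_extension_available(isa, v))
                        __kvm_riscv_vector_save(cntx);
                kvm_riscv_vcpu_vector_clean(cntx);
        }
}

void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
                                         unsigned long *isa)
{
        /* Reload whenever the vector unit is enabled for the guest */
        if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
                if (riscv_isa_extension_available(isa, v))
                        __kvm_riscv_vector_restore(cntx);
                kvm_riscv_vcpu_vector_clean(cntx);
        }
}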
60 void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_host_vector_save() argument
64 __kvm_riscv_vector_save(cntx); in kvm_riscv_vcpu_host_vector_save()
67 void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_host_vector_restore() argument
70 __kvm_riscv_vector_restore(cntx); in kvm_riscv_vcpu_host_vector_restore()
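The host-side counterparts matched above take no dirty-tracking shortcut, since the host's sstatus.VS can change outside KVM's control. A short sketch; the riscv_isa_extension_available(NULL, v) guard is an assumption taken from the upstream code, not shown in the matches:

void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
        /* Host sstatus.VS is not tracked here, so save unconditionally */
        if (riscv_isa_extension_available(NULL, v))
                __kvm_riscv_vector_save(cntx);
}

void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
        if (riscv_isa_extension_available(NULL, v))
                __kvm_riscv_vector_restore(cntx);
}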
74 struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_alloc_vector_context() argument
76 cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL); in kvm_riscv_vcpu_alloc_vector_context()
77 if (!cntx->vector.datap) in kvm_riscv_vcpu_alloc_vector_context()
79 cntx->vector.vlenb = riscv_v_vsize / 32; in kvm_riscv_vcpu_alloc_vector_context()
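The allocation hits size the per-vCPU save area with riscv_v_vsize, which covers all 32 vector registers, so vlenb (the per-register length in bytes) works out to riscv_v_vsize / 32. A sketch of the whole helper; the host_context allocation and the -ENOMEM returns are assumptions from the surrounding upstream code, as those lines do not reference cntx and therefore do not appear in the matches:

int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
                                        struct kvm_cpu_context *cntx)
{
        cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
        if (!cntx->vector.datap)
                return -ENOMEM;
        /* riscv_v_vsize holds all 32 V registers: vlenb = vsize / 32 */
        cntx->vector.vlenb = riscv_v_vsize / 32;

        /* Assumed: a separate zeroed buffer for the host's vector state */
        vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
        if (!vcpu->arch.host_context.vector.datap)
                return -ENOMEM;

        return 0;
}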
100 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_vreg_addr() local
108 *reg_addr = &cntx->vector.vstart; in kvm_riscv_vcpu_vreg_addr()
111 *reg_addr = &cntx->vector.vl; in kvm_riscv_vcpu_vreg_addr()
114 *reg_addr = &cntx->vector.vtype; in kvm_riscv_vcpu_vreg_addr()
117 *reg_addr = &cntx->vector.vcsr; in kvm_riscv_vcpu_vreg_addr()
120 *reg_addr = &cntx->vector.vlenb; in kvm_riscv_vcpu_vreg_addr()
129 *reg_addr = cntx->vector.datap + in kvm_riscv_vcpu_vreg_addr()
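The kvm_riscv_vcpu_vreg_addr() hits resolve a KVM_GET_ONE_REG/KVM_SET_ONE_REG register id into an address inside the guest context: the vector CSRs map to fields of cntx->vector, while v0-v31 map into the datap save area at multiples of vlenb. A sketch of the dispatch; the KVM_REG_RISCV_VECTOR_CSR_REG()/KVM_REG_RISCV_VECTOR_REG() case labels, the size checks, and the error codes are assumed from the RISC-V KVM uAPI layout rather than visible in the matches:

static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
                                    unsigned long reg_num,
                                    size_t reg_size, void **reg_addr)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        size_t vlenb = riscv_v_vsize / 32;

        if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
                /* Vector CSRs are exposed as unsigned long sized registers */
                if (reg_size != sizeof(unsigned long))
                        return -EINVAL;
                switch (reg_num) {
                case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
                        *reg_addr = &cntx->vector.vstart;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
                        *reg_addr = &cntx->vector.vl;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
                        *reg_addr = &cntx->vector.vtype;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
                        *reg_addr = &cntx->vector.vcsr;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb):
                        *reg_addr = &cntx->vector.vlenb;
                        break;
                default:
                        return -ENOENT;
                }
        } else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
                /* v0-v31 live in the datap buffer, vlenb bytes apart */
                if (reg_size != vlenb)
                        return -EINVAL;
                *reg_addr = cntx->vector.datap +
                            (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
        } else {
                return -ENOENT;
        }

        return 0;
}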
181 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_set_reg_vector() local
186 if (reg_val != cntx->vector.vlenb) in kvm_riscv_vcpu_set_reg_vector()
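The final hits are the vlenb handling in kvm_riscv_vcpu_set_reg_vector(): userspace may write back the value it previously read, but a vlenb that differs from the one recorded at allocation time is rejected, since the vector register length is fixed by the hardware. A sketch of just that branch; the reg_num comparison, the uaddr pointer, and the copy_from_user() path are assumed from the usual one-reg handling in the upstream file:

        if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) {
                struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
                unsigned long reg_val;

                if (copy_from_user(&reg_val, uaddr, reg_size))
                        return -EFAULT;
                /* vlenb is fixed by the hardware; refuse any other value */
                if (reg_val != cntx->vector.vlenb)
                        return -EINVAL;

                return 0;
        }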