Lines matching full:shmem (RISC-V KVM SBI steal-time accounting): every place the per-vCPU shared-memory address vcpu->arch.sta.shmem is set, validated, or read back.

21 	vcpu->arch.sta.shmem = INVALID_GPA;  in kvm_riscv_vcpu_sbi_sta_reset()
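The first hit, line 21, is kvm_riscv_vcpu_sbi_sta_reset() parking the shared-memory address at INVALID_GPA, so every later path can use a single sentinel to mean "no steal-time buffer registered". A minimal sketch of that sentinel pattern follows, assuming an all-ones GPA as the invalid value; sta_shmem_reset()/sta_shmem_valid() are illustrative names, not kernel APIs.

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t gpa_t;
    #define INVALID_GPA (~(gpa_t)0)      /* assumption: all-ones sentinel */

    struct sta_state {
        gpa_t shmem;                     /* guest physical address of the shared buffer */
    };

    /* Reset parks the address at the sentinel, mirroring the hit at line 21. */
    static void sta_shmem_reset(struct sta_state *sta)
    {
        sta->shmem = INVALID_GPA;
    }

    /* Later paths (e.g. the hit at line 38) bail out when nothing is registered. */
    static bool sta_shmem_valid(const struct sta_state *sta)
    {
        return sta->shmem != INVALID_GPA;
    }
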
27 gpa_t shmem = vcpu->arch.sta.shmem; in kvm_riscv_vcpu_record_steal_time() local
38 if (shmem == INVALID_GPA) in kvm_riscv_vcpu_record_steal_time()
42 	 * shmem is 64-byte aligned (see the enforcement in …  in kvm_riscv_vcpu_record_steal_time()
46 gfn = shmem >> PAGE_SHIFT; in kvm_riscv_vcpu_record_steal_time()
50 vcpu->arch.sta.shmem = INVALID_GPA; in kvm_riscv_vcpu_record_steal_time()
54 sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) + in kvm_riscv_vcpu_record_steal_time()
56 steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) + in kvm_riscv_vcpu_record_steal_time()
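The hits at lines 27 through 56 are inside kvm_riscv_vcpu_record_steal_time(): it returns early when no buffer is registered (line 38), otherwise it splits the registered address into a guest frame number (line 46) and an in-page offset, and builds userspace pointers to the sequence and steal fields (lines 54 and 56); if the gfn no longer resolves to a valid host address, the buffer is unregistered again (line 50). Since the buffer is 64-byte aligned (the truncated comment at line 42) and the record itself fits in 64 bytes, both fields land in the same page. Below is a standalone, compile-only sketch of that address arithmetic and of the even/odd sequence protocol the sequence field implies; the struct layout and helper names are illustrative stand-ins, not the kernel's definitions.

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    typedef uint64_t gpa_t;
    typedef uint64_t gfn_t;

    /* Illustrative stand-in for the 64-byte per-vCPU steal-time record. */
    struct sta_record {
        uint32_t sequence;   /* odd while an update is in progress */
        uint32_t flags;
        uint64_t steal;      /* accumulated time the vCPU was runnable but not running */
        uint8_t  pad[48];    /* pad the record out to 64 bytes */
    };

    /* Split a guest physical address into a page frame number and an in-page
     * offset, as the hits at lines 46, 54 and 56 do with PAGE_SHIFT and
     * offset_in_page(). */
    static gfn_t  gpa_to_gfn(gpa_t gpa)      { return gpa >> PAGE_SHIFT; }
    static size_t offset_in_page(gpa_t gpa)  { return gpa & (PAGE_SIZE - 1); }

    /* Given the mapped host page and the registered gpa, locate the record;
     * 64-byte alignment guarantees it never straddles the page boundary. */
    static struct sta_record *sta_record_at(void *host_page_base, gpa_t shmem)
    {
        return (struct sta_record *)((char *)host_page_base + offset_in_page(shmem));
    }

    /* Publish a new steal value with the seqcount-style protocol the sequence
     * field implies: bump to odd, update steal, bump back to even so readers
     * that observed an odd value retry. */
    static void record_steal(struct sta_record *rec, uint64_t delta)
    {
        rec->sequence += 1;          /* now odd: update in progress */
        rec->steal    += delta;
        rec->sequence += 1;          /* even again: update visible */
    }
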
90 gpa_t shmem; in kvm_sbi_sta_steal_time_set_shmem() local
98 vcpu->arch.sta.shmem = INVALID_GPA; in kvm_sbi_sta_steal_time_set_shmem()
105 shmem = shmem_phys_lo; in kvm_sbi_sta_steal_time_set_shmem()
109 shmem |= ((gpa_t)shmem_phys_hi << 32); in kvm_sbi_sta_steal_time_set_shmem()
114 hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable); in kvm_sbi_sta_steal_time_set_shmem()
118 ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta)); in kvm_sbi_sta_steal_time_set_shmem()
122 vcpu->arch.sta.shmem = shmem; in kvm_sbi_sta_steal_time_set_shmem()
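Lines 90 through 122 are the hits in kvm_sbi_sta_steal_time_set_shmem(), the handler for the SBI call with which the guest registers the buffer: the low and high halves of the physical address are combined into a single gpa_t (lines 105 and 109), the resulting gfn must map to a writable host address (line 114), and the 64-byte record is zeroed (line 118) before the address is committed (line 122). A standalone sketch of the validation half follows; it assumes an all-ones lo/hi pair means "disable" and that a nonzero high half is only folded in on a 32-bit host (both taken from the SBI shared-memory convention rather than from the hits above), and its result codes are illustrative, not the SBI constants.

    #include <stdint.h>

    typedef uint64_t gpa_t;

    #define SHMEM_DISABLE   (~0UL)   /* assumption: all-ones lo and hi turn steal time off */
    #define STA_SHMEM_ALIGN 64       /* the buffer must be 64-byte aligned */

    enum set_shmem_result {
        SET_OK,
        SET_DISABLED,
        SET_ERR_INVALID_PARAM,
        SET_ERR_INVALID_ADDRESS,
    };

    /* Combine and validate the lo/hi halves passed in the SBI call,
     * mirroring the flow of the hits at lines 90-122. */
    static enum set_shmem_result
    sta_set_shmem(unsigned long phys_lo, unsigned long phys_hi,
                  int host_is_32bit, gpa_t *out_shmem)
    {
        gpa_t shmem;

        if (phys_lo == SHMEM_DISABLE && phys_hi == SHMEM_DISABLE)
            return SET_DISABLED;                 /* caller parks shmem at INVALID_GPA */

        if (phys_lo & (STA_SHMEM_ALIGN - 1))
            return SET_ERR_INVALID_PARAM;        /* not 64-byte aligned */

        shmem = phys_lo;
        if (phys_hi != 0) {
            if (host_is_32bit)
                shmem |= (gpa_t)phys_hi << 32;   /* line 109: widen on 32-bit hosts */
            else
                return SET_ERR_INVALID_ADDRESS;  /* 64-bit hosts pass the full address in lo */
        }

        /* The real handler then checks that the gfn maps to a writable host
         * address and zeroes the 64-byte record before committing shmem. */
        *out_shmem = shmem;
        return SET_OK;
    }
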
167 *reg_val = (unsigned long)vcpu->arch.sta.shmem; in kvm_riscv_vcpu_get_reg_sbi_sta()
171 *reg_val = upper_32_bits(vcpu->arch.sta.shmem); in kvm_riscv_vcpu_get_reg_sbi_sta()
189 gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem); in kvm_riscv_vcpu_set_reg_sbi_sta()
191 vcpu->arch.sta.shmem = reg_val; in kvm_riscv_vcpu_set_reg_sbi_sta()
192 vcpu->arch.sta.shmem |= hi << 32; in kvm_riscv_vcpu_set_reg_sbi_sta()
194 vcpu->arch.sta.shmem = reg_val; in kvm_riscv_vcpu_set_reg_sbi_sta()
199 gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem); in kvm_riscv_vcpu_set_reg_sbi_sta()
201 vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32); in kvm_riscv_vcpu_set_reg_sbi_sta()
202 vcpu->arch.sta.shmem |= lo; in kvm_riscv_vcpu_set_reg_sbi_sta()
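The remaining hits, lines 167 through 202, are the KVM_GET_ONE_REG/KVM_SET_ONE_REG plumbing that exposes the registered address to userspace as a shmem_lo/shmem_hi register pair: line 167 reads the address back (the full value where a register is 64 bits wide), line 171 reads the upper 32 bits, and lines 189-202 show that writing one half must preserve the other. Below is a standalone sketch of that split-register bookkeeping for the 32-bit case; the function names are illustrative, and only lower_32_bits()/upper_32_bits() mirror the kernel helpers.

    #include <stdint.h>

    typedef uint64_t gpa_t;

    static uint32_t lower_32_bits(gpa_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(gpa_t v) { return (uint32_t)(v >> 32); }

    /* GET: on a 32-bit host each register reads back one half of the stored
     * address (lines 167 and 171). */
    static uint32_t get_shmem_lo(gpa_t shmem) { return lower_32_bits(shmem); }
    static uint32_t get_shmem_hi(gpa_t shmem) { return upper_32_bits(shmem); }

    /* SET shmem_lo: keep the old high half, replace the low half
     * (lines 189-192). */
    static void set_shmem_lo(gpa_t *shmem, uint32_t reg_val)
    {
        gpa_t hi = upper_32_bits(*shmem);

        *shmem = reg_val;
        *shmem |= hi << 32;
    }

    /* SET shmem_hi: keep the old low half, replace the high half
     * (lines 199-202). */
    static void set_shmem_hi(gpa_t *shmem, uint32_t reg_val)
    {
        gpa_t lo = lower_32_bits(*shmem);

        *shmem = (gpa_t)reg_val << 32;
        *shmem |= lo;
    }
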