Lines Matching +full:fault +full:- +full:q in arch/powerpc/kvm/book3s_xive_native.c (the KVM XIVE native interrupt controller device)
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2017-2019, IBM Corporation.
6 #define pr_fmt(fmt) "xive-kvm: " fmt
21 #include <asm/xive-regs.h>
36 * load operation, so there is no need to enforce load-after-store in xive_vm_esb_load()
40 val = in_be64(xd->eoi_mmio + offset); in xive_vm_esb_load()
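The fragment above is the device's ESB MMIO load helper. An ESB load at a XIVE_ESB_SET_PQ_* offset atomically sets the P/Q state bits and returns their previous value, which is how the XIVE_ESB_SET_PQ_01 masking seen later in this file works. A minimal sketch with hypothetical helper names, using the offsets and masks from <asm/xive-regs.h>:

	static u8 xive_vm_query_pq(struct xive_irq_data *xd)
	{
		/* XIVE_ESB_GET reads P/Q without modifying them */
		return xive_vm_esb_load(xd, XIVE_ESB_GET) &
		       (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q);
	}

	static u8 xive_vm_mask_source(struct xive_irq_data *xd)
	{
		/* PQ = 01 masks the source; the previous PQ value is returned */
		return xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	}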
46 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_native_cleanup_queue()
47 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_native_cleanup_queue() local
49 xive_native_disable_queue(xc->vp_id, q, prio); in kvmppc_xive_native_cleanup_queue()
50 if (q->qpage) { in kvmppc_xive_native_cleanup_queue()
51 put_page(virt_to_page(q->qpage)); in kvmppc_xive_native_cleanup_queue()
52 q->qpage = NULL; in kvmppc_xive_native_cleanup_queue()
56 static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q, in kvmppc_xive_native_configure_queue() argument
61 __be32 *qpage_prev = q->qpage; in kvmppc_xive_native_configure_queue()
63 rc = xive_native_configure_queue(vp_id, q, prio, qpage, order, in kvmppc_xive_native_configure_queue()
76 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_native_cleanup_vcpu()
85 pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_native_cleanup_vcpu()
88 xc->valid = false; in kvmppc_xive_native_cleanup_vcpu()
94 if (xc->esc_virq[i]) { in kvmppc_xive_native_cleanup_vcpu()
95 if (kvmppc_xive_has_single_escalation(xc->xive)) in kvmppc_xive_native_cleanup_vcpu()
96 xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); in kvmppc_xive_native_cleanup_vcpu()
97 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_native_cleanup_vcpu()
98 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_native_cleanup_vcpu()
99 kfree(xc->esc_virq_names[i]); in kvmppc_xive_native_cleanup_vcpu()
100 xc->esc_virq[i] = 0; in kvmppc_xive_native_cleanup_vcpu()
105 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_native_cleanup_vcpu()
108 vcpu->arch.xive_cam_word = 0; in kvmppc_xive_native_cleanup_vcpu()
119 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; in kvmppc_xive_native_cleanup_vcpu()
120 vcpu->arch.xive_vcpu = NULL; in kvmppc_xive_native_cleanup_vcpu()
126 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_native_connect_vcpu()
133 if (dev->ops != &kvm_xive_native_ops) { in kvmppc_xive_native_connect_vcpu()
135 return -EPERM; in kvmppc_xive_native_connect_vcpu()
137 if (xive->kvm != vcpu->kvm) in kvmppc_xive_native_connect_vcpu()
138 return -EPERM; in kvmppc_xive_native_connect_vcpu()
139 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) in kvmppc_xive_native_connect_vcpu()
140 return -EBUSY; in kvmppc_xive_native_connect_vcpu()
142 mutex_lock(&xive->lock); in kvmppc_xive_native_connect_vcpu()
150 rc = -ENOMEM; in kvmppc_xive_native_connect_vcpu()
154 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_native_connect_vcpu()
155 xc->xive = xive; in kvmppc_xive_native_connect_vcpu()
156 xc->vcpu = vcpu; in kvmppc_xive_native_connect_vcpu()
157 xc->server_num = server_num; in kvmppc_xive_native_connect_vcpu()
159 xc->vp_id = vp_id; in kvmppc_xive_native_connect_vcpu()
160 xc->valid = true; in kvmppc_xive_native_connect_vcpu()
161 vcpu->arch.irq_type = KVMPPC_IRQ_XIVE; in kvmppc_xive_native_connect_vcpu()
163 rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_native_connect_vcpu()
170 pr_err("inconsistent save-restore setup for VCPU %d\n", server_num); in kvmppc_xive_native_connect_vcpu()
171 rc = -EIO; in kvmppc_xive_native_connect_vcpu()
179 rc = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive)); in kvmppc_xive_native_connect_vcpu()
186 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_native_connect_vcpu()
187 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_native_connect_vcpu()
191 mutex_unlock(&xive->lock); in kvmppc_xive_native_connect_vcpu()
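The connect path above is driven from userspace through the vcpu's KVM_ENABLE_CAP ioctl with the KVM_CAP_PPC_IRQ_XIVE capability. A minimal sketch, assuming a vcpu fd and a device fd obtained with KVM_CREATE_DEVICE (see the sketch near kvmppc_xive_native_create() further down); error handling omitted:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int connect_vcpu_to_xive(int vcpu_fd, int xive_fd,
					unsigned long server)
	{
		struct kvm_enable_cap cap = {
			.cap  = KVM_CAP_PPC_IRQ_XIVE,
			/* args[0]: device fd, args[1]: XIVE server number */
			.args = { xive_fd, server },
		};

		return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
	}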
203 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_native_reset_mapped()
207 return -EINVAL; in kvmppc_xive_native_reset_mapped()
211 * unmapped) into the guest and let the VM fault handler in kvmppc_xive_native_reset_mapped()
215 mutex_lock(&xive->mapping_lock); in kvmppc_xive_native_reset_mapped()
216 if (xive->mapping) in kvmppc_xive_native_reset_mapped()
217 unmap_mapping_range(xive->mapping, in kvmppc_xive_native_reset_mapped()
220 mutex_unlock(&xive->mapping_lock); in kvmppc_xive_native_reset_mapped()
230 struct vm_area_struct *vma = vmf->vma; in xive_native_esb_fault()
231 struct kvm_device *dev = vma->vm_file->private_data; in xive_native_esb_fault()
232 struct kvmppc_xive *xive = dev->private; in xive_native_esb_fault()
246 page_offset = vmf->pgoff - vma->vm_pgoff; in xive_native_esb_fault()
255 state = &sb->irq_state[src]; in xive_native_esb_fault()
258 if (!state->valid) { in xive_native_esb_fault()
265 arch_spin_lock(&sb->lock); in xive_native_esb_fault()
271 page = page_offset % 2 ? xd->eoi_page : xd->trig_page; in xive_native_esb_fault()
272 arch_spin_unlock(&sb->lock); in xive_native_esb_fault()
280 vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT); in xive_native_esb_fault()
285 .fault = xive_native_esb_fault,
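The modulo test in the fault handler implies the ESB layout seen by userspace: each interrupt owns two consecutive pages in the mapping, the even one for triggering and the odd one for EOI/management. Hypothetical helpers computing the mmap offsets, assuming KVM_XIVE_ESB_PAGE_OFFSET from the powerpc KVM uapi and <sys/types.h> for off_t:

	static off_t esb_trigger_offset(unsigned long irq, long page_size)
	{
		return (KVM_XIVE_ESB_PAGE_OFFSET + 2 * irq) * page_size;
	}

	static off_t esb_eoi_offset(unsigned long irq, long page_size)
	{
		return (KVM_XIVE_ESB_PAGE_OFFSET + 2 * irq + 1) * page_size;
	}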
290 struct vm_area_struct *vma = vmf->vma; in xive_native_tima_fault()
292 switch (vmf->pgoff - vma->vm_pgoff) { in xive_native_tima_fault()
293 case 0: /* HW - forbid access */ in xive_native_tima_fault()
294 case 1: /* HV - forbid access */ in xive_native_tima_fault()
297 vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT); in xive_native_tima_fault()
299 case 3: /* USER - TODO */ in xive_native_tima_fault()
306 .fault = xive_native_tima_fault,
312 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_native_mmap()
315 if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) { in kvmppc_xive_native_mmap()
317 return -EINVAL; in kvmppc_xive_native_mmap()
318 vma->vm_ops = &xive_native_tima_vmops; in kvmppc_xive_native_mmap()
319 } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) { in kvmppc_xive_native_mmap()
321 return -EINVAL; in kvmppc_xive_native_mmap()
322 vma->vm_ops = &xive_native_esb_vmops; in kvmppc_xive_native_mmap()
324 return -EINVAL; in kvmppc_xive_native_mmap()
328 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); in kvmppc_xive_native_mmap()
332 * the ESB pages mapping when a device is passed-through into in kvmppc_xive_native_mmap()
335 xive->mapping = vma->vm_file->f_mapping; in kvmppc_xive_native_mmap()
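kvmppc_xive_native_mmap() accepts exactly two regions, selected by the page offset. A hedged userspace sketch of mapping both, assuming the uapi offsets (TIMA at page offset 0, at most 4 pages, of which only the OS page faults in successfully; ESB pages starting at page offset 4):

	#include <sys/mman.h>
	#include <unistd.h>

	static void *map_tima(int xive_fd)
	{
		long ps = sysconf(_SC_PAGESIZE);

		return mmap(NULL, 4 * ps, PROT_READ | PROT_WRITE, MAP_SHARED,
			    xive_fd, (off_t)KVM_XIVE_TIMA_PAGE_OFFSET * ps);
	}

	static void *map_esb(int xive_fd, unsigned long nr_irqs)
	{
		long ps = sysconf(_SC_PAGESIZE);

		/* two pages (trigger + EOI) per interrupt source */
		return mmap(NULL, nr_irqs * 2 * ps, PROT_READ | PROT_WRITE,
			    MAP_SHARED, xive_fd,
			    (off_t)KVM_XIVE_ESB_PAGE_OFFSET * ps);
	}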
352 return -E2BIG; in kvmppc_xive_native_set_source()
360 return -ENOMEM; in kvmppc_xive_native_set_source()
363 state = &sb->irq_state[idx]; in kvmppc_xive_native_set_source()
366 pr_err("fault getting user info !\n"); in kvmppc_xive_native_set_source()
367 return -EFAULT; in kvmppc_xive_native_set_source()
370 arch_spin_lock(&sb->lock); in kvmppc_xive_native_set_source()
376 if (!state->ipi_number) { in kvmppc_xive_native_set_source()
377 state->ipi_number = xive_native_alloc_irq(); in kvmppc_xive_native_set_source()
378 if (state->ipi_number == 0) { in kvmppc_xive_native_set_source()
380 rc = -ENXIO; in kvmppc_xive_native_set_source()
383 xive_native_populate_irq_data(state->ipi_number, in kvmppc_xive_native_set_source()
384 &state->ipi_data); in kvmppc_xive_native_set_source()
386 state->ipi_number, irq); in kvmppc_xive_native_set_source()
391 state->lsi = true; in kvmppc_xive_native_set_source()
393 state->asserted = true; in kvmppc_xive_native_set_source()
394 pr_devel(" LSI ! Asserted=%d\n", state->asserted); in kvmppc_xive_native_set_source()
398 state->act_server = 0; in kvmppc_xive_native_set_source()
399 state->act_priority = MASKED; in kvmppc_xive_native_set_source()
400 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_native_set_source()
401 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_native_set_source()
404 if (!state->valid) in kvmppc_xive_native_set_source()
405 xive->src_count++; in kvmppc_xive_native_set_source()
406 state->valid = true; in kvmppc_xive_native_set_source()
411 arch_spin_unlock(&sb->lock); in kvmppc_xive_native_set_source()
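The source initialization above consumes a 64-bit word fetched with get_user(). A hypothetical userspace counterpart, writing that word through KVM_SET_DEVICE_ATTR (attr.attr is the IRQ number; the level bits are from the powerpc KVM uapi; headers as in the connect sketch above plus <stdbool.h> and <stdint.h>):

	static int xive_init_source(int xive_fd, __u64 irq, bool lsi,
				    bool asserted)
	{
		__u64 val = 0;
		struct kvm_device_attr attr = {
			.group = KVM_DEV_XIVE_GRP_SOURCE,
			.attr  = irq,
			.addr  = (__u64)(uintptr_t)&val,
		};

		if (lsi)
			val |= KVM_XIVE_LEVEL_SENSITIVE;
		if (asserted)
			val |= KVM_XIVE_LEVEL_ASSERTED;

		return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
	}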
422 struct kvm *kvm = xive->kvm; in kvmppc_xive_native_update_source_config()
426 arch_spin_lock(&sb->lock); in kvmppc_xive_native_update_source_config()
428 if (state->act_server == server && state->act_priority == priority && in kvmppc_xive_native_update_source_config()
429 state->eisn == eisn) in kvmppc_xive_native_update_source_config()
433 priority, server, masked, state->act_server, in kvmppc_xive_native_update_source_config()
434 state->act_priority); in kvmppc_xive_native_update_source_config()
443 state->act_priority = priority; in kvmppc_xive_native_update_source_config()
444 state->act_server = server; in kvmppc_xive_native_update_source_config()
445 state->eisn = eisn; in kvmppc_xive_native_update_source_config()
451 state->act_priority = MASKED; in kvmppc_xive_native_update_source_config()
452 state->act_server = 0; in kvmppc_xive_native_update_source_config()
453 state->eisn = 0; in kvmppc_xive_native_update_source_config()
459 arch_spin_unlock(&sb->lock); in kvmppc_xive_native_update_source_config()
478 return -ENOENT; in kvmppc_xive_native_set_source_config()
480 state = &sb->irq_state[src]; in kvmppc_xive_native_set_source_config()
482 if (!state->valid) in kvmppc_xive_native_set_source_config()
483 return -EINVAL; in kvmppc_xive_native_set_source_config()
486 return -EFAULT; in kvmppc_xive_native_set_source_config()
502 return -EINVAL; in kvmppc_xive_native_set_source_config()
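The server/priority/eisn triple applied by kvmppc_xive_native_update_source_config() is packed by userspace into one 64-bit attribute value. A sketch assuming the KVM_XIVE_SOURCE_* shifts from the powerpc KVM uapi (priority in bits 0-2, server from bit 3, masked at bit 32, EISN from bit 33):

	static __u64 xive_source_config(__u8 priority, __u32 server,
					bool masked, __u32 eisn)
	{
		__u64 val = 0;

		val |= (__u64)priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT;
		val |= (__u64)server << KVM_XIVE_SOURCE_SERVER_SHIFT;
		if (masked)
			val |= 1ULL << KVM_XIVE_SOURCE_MASKED_SHIFT;
		val |= (__u64)eisn << KVM_XIVE_SOURCE_EISN_SHIFT;

		/* written through KVM_DEV_XIVE_GRP_SOURCE_CONFIG */
		return val;
	}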
523 return -ENOENT; in kvmppc_xive_native_sync_source()
525 state = &sb->irq_state[src]; in kvmppc_xive_native_sync_source()
527 rc = -EINVAL; in kvmppc_xive_native_sync_source()
529 arch_spin_lock(&sb->lock); in kvmppc_xive_native_sync_source()
531 if (state->valid) { in kvmppc_xive_native_sync_source()
537 arch_spin_unlock(&sb->lock); in kvmppc_xive_native_sync_source()
545 * advertised in the DT property "ibm,xive-eq-sizes" in xive_native_validate_queue_size()
555 return -EINVAL; in xive_native_validate_queue_size()
562 struct kvm *kvm = xive->kvm; in kvmppc_xive_native_set_queue_config()
572 struct xive_q *q; in kvmppc_xive_native_set_queue_config() local
586 return -EFAULT; in kvmppc_xive_native_set_queue_config()
591 return -ENOENT; in kvmppc_xive_native_set_queue_config()
593 xc = vcpu->arch.xive_vcpu; in kvmppc_xive_native_set_queue_config()
598 return -EINVAL; in kvmppc_xive_native_set_queue_config()
600 q = &xc->queues[priority]; in kvmppc_xive_native_set_queue_config()
608 q->guest_qaddr = 0; in kvmppc_xive_native_set_queue_config()
609 q->guest_qshift = 0; in kvmppc_xive_native_set_queue_config()
611 rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority, in kvmppc_xive_native_set_queue_config()
615 priority, xc->server_num, rc); in kvmppc_xive_native_set_queue_config()
631 return -EINVAL; in kvmppc_xive_native_set_queue_config()
640 if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) { in kvmppc_xive_native_set_queue_config()
643 return -EINVAL; in kvmppc_xive_native_set_queue_config()
646 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_xive_native_set_queue_config()
651 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_xive_native_set_queue_config()
653 return -EINVAL; in kvmppc_xive_native_set_queue_config()
658 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_xive_native_set_queue_config()
660 return -EINVAL; in kvmppc_xive_native_set_queue_config()
664 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_xive_native_set_queue_config()
670 q->guest_qaddr = kvm_eq.qaddr; in kvmppc_xive_native_set_queue_config()
671 q->guest_qshift = kvm_eq.qshift; in kvmppc_xive_native_set_queue_config()
678 rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority, in kvmppc_xive_native_set_queue_config()
682 priority, xc->server_num, rc); in kvmppc_xive_native_set_queue_config()
692 rc = xive_native_set_queue_state(xc->vp_id, priority, in kvmppc_xive_native_set_queue_config()
710 struct kvm *kvm = xive->kvm; in kvmppc_xive_native_get_queue_config()
713 struct xive_q *q; in kvmppc_xive_native_get_queue_config() local
736 return -ENOENT; in kvmppc_xive_native_get_queue_config()
738 xc = vcpu->arch.xive_vcpu; in kvmppc_xive_native_get_queue_config()
743 return -EINVAL; in kvmppc_xive_native_get_queue_config()
745 q = &xc->queues[priority]; in kvmppc_xive_native_get_queue_config()
749 if (!q->qpage) in kvmppc_xive_native_get_queue_config()
752 rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift, in kvmppc_xive_native_get_queue_config()
761 kvm_eq.qshift = q->guest_qshift; in kvmppc_xive_native_get_queue_config()
762 kvm_eq.qaddr = q->guest_qaddr; in kvmppc_xive_native_get_queue_config()
764 rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle, in kvmppc_xive_native_get_queue_config()
774 return -EFAULT; in kvmppc_xive_native_get_queue_config()
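The set/get queue paths above exchange a struct kvm_ppc_xive_eq with userspace. A hedged sketch of configuring one end-of-interrupt queue, assuming that uapi struct and the KVM_XIVE_EQ_* identifier layout (priority in bits 0-2, server from bit 3); a qshift of 16 matches the 64K-only restriction checked in xive_native_validate_queue_size(), and a qshift of 0 resets the queue. Headers as in the earlier sketches:

	static int xive_set_eq(int xive_fd, __u32 server, __u8 priority,
			       __u64 qaddr, __u32 qshift)
	{
		struct kvm_ppc_xive_eq eq = {
			.flags  = KVM_XIVE_EQ_ALWAYS_NOTIFY,
			.qshift = qshift,	/* 0 resets the queue */
			.qaddr  = qaddr,	/* guest physical, qshift-aligned */
		};
		struct kvm_device_attr attr = {
			.group = KVM_DEV_XIVE_GRP_EQ_CONFIG,
			.attr  = ((__u64)server << KVM_XIVE_EQ_SERVER_SHIFT) |
				 (priority << KVM_XIVE_EQ_PRIORITY_SHIFT),
			.addr  = (__u64)(uintptr_t)&eq,
		};

		return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
	}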
784 struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; in kvmppc_xive_reset_sources()
786 if (!state->valid) in kvmppc_xive_reset_sources()
789 if (state->act_priority == MASKED) in kvmppc_xive_reset_sources()
792 state->eisn = 0; in kvmppc_xive_reset_sources()
793 state->act_server = 0; in kvmppc_xive_reset_sources()
794 state->act_priority = MASKED; in kvmppc_xive_reset_sources()
795 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_reset_sources()
796 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_reset_sources()
797 if (state->pt_number) { in kvmppc_xive_reset_sources()
798 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_reset_sources()
799 xive_native_configure_irq(state->pt_number, in kvmppc_xive_reset_sources()
807 struct kvm *kvm = xive->kvm; in kvmppc_xive_reset()
813 mutex_lock(&xive->lock); in kvmppc_xive_reset()
816 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_reset()
830 if (xc->esc_virq[prio]) { in kvmppc_xive_reset()
831 free_irq(xc->esc_virq[prio], vcpu); in kvmppc_xive_reset()
832 irq_dispose_mapping(xc->esc_virq[prio]); in kvmppc_xive_reset()
833 kfree(xc->esc_virq_names[prio]); in kvmppc_xive_reset()
834 xc->esc_virq[prio] = 0; in kvmppc_xive_reset()
841 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_reset()
842 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in kvmppc_xive_reset()
845 arch_spin_lock(&sb->lock); in kvmppc_xive_reset()
847 arch_spin_unlock(&sb->lock); in kvmppc_xive_reset()
851 mutex_unlock(&xive->lock); in kvmppc_xive_reset()
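The reset above is triggered from userspace by a write to the KVM_DEV_XIVE_RESET attribute of the control group. A minimal sketch, headers as in the earlier sketches:

	static int xive_reset(int xive_fd)
	{
		struct kvm_device_attr attr = {
			.group = KVM_DEV_XIVE_GRP_CTRL,
			.attr  = KVM_DEV_XIVE_RESET,
		};

		return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
	}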
861 struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; in kvmppc_xive_native_sync_sources()
865 if (!state->valid) in kvmppc_xive_native_sync_sources()
872 * '-Q', which is what is being done before calling in kvmppc_xive_native_sync_sources()
882 if (state->act_priority == MASKED) in kvmppc_xive_native_sync_sources()
893 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_native_vcpu_eq_sync()
898 return -ENOENT; in kvmppc_xive_native_vcpu_eq_sync()
901 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_native_vcpu_eq_sync() local
903 if (!q->qpage) in kvmppc_xive_native_vcpu_eq_sync()
907 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_xive_native_vcpu_eq_sync()
908 mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr)); in kvmppc_xive_native_vcpu_eq_sync()
909 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_xive_native_vcpu_eq_sync()
916 struct kvm *kvm = xive->kvm; in kvmppc_xive_native_eq_sync()
922 mutex_lock(&xive->lock); in kvmppc_xive_native_eq_sync()
923 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_native_eq_sync()
924 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in kvmppc_xive_native_eq_sync()
927 arch_spin_lock(&sb->lock); in kvmppc_xive_native_eq_sync()
929 arch_spin_unlock(&sb->lock); in kvmppc_xive_native_eq_sync()
936 mutex_unlock(&xive->lock); in kvmppc_xive_native_eq_sync()
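The EQ sync control is driven the same way; it flushes in-flight interrupts into the EQ pages and marks them dirty, typically before migration. A minimal sketch:

	static int xive_eq_sync(int xive_fd)
	{
		struct kvm_device_attr attr = {
			.group = KVM_DEV_XIVE_GRP_CTRL,
			.attr  = KVM_DEV_XIVE_EQ_SYNC,
		};

		return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
	}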
944 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_native_set_attr()
946 switch (attr->group) { in kvmppc_xive_native_set_attr()
948 switch (attr->attr) { in kvmppc_xive_native_set_attr()
954 return kvmppc_xive_set_nr_servers(xive, attr->addr); in kvmppc_xive_native_set_attr()
958 return kvmppc_xive_native_set_source(xive, attr->attr, in kvmppc_xive_native_set_attr()
959 attr->addr); in kvmppc_xive_native_set_attr()
961 return kvmppc_xive_native_set_source_config(xive, attr->attr, in kvmppc_xive_native_set_attr()
962 attr->addr); in kvmppc_xive_native_set_attr()
964 return kvmppc_xive_native_set_queue_config(xive, attr->attr, in kvmppc_xive_native_set_attr()
965 attr->addr); in kvmppc_xive_native_set_attr()
967 return kvmppc_xive_native_sync_source(xive, attr->attr, in kvmppc_xive_native_set_attr()
968 attr->addr); in kvmppc_xive_native_set_attr()
970 return -ENXIO; in kvmppc_xive_native_set_attr()
976 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_native_get_attr()
978 switch (attr->group) { in kvmppc_xive_native_get_attr()
980 return kvmppc_xive_native_get_queue_config(xive, attr->attr, in kvmppc_xive_native_get_attr()
981 attr->addr); in kvmppc_xive_native_get_attr()
983 return -ENXIO; in kvmppc_xive_native_get_attr()
989 switch (attr->group) { in kvmppc_xive_native_has_attr()
991 switch (attr->attr) { in kvmppc_xive_native_has_attr()
1001 if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ && in kvmppc_xive_native_has_attr()
1002 attr->attr < KVMPPC_XIVE_NR_IRQS) in kvmppc_xive_native_has_attr()
1008 return -ENXIO; in kvmppc_xive_native_has_attr()
1012 * Called when device fd is closed. kvm->lock is held.
1016 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_native_release()
1017 struct kvm *kvm = xive->kvm; in kvmppc_xive_native_release()
1025 * unmap the ESB pages when a device is passed-through. in kvmppc_xive_native_release()
1027 mutex_lock(&xive->mapping_lock); in kvmppc_xive_native_release()
1028 xive->mapping = NULL; in kvmppc_xive_native_release()
1029 mutex_unlock(&xive->mapping_lock); in kvmppc_xive_native_release()
1035 * device attribute set/get, mmap, or page fault functions in kvmppc_xive_native_release()
1041 debugfs_remove(xive->dentry); in kvmppc_xive_native_release()
1048 * Take vcpu->mutex to ensure that no one_reg get/set ioctl in kvmppc_xive_native_release()
1050 * Holding the vcpu->mutex also means that the vcpu cannot in kvmppc_xive_native_release()
1055 mutex_lock(&vcpu->mutex); in kvmppc_xive_native_release()
1057 mutex_unlock(&vcpu->mutex); in kvmppc_xive_native_release()
1061 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type in kvmppc_xive_native_release()
1062 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe in kvmppc_xive_native_release()
1066 kvm->arch.xive = NULL; in kvmppc_xive_native_release()
1068 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_native_release()
1069 if (xive->src_blocks[i]) in kvmppc_xive_native_release()
1070 kvmppc_xive_free_sources(xive->src_blocks[i]); in kvmppc_xive_native_release()
1071 kfree(xive->src_blocks[i]); in kvmppc_xive_native_release()
1072 xive->src_blocks[i] = NULL; in kvmppc_xive_native_release()
1075 if (xive->vp_base != XIVE_INVALID_VP) in kvmppc_xive_native_release()
1076 xive_native_free_vp_block(xive->vp_base); in kvmppc_xive_native_release()
1089 * Create a XIVE device. kvm->lock is held.
1094 struct kvm *kvm = dev->kvm; in kvmppc_xive_native_create()
1098 if (kvm->arch.xive) in kvmppc_xive_native_create()
1099 return -EEXIST; in kvmppc_xive_native_create()
1103 return -ENOMEM; in kvmppc_xive_native_create()
1105 dev->private = xive; in kvmppc_xive_native_create()
1106 xive->dev = dev; in kvmppc_xive_native_create()
1107 xive->kvm = kvm; in kvmppc_xive_native_create()
1108 mutex_init(&xive->mapping_lock); in kvmppc_xive_native_create()
1109 mutex_init(&xive->lock); in kvmppc_xive_native_create()
1112 xive->vp_base = XIVE_INVALID_VP; in kvmppc_xive_native_create()
1116 xive->nr_servers = KVM_MAX_VCPUS; in kvmppc_xive_native_create()
1119 xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION; in kvmppc_xive_native_create()
1122 xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE; in kvmppc_xive_native_create()
1124 xive->ops = &kvmppc_xive_native_ops; in kvmppc_xive_native_create()
1126 kvm->arch.xive = xive; in kvmppc_xive_native_create()
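kvmppc_xive_native_create() runs when userspace creates a device of type KVM_DEV_TYPE_XIVE on the VM fd. A minimal sketch returning the device fd used by all the earlier userspace examples:

	static int create_xive_device(int vm_fd)
	{
		struct kvm_create_device cd = {
			.type = KVM_DEV_TYPE_XIVE,
		};

		if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
			return -1;

		return cd.fd;
	}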
1138 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_native_get_vp()
1143 return -EPERM; in kvmppc_xive_native_get_vp()
1146 return -ENOENT; in kvmppc_xive_native_get_vp()
1149 val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01; in kvmppc_xive_native_get_vp()
1152 rc = xive_native_get_vp_state(xc->vp_id, &opal_state); in kvmppc_xive_native_get_vp()
1160 val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK); in kvmppc_xive_native_get_vp()
1164 vcpu->arch.xive_saved_state.nsr, in kvmppc_xive_native_get_vp()
1165 vcpu->arch.xive_saved_state.cppr, in kvmppc_xive_native_get_vp()
1166 vcpu->arch.xive_saved_state.ipb, in kvmppc_xive_native_get_vp()
1167 vcpu->arch.xive_saved_state.pipr, in kvmppc_xive_native_get_vp()
1168 vcpu->arch.xive_saved_state.w01, in kvmppc_xive_native_get_vp()
1169 (u32) vcpu->arch.xive_cam_word, opal_state); in kvmppc_xive_native_get_vp()
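The VP state assembled above is exposed to userspace as a 128-bit one_reg register. A hedged sketch, assuming the KVM_REG_PPC_VP_STATE register id from the powerpc KVM uapi and a two-__u64 layout whose first word is the TIMA word0/word1 image:

	static int get_vp_state(int vcpu_fd, __u64 timaval[2])
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_VP_STATE,
			.addr = (__u64)(uintptr_t)timaval,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}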
1176 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_native_set_vp()
1177 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in kvmppc_xive_native_set_vp()
1180 val->xive_timaval[0], val->xive_timaval[1]); in kvmppc_xive_native_set_vp()
1183 return -EPERM; in kvmppc_xive_native_set_vp()
1186 return -ENOENT; in kvmppc_xive_native_set_vp()
1189 if (WARN_ON(vcpu->arch.xive_pushed)) in kvmppc_xive_native_set_vp()
1190 return -EBUSY; in kvmppc_xive_native_set_vp()
1196 vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0]; in kvmppc_xive_native_set_vp()
1213 struct kvmppc_xive *xive = m->private; in xive_native_debug_show()
1214 struct kvm *kvm = xive->kvm; in xive_native_debug_show()
1224 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_native_debug_show()
1231 xc->server_num, xc->vp_id, xc->vp_chip_id, in xive_native_debug_show()
1232 vcpu->arch.xive_saved_state.nsr, in xive_native_debug_show()
1233 vcpu->arch.xive_saved_state.cppr, in xive_native_debug_show()
1234 vcpu->arch.xive_saved_state.ipb, in xive_native_debug_show()
1235 vcpu->arch.xive_saved_state.pipr, in xive_native_debug_show()
1236 be64_to_cpu(vcpu->arch.xive_saved_state.w01), in xive_native_debug_show()
1237 be32_to_cpu(vcpu->arch.xive_cam_word)); in xive_native_debug_show()
1244 for (i = 0; i <= xive->max_sbid; i++) { in xive_native_debug_show()
1245 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_native_debug_show()
1248 arch_spin_lock(&sb->lock); in xive_native_debug_show()
1250 arch_spin_unlock(&sb->lock); in xive_native_debug_show()
1261 xive->dentry = debugfs_create_file("xive", 0444, xive->kvm->debugfs_dentry, in xive_native_debugfs_init()
1269 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_native_init()
1276 .name = "kvm-xive-native",