
Searched refs:run (Results 1 – 25 of 1499) sorted by relevance


/linux-6.12.1/tools/testing/selftests/sgx/
main.c:172 struct sgx_enclave_run run; in FIXTURE() local
262 #define ENCL_CALL(op, run, clobbered) \ argument
267 EENTER, 0, 0, (run)); \
270 (run)); \
274 #define EXPECT_EEXIT(run) \ argument
276 EXPECT_EQ((run)->function, EEXIT); \
277 if ((run)->function != EEXIT) \
278 TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
279 (run)->exception_error_code, (run)->exception_addr); \
289 memset(&self->run, 0, sizeof(self->run)); in TEST_F()
[all …]
/linux-6.12.1/drivers/media/test-drivers/visl/
visl-dec.c:62 __kernel_size_t buflen, struct visl_run *run) in visl_get_ref_frames() argument
80 vb2_buf = vb2_find_buffer(cap_q, run->fwht.params->backward_ref_ts); in visl_get_ref_frames()
83 run->fwht.params->backward_ref_ts, vb2_buf); in visl_get_ref_frames()
92 b_ref = vb2_find_buffer(cap_q, run->mpeg2.pic->backward_ref_ts); in visl_get_ref_frames()
93 f_ref = vb2_find_buffer(cap_q, run->mpeg2.pic->forward_ref_ts); in visl_get_ref_frames()
96 run->mpeg2.pic->backward_ref_ts, b_ref); in visl_get_ref_frames()
98 run->mpeg2.pic->forward_ref_ts, f_ref); in visl_get_ref_frames()
108 last = vb2_find_buffer(cap_q, run->vp8.frame->last_frame_ts); in visl_get_ref_frames()
109 golden = vb2_find_buffer(cap_q, run->vp8.frame->golden_frame_ts); in visl_get_ref_frames()
110 alt = vb2_find_buffer(cap_q, run->vp8.frame->alt_frame_ts); in visl_get_ref_frames()
[all …]
/linux-6.12.1/tools/testing/selftests/kvm/s390x/
sync_regs_test.c:76 struct kvm_run *run = vcpu->run; in test_read_invalid() local
80 run->kvm_valid_regs = INVALID_SYNC_FIELD; in test_read_invalid()
85 run->kvm_valid_regs = 0; in test_read_invalid()
87 run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; in test_read_invalid()
92 run->kvm_valid_regs = 0; in test_read_invalid()
97 struct kvm_run *run = vcpu->run; in test_set_invalid() local
101 run->kvm_dirty_regs = INVALID_SYNC_FIELD; in test_set_invalid()
106 run->kvm_dirty_regs = 0; in test_set_invalid()
108 run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; in test_set_invalid()
113 run->kvm_dirty_regs = 0; in test_set_invalid()
[all …]
ucontrol_test.c:64 struct kvm_run *run; in FIXTURE() local
100 self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size, in FIXTURE_SETUP()
102 ASSERT_NE(self->run, MAP_FAILED); in FIXTURE_SETUP()
114 TH_LOG("VM created %p %p", self->run, self->sie_block); in FIXTURE_SETUP()
145 munmap(self->run, self->kvm_run_size); in FIXTURE_TEARDOWN()
231 struct kvm_run *run = self->run; in uc_handle_sieic() local
235 run->s390_sieic.icptcode, in uc_handle_sieic()
236 run->s390_sieic.ipa, in uc_handle_sieic()
237 run->s390_sieic.ipb); in uc_handle_sieic()
238 switch (run->s390_sieic.icptcode) { in uc_handle_sieic()
[all …]
/linux-6.12.1/tools/testing/selftests/kvm/x86_64/
sync_regs_test.c:92 struct kvm_run *run = (struct kvm_run *)arg; in race_events_inj_pen() local
93 struct kvm_vcpu_events *events = &run->s.regs.events; in race_events_inj_pen()
98 WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS); in race_events_inj_pen()
115 struct kvm_run *run = (struct kvm_run *)arg; in race_events_exc() local
116 struct kvm_vcpu_events *events = &run->s.regs.events; in race_events_exc()
119 WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS); in race_events_exc()
137 struct kvm_run *run = (struct kvm_run *)arg; in race_sregs_cr4() local
138 __u64 *cr4 = &run->s.regs.sregs.cr4; in race_sregs_cr4()
143 WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS); in race_sregs_cr4()
161 struct kvm_run *run; in race_sync_regs() local
[all …]
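
The sync_regs tests above exercise the KVM_CAP_SYNC_REGS interface, where register state is exchanged through the mmapped kvm_run structure instead of separate GET/SET ioctls. A minimal userspace sketch of that pattern (kvm_valid_regs, kvm_dirty_regs, s.regs and KVM_SYNC_X86_REGS are the real KVM UAPI; the helper name and surrounding setup are assumptions, and error handling is omitted):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: access guest GPRs through the kvm_run sync area. Assumes
     * vcpu_fd is a KVM vCPU fd, run is its mmapped kvm_run, and the VM
     * reported KVM_SYNC_X86_REGS support via KVM_CAP_SYNC_REGS. */
    static int run_once_with_sync_regs(int vcpu_fd, struct kvm_run *run)
    {
        /* Ask KVM to copy the GPRs into run->s.regs on the next exit. */
        run->kvm_valid_regs = KVM_SYNC_X86_REGS;

        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
            return -1;

        /* The shared copy is now valid; modify it and mark it dirty so
         * KVM loads it back into the vCPU before the next KVM_RUN. */
        run->s.regs.regs.rax = 0;
        run->kvm_dirty_regs = KVM_SYNC_X86_REGS;

        return 0;
    }
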
debug_regs.c:86 struct kvm_run *run; in main() local
104 run = vcpu->run; in main()
111 TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG && in main()
112 run->debug.arch.exception == BP_VECTOR && in main()
113 run->debug.arch.pc == CAST_TO_RIP(sw_bp), in main()
115 run->exit_reason, run->debug.arch.exception, in main()
116 run->debug.arch.pc, CAST_TO_RIP(sw_bp)); in main()
128 TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG && in main()
129 run->debug.arch.exception == DB_VECTOR && in main()
130 run->debug.arch.pc == CAST_TO_RIP(hw_bp) && in main()
[all …]
userspace_msr_exit_test.c:382 if (vcpu->run->exit_reason == KVM_EXIT_IO && in check_for_guest_assert()
390 struct kvm_run *run = vcpu->run; in process_rdmsr() local
395 TEST_ASSERT(run->msr.index == msr_index, in process_rdmsr()
397 run->msr.index, msr_index); in process_rdmsr()
399 switch (run->msr.index) { in process_rdmsr()
401 run->msr.data = 0; in process_rdmsr()
404 run->msr.error = 1; in process_rdmsr()
407 run->msr.data = msr_non_existent_data; in process_rdmsr()
410 run->msr.data = MSR_FS_BASE; in process_rdmsr()
413 run->msr.data = MSR_GS_BASE; in process_rdmsr()
[all …]
xen_vmcall_test.c:106 volatile struct kvm_run *run = vcpu->run; in main() local
111 if (run->exit_reason == KVM_EXIT_XEN) { in main()
112 TEST_ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL); in main()
113 TEST_ASSERT_EQ(run->xen.u.hcall.cpl, 0); in main()
114 TEST_ASSERT_EQ(run->xen.u.hcall.longmode, 1); in main()
115 TEST_ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE); in main()
116 TEST_ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1)); in main()
117 TEST_ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2)); in main()
118 TEST_ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3)); in main()
119 TEST_ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4)); in main()
[all …]
flds_emulation.h:22 struct kvm_run *run = vcpu->run; in handle_flds_emulation_failure_exit() local
29 TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION, in handle_flds_emulation_failure_exit()
31 run->emulation_failure.suberror); in handle_flds_emulation_failure_exit()
33 flags = run->emulation_failure.flags; in handle_flds_emulation_failure_exit()
34 TEST_ASSERT(run->emulation_failure.ndata >= 3 && in handle_flds_emulation_failure_exit()
38 TEST_ASSERT(run->emulation_failure.insn_size >= 2, in handle_flds_emulation_failure_exit()
40 run->emulation_failure.insn_size); in handle_flds_emulation_failure_exit()
42 insn_bytes = run->emulation_failure.insn_bytes; in handle_flds_emulation_failure_exit()
hyperv_extended_hypercalls.c:41 struct kvm_run *run; in main() local
56 run = vcpu->run; in main()
72 TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERV, in main()
74 run->exit_reason, exit_reason_str(run->exit_reason)); in main()
76 outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]); in main()
78 run->hyperv.u.hcall.result = HV_STATUS_SUCCESS; in main()
82 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, in main()
84 run->exit_reason, exit_reason_str(run->exit_reason)); in main()
/linux-6.12.1/fs/ntfs3/
run.c:34 static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index) in run_lookup() argument
39 if (!run->count) { in run_lookup()
45 max_idx = run->count - 1; in run_lookup()
48 r = run->runs; in run_lookup()
61 *index = run->count; in run_lookup()
72 r = run->runs + mid_idx; in run_lookup()
93 static void run_consolidate(struct runs_tree *run, size_t index) in run_consolidate() argument
96 struct ntfs_run *r = run->runs + index; in run_consolidate()
98 while (index + 1 < run->count) { in run_consolidate()
154 i = run->count - (index + 1); in run_consolidate()
[all …]
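
run_lookup() above binary-searches the sorted run list of an ntfs3 runs_tree for the run that contains a given virtual cluster number (VCN). A self-contained sketch of the same idea (the struct layout and names here are simplified assumptions for illustration, not the ntfs3 definitions):

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified run descriptor: a contiguous range of virtual clusters. */
    struct simple_run {
        unsigned long long vcn;   /* first virtual cluster of the run */
        unsigned long long len;   /* number of clusters in the run */
    };

    /*
     * Find the run containing @vcn in a sorted array of runs.
     * Returns true and stores the index on a hit; on a miss, stores the
     * index where such a run would be inserted (mirroring run_lookup()).
     */
    static bool simple_run_lookup(const struct simple_run *runs, size_t count,
                                  unsigned long long vcn, size_t *index)
    {
        size_t lo = 0, hi;

        if (!count) {
            *index = 0;
            return false;
        }

        hi = count - 1;
        while (lo <= hi) {
            size_t mid = lo + (hi - lo) / 2;
            const struct simple_run *r = &runs[mid];

            if (vcn < r->vcn) {
                if (!mid)
                    break;
                hi = mid - 1;
            } else if (vcn >= r->vcn + r->len) {
                lo = mid + 1;
            } else {
                *index = mid;
                return true;
            }
        }

        *index = lo;
        return false;
    }
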
/linux-6.12.1/arch/loongarch/kvm/
exit.c:157 int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) in kvm_emu_iocsr() argument
171 run->iocsr_io.phys_addr = addr; in kvm_emu_iocsr()
172 run->iocsr_io.is_write = 0; in kvm_emu_iocsr()
177 run->iocsr_io.len = 1; in kvm_emu_iocsr()
180 run->iocsr_io.len = 2; in kvm_emu_iocsr()
183 run->iocsr_io.len = 4; in kvm_emu_iocsr()
186 run->iocsr_io.len = 8; in kvm_emu_iocsr()
189 run->iocsr_io.len = 1; in kvm_emu_iocsr()
190 run->iocsr_io.is_write = 1; in kvm_emu_iocsr()
193 run->iocsr_io.len = 2; in kvm_emu_iocsr()
[all …]
/linux-6.12.1/drivers/staging/media/sunxi/cedrus/
cedrus_dec.c:29 struct cedrus_run run = {}; in cedrus_device_run() local
33 run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); in cedrus_device_run()
34 run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); in cedrus_device_run()
37 src_req = run.src->vb2_buf.req_obj.req; in cedrus_device_run()
44 run.mpeg2.sequence = cedrus_find_control_data(ctx, in cedrus_device_run()
46 run.mpeg2.picture = cedrus_find_control_data(ctx, in cedrus_device_run()
48 run.mpeg2.quantisation = cedrus_find_control_data(ctx, in cedrus_device_run()
53 run.h264.decode_params = cedrus_find_control_data(ctx, in cedrus_device_run()
55 run.h264.pps = cedrus_find_control_data(ctx, in cedrus_device_run()
57 run.h264.scaling_matrix = cedrus_find_control_data(ctx, in cedrus_device_run()
[all …]
/linux-6.12.1/tools/perf/tests/
make:10 # run only specific test over 'Makefile'
130 # $(run) contains all available tests
131 run := make_pure
132 # Targets 'clean all' can be run together only through top level
136 run += make_clean_all
141 run += make_python_perf_so
142 run += make_debug
143 run += make_nondistro
144 run += make_extra_tests
145 run += make_jevents_all
[all …]
/linux-6.12.1/tools/testing/selftests/arm64/mte/
check_mmap_options.c:63 int run, result, map_size; in check_anonymous_memory_mapping() local
67 for (run = 0; run < item; run++) { in check_anonymous_memory_mapping()
68 map_size = sizes[run] + OVERFLOW + UNDERFLOW; in check_anonymous_memory_mapping()
74 mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]); in check_anonymous_memory_mapping()
76 ptr = mte_insert_tags((void *)ptr, sizes[run]); in check_anonymous_memory_mapping()
82 result = check_mte_memory(ptr, sizes[run], mode, tag_check); in check_anonymous_memory_mapping()
83 mte_clear_tags((void *)ptr, sizes[run]); in check_anonymous_memory_mapping()
94 int run, fd, map_size; in check_file_memory_mapping() local
99 for (run = 0; run < total; run++) { in check_file_memory_mapping()
104 map_size = sizes[run] + UNDERFLOW + OVERFLOW; in check_file_memory_mapping()
[all …]
check_child_memory.c:87 int run, result; in check_child_memory_mapping() local
92 for (run = 0; run < item; run++) { in check_child_memory_mapping()
93 ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, in check_child_memory_mapping()
95 if (check_allocated_memory_range(ptr, sizes[run], mem_type, in check_child_memory_mapping()
98 result = check_child_tag_inheritance(ptr, sizes[run], mode); in check_child_memory_mapping()
99 mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW); in check_child_memory_mapping()
109 int run, fd, map_size, result = KSFT_PASS; in check_child_file_mapping() local
113 for (run = 0; run < total; run++) { in check_child_file_mapping()
118 map_size = sizes[run] + OVERFLOW + UNDERFLOW; in check_child_file_mapping()
125 mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]); in check_child_file_mapping()
[all …]
/linux-6.12.1/tools/testing/selftests/kvm/
coalesced_io_test.c:78 struct kvm_run *run = vcpu->run; in vcpu_run_and_verify_io_exit() local
90 if (run->exit_reason == KVM_EXIT_IO) in vcpu_run_and_verify_io_exit()
91 pio_value = *(uint32_t *)((void *)run + run->io.data_offset); in vcpu_run_and_verify_io_exit()
95 TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write && in vcpu_run_and_verify_io_exit()
96 run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 && in vcpu_run_and_verify_io_exit()
97 *(uint64_t *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) || in vcpu_run_and_verify_io_exit()
98 (want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port && in vcpu_run_and_verify_io_exit()
99 run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 && in vcpu_run_and_verify_io_exit()
106 (want_pio ? io->pio_port : io->mmio_gpa) + io->ring_size - 1, run->exit_reason, in vcpu_run_and_verify_io_exit()
107 …run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? "PIO" : "other", in vcpu_run_and_verify_io_exit()
[all …]
/linux-6.12.1/arch/s390/kvm/
diag.c:25 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in diag_release_pages()
26 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE; in diag_release_pages()
76 vcpu->run->s.regs.gprs[rx]); in __diag_page_ref_service()
78 if (vcpu->run->s.regs.gprs[rx] & 7) in __diag_page_ref_service()
80 rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); in __diag_page_ref_service()
97 vcpu->run->s.regs.gprs[ry] = 8; in __diag_page_ref_service()
111 vcpu->run->s.regs.gprs[ry] = 0; in __diag_page_ref_service()
125 vcpu->run->s.regs.gprs[ry] = 0; in __diag_page_ref_service()
131 vcpu->run->s.regs.gprs[ry] = 4; in __diag_page_ref_service()
172 tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in __diag_time_slice_end_directed()
[all …]
/linux-6.12.1/arch/mips/kvm/
emulate.c:962 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in kvm_mips_emul_wait()
975 struct kvm_run *run = vcpu->run; in kvm_mips_emulate_store() local
976 void *data = run->mmio.data; in kvm_mips_emulate_store()
991 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
993 if (run->mmio.phys_addr == KVM_INVALID_ADDR) in kvm_mips_emulate_store()
999 run->mmio.len = 8; in kvm_mips_emulate_store()
1009 run->mmio.len = 4; in kvm_mips_emulate_store()
1018 run->mmio.len = 2; in kvm_mips_emulate_store()
1027 run->mmio.len = 1; in kvm_mips_emulate_store()
1036 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
[all …]
/linux-6.12.1/tools/perf/scripts/python/
stat-cpi.py:23 def store(time, event, cpu, thread, val, ena, run): argument
29 data[key] = [ val, ena, run]
35 def stat__cycles_k(cpu, thread, time, val, ena, run): argument
36 store(time, "cycles", cpu, thread, val, ena, run);
38 def stat__instructions_k(cpu, thread, time, val, ena, run): argument
39 store(time, "instructions", cpu, thread, val, ena, run);
41 def stat__cycles_u(cpu, thread, time, val, ena, run): argument
42 store(time, "cycles", cpu, thread, val, ena, run);
44 def stat__instructions_u(cpu, thread, time, val, ena, run): argument
45 store(time, "instructions", cpu, thread, val, ena, run);
[all …]
/linux-6.12.1/tools/testing/selftests/kvm/include/s390x/
debug_print.h:43 static inline void print_psw(struct kvm_run *run, struct kvm_s390_sie_block *sie_block) in print_psw() argument
46 run->flags, in print_psw()
47 run->psw_mask, run->psw_addr, in print_psw()
48 run->exit_reason, exit_reason_str(run->exit_reason)); in print_psw()
53 static inline void print_run(struct kvm_run *run, struct kvm_s390_sie_block *sie_block) in print_run() argument
55 print_hex_bytes("run", (u64)run, 0x150); in print_run()
57 print_psw(run, sie_block); in print_run()
60 static inline void print_regs(struct kvm_run *run) in print_regs() argument
62 struct kvm_sync_regs *sync_regs = &run->s.regs; in print_regs()
/linux-6.12.1/arch/riscv/kvm/
vcpu_insn.c:153 int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
156 static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, in truly_illegal_insn() argument
172 static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, in truly_virtual_insn() argument
202 static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn) in wfi_insn() argument
209 static int wrs_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn) in wrs_insn() argument
253 int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_riscv_vcpu_csr_return() argument
265 run->riscv_csr.ret_value); in kvm_riscv_vcpu_csr_return()
273 static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn) in csr_insn() argument
317 run->riscv_csr.csr_num = csr_num; in csr_insn()
318 run->riscv_csr.new_value = new_val; in csr_insn()
[all …]
/linux-6.12.1/fs/befs/
endian.h:74 befs_block_run run; in fsrun_to_cpu() local
77 run.allocation_group = le32_to_cpu((__force __le32)n.allocation_group); in fsrun_to_cpu()
78 run.start = le16_to_cpu((__force __le16)n.start); in fsrun_to_cpu()
79 run.len = le16_to_cpu((__force __le16)n.len); in fsrun_to_cpu()
81 run.allocation_group = be32_to_cpu((__force __be32)n.allocation_group); in fsrun_to_cpu()
82 run.start = be16_to_cpu((__force __be16)n.start); in fsrun_to_cpu()
83 run.len = be16_to_cpu((__force __be16)n.len); in fsrun_to_cpu()
85 return run; in fsrun_to_cpu()
91 befs_disk_block_run run; in cpu_to_fsrun() local
94 run.allocation_group = cpu_to_le32(n.allocation_group); in cpu_to_fsrun()
[all …]
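
fsrun_to_cpu() above converts a BeFS block run from its on-disk byte order to host order (the kernel code handles both little- and big-endian volumes). A userspace-flavoured sketch of the little-endian case, using glibc's <endian.h> helpers in place of the kernel's le*_to_cpu(); the struct names are simplified assumptions, not the befs definitions:

    #include <endian.h>
    #include <stdint.h>

    /* On-disk block run, stored little-endian (simplified layout). */
    struct disk_block_run {
        uint32_t allocation_group;
        uint16_t start;
        uint16_t len;
    };

    /* Same run in host byte order. */
    struct cpu_block_run {
        uint32_t allocation_group;
        uint16_t start;
        uint16_t len;
    };

    /* Convert an on-disk (little-endian) run to host order, like fsrun_to_cpu(). */
    static struct cpu_block_run run_to_cpu(struct disk_block_run n)
    {
        struct cpu_block_run run;

        run.allocation_group = le32toh(n.allocation_group);
        run.start = le16toh(n.start);
        run.len = le16toh(n.len);

        return run;
    }
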
/linux-6.12.1/tools/testing/selftests/kvm/lib/s390x/
ucall.c:11 struct kvm_run *run = vcpu->run; in ucall_arch_get_ucall() local
13 if (run->exit_reason == KVM_EXIT_S390_SIEIC && in ucall_arch_get_ucall()
14 run->s390_sieic.icptcode == 4 && in ucall_arch_get_ucall()
15 (run->s390_sieic.ipa >> 8) == 0x83 && /* 0x83 means DIAGNOSE */ in ucall_arch_get_ucall()
16 (run->s390_sieic.ipb >> 16) == 0x501) { in ucall_arch_get_ucall()
17 int reg = run->s390_sieic.ipa & 0xf; in ucall_arch_get_ucall()
19 return (void *)run->s.regs.gprs[reg]; in ucall_arch_get_ucall()
/linux-6.12.1/arch/arm64/kvm/
mmio.c:94 struct kvm_run *run = vcpu->run; in kvm_handle_mmio_return() local
97 data = kvm_mmio_read_buf(run->mmio.data, len); in kvm_handle_mmio_return()
108 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, in kvm_handle_mmio_return()
125 struct kvm_run *run = vcpu->run; in io_mem_abort() local
151 run->exit_reason = KVM_EXIT_ARM_NISV; in io_mem_abort()
152 run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu); in io_mem_abort()
153 run->arm_nisv.fault_ipa = fault_ipa; in io_mem_abort()
187 run->mmio.is_write = is_write; in io_mem_abort()
188 run->mmio.phys_addr = fault_ipa; in io_mem_abort()
189 run->mmio.len = len; in io_mem_abort()
[all …]
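
io_mem_abort() above fills in run->mmio and exits to userspace with KVM_EXIT_MMIO; the VMM emulates the access and, for reads, places the result in run->mmio.data before re-entering, where kvm_handle_mmio_return() picks it up. A minimal userspace sketch of that loop (device_read, device_write and vcpu_loop are hypothetical stand-ins; error handling and other exit reasons are omitted, and the data copy assumes a little-endian host):

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Hypothetical device model used by the sketch below. */
    extern uint64_t device_read(uint64_t gpa, unsigned int len);
    extern void device_write(uint64_t gpa, unsigned int len, const void *data);

    /* Run a vCPU and emulate its MMIO exits via the shared kvm_run. */
    static void vcpu_loop(int vcpu_fd, struct kvm_run *run)
    {
        for (;;) {
            ioctl(vcpu_fd, KVM_RUN, 0);

            if (run->exit_reason != KVM_EXIT_MMIO)
                break;                  /* hand other exits elsewhere */

            if (run->mmio.is_write) {
                device_write(run->mmio.phys_addr, run->mmio.len,
                             run->mmio.data);
            } else {
                uint64_t val = device_read(run->mmio.phys_addr, run->mmio.len);

                /* The kernel copies this back to the guest on re-entry. */
                memcpy(run->mmio.data, &val, run->mmio.len);
            }
        }
    }
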
