/linux-6.12.1/include/asm-generic/ |
D | xor.h |
    33   const unsigned long * __restrict p3) in xor_8regs_3() argument
    38   p1[0] ^= p2[0] ^ p3[0]; in xor_8regs_3()
    39   p1[1] ^= p2[1] ^ p3[1]; in xor_8regs_3()
    40   p1[2] ^= p2[2] ^ p3[2]; in xor_8regs_3()
    41   p1[3] ^= p2[3] ^ p3[3]; in xor_8regs_3()
    42   p1[4] ^= p2[4] ^ p3[4]; in xor_8regs_3()
    43   p1[5] ^= p2[5] ^ p3[5]; in xor_8regs_3()
    44   p1[6] ^= p2[6] ^ p3[6]; in xor_8regs_3()
    45   p1[7] ^= p2[7] ^ p3[7]; in xor_8regs_3()
    48   p3 += 8; in xor_8regs_3()
    [all …]

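These matches are the portable C fallback: every pass folds eight machine words from each source buffer into the destination, then steps all three pointers forward by eight. Reassembled from the matched lines into a complete routine; the loop bound and the p1/p2 increments are filled in from the visible eight-word stride, since the truncated listing omits them:

static void
xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1,
	    const unsigned long * __restrict p2,
	    const unsigned long * __restrict p3)
{
	/* Eight unsigned longs are combined per iteration. */
	long lines = bytes / (sizeof(long) * 8);

	do {
		p1[0] ^= p2[0] ^ p3[0];
		p1[1] ^= p2[1] ^ p3[1];
		p1[2] ^= p2[2] ^ p3[2];
		p1[3] ^= p2[3] ^ p3[3];
		p1[4] ^= p2[4] ^ p3[4];
		p1[5] ^= p2[5] ^ p3[5];
		p1[6] ^= p2[6] ^ p3[6];
		p1[7] ^= p2[7] ^ p3[7];
		p1 += 8;
		p2 += 8;
		p3 += 8;	/* line 48 in the listing above */
	} while (--lines > 0);
}
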
/linux-6.12.1/arch/arm/include/asm/ |
D | xor.h |
    70   const unsigned long * __restrict p3) in xor_arm4regs_3() argument
    85   XOR_BLOCK_4(p3); in xor_arm4regs_3()
    93   const unsigned long * __restrict p3, in xor_arm4regs_4() argument
    105  XOR_BLOCK_2(p3); in xor_arm4regs_4()
    114  const unsigned long * __restrict p3, in xor_arm4regs_5() argument
    127  XOR_BLOCK_2(p3); in xor_arm4regs_5()
    171  const unsigned long * __restrict p3) in xor_neon_3() argument
    174  xor_arm4regs_3(bytes, p1, p2, p3); in xor_neon_3()
    177  xor_block_neon_inner.do_3(bytes, p1, p2, p3); in xor_neon_3()
    185  const unsigned long * __restrict p3, in xor_neon_4() argument
    [all …]

/linux-6.12.1/arch/sparc/include/asm/ |
D | xor_32.h |
    56   const unsigned long * __restrict p3) in sparc_3() argument
    95   : "r" (p1), "r" (p2), "r" (p3) in sparc_3()
    101  p3 += 8; in sparc_3()
    108  const unsigned long * __restrict p3, in sparc_4() argument
    160  : "r" (p1), "r" (p2), "r" (p3), "r" (p4) in sparc_4()
    166  p3 += 8; in sparc_4()
    174  const unsigned long * __restrict p3, in sparc_5() argument
    239  : "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5) in sparc_5()
    245  p3 += 8; in sparc_5()

D | xor_64.h |
    19   const unsigned long * __restrict p3);
    22   const unsigned long * __restrict p3,
    26   const unsigned long * __restrict p3,
    44   const unsigned long * __restrict p3);
    47   const unsigned long * __restrict p3,
    51   const unsigned long * __restrict p3,

/linux-6.12.1/arch/hexagon/lib/ |
D | memcpy.S |
    192  p3 = cmp.gtu(len, #95); /* %8 < 97 */ define
    202  p2 = and(p2,!p3); /* %8 < 97 */
    255  p3 = cmp.gtu(back, #8); define
    290  if(p3) dataF8 = memd(ptr_in+#8);
    320  p3 = sp1loop0(.Ldword_loop_prolog, prolog) define
    327  if(p3) memd(ptr_out++#8) = ldata0;
    339  p3 = cmp.gtu(kernel, #0); define
    341  if(p3.new) kernel = add(kernel, #-1);
    343  if(p3.new) epilog = add(epilog, #32);
    349  p3 = cmp.gtu(dalign, #24); define
    [all …]

/linux-6.12.1/tools/testing/selftests/ftrace/test.d/instances/ |
D | instance-event.tc |
    55   p3=$!
    56   echo $p3
    60   kill -1 $p3
    65   wait $p1 $p2 $p3
    107  p3=$!
    108  echo $p3
    123  kill -1 $p3
    128  wait $p1 $p2 $p3 $p4 $p5

D | instance.tc |
    47   p3=$!
    48   echo $p3
    63   kill -1 $p3
    68   wait $p1 $p2 $p3 $p4 $p5

/linux-6.12.1/arch/x86/include/asm/ |
D | xor.h |
    149  const unsigned long * __restrict p3) in xor_sse_3() argument
    199  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) in xor_sse_3()
    209  const unsigned long * __restrict p3) in xor_sse_3_pf64() argument
    237  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) in xor_sse_3_pf64()
    247  const unsigned long * __restrict p3, in xor_sse_4() argument
    305  [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) in xor_sse_4()
    315  const unsigned long * __restrict p3, in xor_sse_4_pf64() argument
    346  [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) in xor_sse_4_pf64()
    356  const unsigned long * __restrict p3, in xor_sse_5() argument
    422  [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) in xor_sse_5()
    [all …]

D | xor_avx.h |
    90   const unsigned long * __restrict p3) in xor_avx_4() argument
    100  asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p3[i / sizeof(*p3)])); \ in xor_avx_4()
    116  p3 = (unsigned long *)((uintptr_t)p3 + 512); in xor_avx_4()
    125  const unsigned long * __restrict p3, in xor_avx_5() argument
    138  "m" (p3[i / sizeof(*p3)])); \ in xor_avx_5()
    154  p3 = (unsigned long *)((uintptr_t)p3 + 512); in xor_avx_5()

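The AVX variants load source words with vmovdqa into ymm registers and advance each pointer by 512 bytes per pass (visible at lines 116 and 154 above). As a rough user-space analogue, a minimal 3-source XOR in AVX2 intrinsics; the function name, the single 32-byte step, and the alignment requirement are illustrative assumptions, not the kernel's 512-byte software pipeline:

#include <immintrin.h>
#include <stddef.h>

/* Sketch: one ymm register per step. Buffers are assumed 32-byte
 * aligned and bytes a multiple of 32. */
static void xor_avx3_sketch(size_t bytes, unsigned long *p1,
			    const unsigned long *p2,
			    const unsigned long *p3)
{
	for (size_t i = 0; i < bytes; i += 32) {
		__m256i v = _mm256_load_si256((const __m256i *)((const char *)p1 + i));
		v = _mm256_xor_si256(v, _mm256_load_si256((const __m256i *)((const char *)p2 + i)));
		v = _mm256_xor_si256(v, _mm256_load_si256((const __m256i *)((const char *)p3 + i)));
		_mm256_store_si256((__m256i *)((char *)p1 + i), v);
	}
}
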
D | kvm_para.h |
    78   unsigned long p2, unsigned long p3) in kvm_hypercall3() argument
    83   return tdx_kvm_hypercall(nr, p1, p2, p3, 0); in kvm_hypercall3()
    87   : "a"(nr), "b"(p1), "c"(p2), "d"(p3) in kvm_hypercall3()
    93   unsigned long p2, unsigned long p3, in kvm_hypercall4() argument
    99   return tdx_kvm_hypercall(nr, p1, p2, p3, p4); in kvm_hypercall4()
    103  : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4) in kvm_hypercall4()
    109  unsigned long p2, unsigned long p3) in kvm_sev_hypercall3() argument
    115  : "a"(nr), "b"(p1), "c"(p2), "d"(p3) in kvm_sev_hypercall3()

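kvm_para.h pins down the guest hypercall ABI: the call number in eax/rax, arguments in ebx, ecx, edx (esi for a fourth), with TDX guests diverted to tdx_kvm_hypercall() first. A stripped-down sketch of the three-argument form; the real kernel emits vmcall or vmmcall through ALTERNATIVE() depending on the vendor, so the plain vmcall below is a simplification:

/* Sketch only: the real kvm_hypercall3() patches vmcall to vmmcall
 * on AMD and short-circuits to tdx_kvm_hypercall() on TDX guests. */
static inline long kvm_hypercall3_sketch(unsigned int nr, unsigned long p1,
					 unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}
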
D | xor_32.h |
    70   const unsigned long * __restrict p3) in xor_pII_mmx_3() argument
    110  "+r" (p1), "+r" (p2), "+r" (p3) in xor_pII_mmx_3()
    120  const unsigned long * __restrict p3, in xor_pII_mmx_4() argument
    166  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) in xor_pII_mmx_4()
    177  const unsigned long * __restrict p3, in xor_pII_mmx_5() argument
    237  "+r" (p1), "+r" (p2), "+r" (p3) in xor_pII_mmx_5()
    308  const unsigned long * __restrict p3) in xor_p5_mmx_3() argument
    356  "+r" (p1), "+r" (p2), "+r" (p3) in xor_p5_mmx_3()
    366  const unsigned long * __restrict p3, in xor_p5_mmx_4() argument
    424  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) in xor_p5_mmx_4()
    [all …]

/linux-6.12.1/arch/s390/lib/ |
D | xor.c |
    38   const unsigned long * __restrict p3) in xor_xc_3() argument
    59   : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3) in xor_xc_3()
    65   const unsigned long * __restrict p3, in xor_xc_4() argument
    91   : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4) in xor_xc_4()
    97   const unsigned long * __restrict p3, in xor_xc_5() argument
    128  : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4), in xor_xc_5()

/linux-6.12.1/scripts/coccinelle/free/ |
D | iounmap.cocci |
    23   position p1,p2,p3;
    46   return@p3 ...; }
    54   p3 << iom.p3;
    59   cocci.print_secs("needed iounmap",p3)
    64   p3 << iom.p3;
    68   coccilib.report.print_report(p3[0],msg)

D | clk_put.cocci |
    23   position p1,p2,p3;
    46   return@p3 ...; }
    54   p3 << clk.p3;
    59   cocci.print_secs("needed clk_put",p3)
    64   p3 << clk.p3;
    68   coccilib.report.print_report(p3[0],msg)

/linux-6.12.1/arch/loongarch/lib/ |
D | xor_simd.h |
    16   const unsigned long * __restrict p2, const unsigned long * __restrict p3);
    18   const unsigned long * __restrict p2, const unsigned long * __restrict p3,
    21   const unsigned long * __restrict p2, const unsigned long * __restrict p3,
    29   const unsigned long * __restrict p2, const unsigned long * __restrict p3);
    31   const unsigned long * __restrict p2, const unsigned long * __restrict p3,
    34   const unsigned long * __restrict p2, const unsigned long * __restrict p3,

D | xor_simd_glue.c |
    27   const unsigned long * __restrict p3) \
    30   __xor_##flavor##_3(bytes, p1, p2, p3); \
    38   const unsigned long * __restrict p3, \
    42   __xor_##flavor##_4(bytes, p1, p2, p3, p4); \
    50   const unsigned long * __restrict p3, \
    55   __xor_##flavor##_5(bytes, p1, p2, p3, p4, p5); \

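The glue file stamps out thin wrappers with a token-pasting macro so the LSX and LASX flavors get identical entry points around the raw workers declared in the header above. A hedged reconstruction of the 3-source wrapper; the kernel_fpu_begin()/kernel_fpu_end() bracketing is my assumption about what the truncated matches hide, and the macro name is invented:

#define XOR_GLUE_3_SKETCH(flavor)					\
void xor_##flavor##_3(unsigned long bytes,				\
		      unsigned long * __restrict p1,			\
		      const unsigned long * __restrict p2,		\
		      const unsigned long * __restrict p3)		\
{									\
	kernel_fpu_begin();	/* assumption: guard SIMD state */	\
	__xor_##flavor##_3(bytes, p1, p2, p3);				\
	kernel_fpu_end();						\
}
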
/linux-6.12.1/arch/loongarch/include/asm/ |
D | xor_simd.h |
    12   const unsigned long * __restrict p2, const unsigned long * __restrict p3);
    14   const unsigned long * __restrict p2, const unsigned long * __restrict p3,
    17   const unsigned long * __restrict p2, const unsigned long * __restrict p3,
    25   const unsigned long * __restrict p2, const unsigned long * __restrict p3);
    27   const unsigned long * __restrict p2, const unsigned long * __restrict p3,
    30   const unsigned long * __restrict p2, const unsigned long * __restrict p3,

/linux-6.12.1/arch/powerpc/lib/ |
D | xor_vmx_glue.c |
    28   const unsigned long * __restrict p3) in xor_altivec_3() argument
    32   __xor_altivec_3(bytes, p1, p2, p3); in xor_altivec_3()
    40   const unsigned long * __restrict p3, in xor_altivec_4() argument
    45   __xor_altivec_4(bytes, p1, p2, p3, p4); in xor_altivec_4()
    53   const unsigned long * __restrict p3, in xor_altivec_5() argument
    59   __xor_altivec_5(bytes, p1, p2, p3, p4, p5); in xor_altivec_5()

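powerpc uses the same wrapper shape, but guards VMX state with preemption disabled rather than a generic FPU helper. Reconstructed from the matched lines, with the enable_kernel_altivec() bracketing inferred from the usual VMX usage pattern rather than shown in the truncation:

void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
		   const unsigned long * __restrict p2,
		   const unsigned long * __restrict p3)
{
	preempt_disable();
	enable_kernel_altivec();	/* make VMX usable in kernel mode */
	__xor_altivec_3(bytes, p1, p2, p3);
	disable_kernel_altivec();
	preempt_enable();
}
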
D | xor_vmx.h |
    13   const unsigned long * __restrict p3);
    16   const unsigned long * __restrict p3,
    20   const unsigned long * __restrict p3,

/linux-6.12.1/arch/riscv/include/asm/ |
D | xor.h |
    23   const unsigned long *__restrict p3) in xor_vector_3() argument
    26   xor_regs_3_(bytes, p1, p2, p3); in xor_vector_3()
    32   const unsigned long *__restrict p3, in xor_vector_4() argument
    36   xor_regs_4_(bytes, p1, p2, p3, p4); in xor_vector_4()
    42   const unsigned long *__restrict p3, in xor_vector_5() argument
    47   xor_regs_5_(bytes, p1, p2, p3, p4, p5); in xor_vector_5()

/linux-6.12.1/arch/arm64/include/asm/ |
D | xor.h |
    30   const unsigned long * __restrict p3) in xor_neon_3() argument
    33   xor_block_inner_neon.do_3(bytes, p1, p2, p3); in xor_neon_3()
    40   const unsigned long * __restrict p3, in xor_neon_4() argument
    44   xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4); in xor_neon_4()
    51   const unsigned long * __restrict p3, in xor_neon_5() argument
    56   xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5); in xor_neon_5()

/linux-6.12.1/arch/alpha/include/asm/ |
D | xor.h |
    14   const unsigned long * __restrict p3);
    18   const unsigned long * __restrict p3,
    23   const unsigned long * __restrict p3,
    33   const unsigned long * __restrict p3);
    37   const unsigned long * __restrict p3,
    42   const unsigned long * __restrict p3,

/linux-6.12.1/arch/hexagon/mm/ |
D | copy_user_template.S |
    35   p3=sp1loop0(.Loop8,loopcount)
    40   if (p3) memd(dst++#8) = d_dbuf
    65   p3=sp1loop0(.Loop4,loopcount)
    70   if (p3) memw(dst++#4) = w_dbuf
    91   p3=sp1loop0(.Loop2,loopcount)
    96   if (p3) memh(dst++#2) = w_dbuf
    107  p3=sp1loop0(.Loop1,bytes)
    112  if (p3) memb(dst++#1) = w_dbuf

/linux-6.12.1/arch/arm64/lib/ |
D | xor-neon.c |
    42   const unsigned long * __restrict p3) in xor_arm64_neon_3() argument
    46   uint64_t *dp3 = (uint64_t *)p3; in xor_arm64_neon_3()
    78   const unsigned long * __restrict p3, in xor_arm64_neon_4() argument
    83   uint64_t *dp3 = (uint64_t *)p3; in xor_arm64_neon_4()
    123  const unsigned long * __restrict p3, in xor_arm64_neon_5() argument
    129  uint64_t *dp3 = (uint64_t *)p3; in xor_arm64_neon_5()
    197  const unsigned long * __restrict p3) in xor_arm64_eor3_3() argument
    201  uint64_t *dp3 = (uint64_t *)p3; in xor_arm64_eor3_3()
    232  const unsigned long * __restrict p3, in xor_arm64_eor3_4() argument
    237  uint64_t *dp3 = (uint64_t *)p3; in xor_arm64_eor3_4()
    [all …]

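xor-neon.c immediately recasts the unsigned long pointers to uint64_t (the dp3 lines above) and does the work in 128-bit q registers, with the xor_arm64_eor3_* variants using the EOR3 instruction on CPUs that have SHA-3 support. A reduced intrinsics sketch of the plain 3-source loop; the kernel unrolls four vectors per pass where this takes one:

#include <arm_neon.h>

/* One 16-byte vector per step; the kernel's version is 4x unrolled. */
static void xor_neon_3_sketch(unsigned long bytes, unsigned long *p1,
			      const unsigned long *p2,
			      const unsigned long *p3)
{
	uint64_t *dp1 = (uint64_t *)p1;
	const uint64_t *dp2 = (const uint64_t *)p2;
	const uint64_t *dp3 = (const uint64_t *)p3;
	long lines = bytes / sizeof(uint64x2_t);

	do {
		uint64x2_t v = veorq_u64(vld1q_u64(dp1), vld1q_u64(dp2));
		vst1q_u64(dp1, veorq_u64(v, vld1q_u64(dp3)));
		dp1 += 2;
		dp2 += 2;
		dp3 += 2;
	} while (--lines > 0);
}
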
/linux-6.12.1/arch/powerpc/include/asm/ |
D | xor_altivec.h |
    10   const unsigned long * __restrict p3);
    13   const unsigned long * __restrict p3,
    17   const unsigned long * __restrict p3,

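Every do_3-shaped routine in these results plugs into one dispatch table: the RAID XOR core benchmarks the registered templates at boot and keeps the fastest one. The template, paraphrased from include/linux/raid/xor.h as I recall it (verify against the header before relying on the exact layout):

struct xor_block_template {
	struct xor_block_template *next;
	const char *name;	/* e.g. "8regs", "neon", "avx" */
	int speed;		/* filled in by the boot-time calibration */
	void (*do_2)(unsigned long, unsigned long * __restrict,
		     const unsigned long * __restrict);
	void (*do_3)(unsigned long, unsigned long * __restrict,
		     const unsigned long * __restrict,
		     const unsigned long * __restrict);
	void (*do_4)(unsigned long, unsigned long * __restrict,
		     const unsigned long * __restrict,
		     const unsigned long * __restrict,
		     const unsigned long * __restrict);
	void (*do_5)(unsigned long, unsigned long * __restrict,
		     const unsigned long * __restrict,
		     const unsigned long * __restrict,
		     const unsigned long * __restrict,
		     const unsigned long * __restrict);
};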