/linux-6.12.1/arch/powerpc/crypto/
D | sha256-spe-asm.S |
    114 rotrwi rT0,a,2; /* 1: S0 = a rotr 2 */ \
    116 rotrwi rT1,a,13; /* 1: S0' = a rotr 13 */ \
    118 rotrwi rT3,a,22; /* 1: S0" = a rotr 22 */ \
    119 xor rT0,rT0,rT1; /* 1: S0 = S0 xor S0' */ \
    121 xor rT3,rT0,rT3; /* 1: S0 = S0 xor S0" */ \
    129 add rT3,rT3,rT2; /* 1: temp2 = S0 + maj */ \
    141 rotrwi rT0,h,2; /* 2: S0 = a rotr 2 */ \
    143 rotrwi rT1,h,13; /* 2: S0' = a rotr 13 */ \
    145 rotrwi rT3,h,22; /* 2: S0" = a rotr 22 */ \
    146 xor rT0,rT0,rT1; /* 2: S0 = S0 xor S0' */ \
    [all …]
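The comments above spell out the SHA-256 big-sigma-0 term of the compression round: a rotated right by 2, 13 and 22 bits and XORed together, then added to the majority function to give temp2. A minimal scalar C sketch of that arithmetic (plain C rather than the SPE vector code; the helper names are illustrative):

#include <stdint.h>

/* Rotate a 32-bit word right by n bits (0 < n < 32). */
static inline uint32_t rotr32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* S0 = (a rotr 2) ^ (a rotr 13) ^ (a rotr 22), as in the comments above. */
static inline uint32_t sha256_big_sigma0(uint32_t a)
{
	return rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
}

/* Majority of a, b, c. */
static inline uint32_t sha256_maj(uint32_t a, uint32_t b, uint32_t c)
{
	return (a & b) ^ (a & c) ^ (b & c);
}

/* temp2 = S0 + maj, the value the "add rT3,rT3,rT2" line accumulates. */
static inline uint32_t sha256_temp2(uint32_t a, uint32_t b, uint32_t c)
{
	return sha256_big_sigma0(a) + sha256_maj(a, b, c);
}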
|
/linux-6.12.1/arch/loongarch/kernel/ |
D | relocate_kernel.S |
    26  move s0, a4
    32  beqz s0, done
    35  PTR_L s1, s0, 0
    36  PTR_ADDI s0, s0, SZREG
    46  /* indirection page, update s0 */
    50  and s0, s1, t0
    99  iocsrrd.w s0, t1 /* check PC as an indicator */
    100 beqz s0, 1b
    101 iocsrrd.d s0, t1 /* get PC via mailbox */
    104 or s0, s0, t0 /* s0 = TO_CACHE(s0) */
    [all …]
|
D | mcount.S |
    21 PTR_S s0, sp, MCOUNT_S0_OFFSET
    23 move s0, a0
    27 move a0, s0
    29 PTR_L s0, sp, MCOUNT_S0_OFFSET
    42 move a1, s0 /* arg1: parent's return address */
    74 move a2, s0 /* arg2: Callsite parent ra */
|
/linux-6.12.1/arch/riscv/kernel/ |
D | mcount.S |
    19  REG_S s0, 0*SZREG(sp)
    21  addi s0, sp, 16
    30  REG_S s0, 2*SZREG(sp)
    34  addi s0, sp, 4*SZREG
    39  REG_L s0, 0*SZREG(sp)
    45  REG_L s0, 2*SZREG(sp)
    67  * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
    71  * value stored in -16(s0) on entry, and the s0 on return.
    106 addi a0, s0, -SZREG
    109 REG_L a2, -2*SZREG(s0)
    [all …]
|
D | entry.S |
    148 REG_L s0, TASK_TI_USER_SP(tp)
    154 REG_S s0, PT_SP(sp)
    213 REG_L s0, PT_STATUS(sp)
    217 and s0, s0, t0
    219 andi s0, s0, SR_SPP
    221 bnez s0, 1f
    228 addi s0, sp, PT_SIZE_ON_STACK
    229 REG_S s0, TASK_TI_KERNEL_SP(tp)
    303 REG_L s0, TASK_TI_KERNEL_SP(tp)
    309 REG_S s0, PT_SP(sp)
    [all …]
|
D | kexec_relocate.S |
    16  * s0: Pointer to the current entry
    24  mv s0, a0
    59  REG_L t0, 0(s0) /* t0 = *image->entry */
    60  addi s0, s0, RISCV_SZPTR /* image->entry++ */
    72  andi s0, t0, ~0x2
    113 mv s0, zero
    153 * s0: (const) Phys address to jump to
    157 mv s0, a1
    168 mv a2, s0
    177 mv s0, zero
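The comments here ("t0 = *image->entry", "image->entry++", the ~0x2 mask) describe the generic kexec entry-list walk: each entry is a physical address with flag bits in its low bits, and an IND_INDIRECTION entry moves the cursor (s0) to a fresh page of entries. A hedged C sketch of that walk, assuming the IND_* encoding from include/linux/kexec.h and using the kernel helpers phys_to_virt() and copy_page() in place of what the assembly does by hand:

/* Illustrative sketch only: IND_* values as in include/linux/kexec.h;
 * phys_to_virt(), copy_page(), PAGE_SIZE and NULL are the usual kernel
 * definitions, so this assumes kernel context. */
typedef unsigned long kimage_entry_t;

#define IND_DESTINATION	0x1UL	/* following pages are copied to this address */
#define IND_INDIRECTION	0x2UL	/* keep reading entries from this page */
#define IND_DONE	0x4UL	/* end of the entry list */
#define IND_SOURCE	0x8UL	/* copy this page to the current destination */

static void kexec_walk_entries(kimage_entry_t *entry)	/* entry plays the role of s0 */
{
	void *dest = NULL;

	for (;;) {
		kimage_entry_t e = *entry++;	/* t0 = *image->entry; image->entry++ */

		if (e & IND_DONE)
			break;
		if (e & IND_INDIRECTION)
			entry = phys_to_virt(e & ~IND_INDIRECTION);	/* update the cursor */
		else if (e & IND_DESTINATION)
			dest = phys_to_virt(e & ~IND_DESTINATION);
		else if (e & IND_SOURCE) {
			copy_page(dest, phys_to_virt(e & ~IND_SOURCE));
			dest += PAGE_SIZE;
		}
	}
}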
|
/linux-6.12.1/arch/x86/crypto/ |
D | sha512-avx2-asm.S |
    200 xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
    201 rorx $28, a, T1 # T1 = (a >> 28) # S0
    204 xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
    210 add y1, h # h = k + w + h + S0 # --
    214 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
    215 add y3, h # h = t1 + S0 + MAJ # --
    227 vpxor YTMP1, YTMP3, YTMP1 # YTMP1 = s0
    231 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
    233 vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
    235 vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
    [all …]
|
D | sha256-avx2-asm.S |
    179 xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
    180 rorx $2, a, T1 # T1 = (a >> 2) # S0
    184 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
    191 add y1, h # h = k + w + h + S0 # --
    197 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
    198 add y3, h # h = t1 + S0 + MAJ # --
    228 xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
    231 rorx $2, a, T1 # T1 = (a >> 2) # S0
    235 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
    240 vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0
    [all …]
|
D | sm4-aesni-avx-asm_64.S |
    188 #define ROUND(round, s0, s1, s2, s3) \ argument
    201 vpxor RTMP0, s0, s0; /* s0 ^ x */ \
    207 vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
    210 vpxor RTMP0, s0, s0; \
    211 /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
    212 vpxor RTMP1, s0, s0;
    276 #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ argument
    301 vpxor RTMP0, s0, s0; /* s0 ^ x */ \
    316 vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
    317 /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
    [all …]
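The trailing comment in this ROUND macro is SM4's linear diffusion step: the S-box output x is XORed into s0 together with four rotations of itself. A scalar C sketch of that update (one 32-bit lane of what the AVX code does across a whole vector; the helper names are illustrative):

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

/* s0 = s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24),
 * where x is the S-box output for this round. */
static inline uint32_t sm4_round_update(uint32_t s0, uint32_t x)
{
	return s0 ^ x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
}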
|
D | sha256-ssse3-asm.S |
    149 ## compute s0 four at a time and s1 two at a time
    167 ## compute s0
    173 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
    186 add y1, h # h = h + S1 + CH + k + w + S0
    189 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
    215 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
    220 pxor XTMP4, XTMP1 # XTMP1 = s0
    227 add y1, h # h = h + S1 + CH + k + w + S0
    228 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
    230 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
    [all …]
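The "compute s0 four at a time" and "W[-16] + W[-7] + s0" comments refer to the SHA-256 message schedule, where lower-case s0/s1 are the small sigma functions. A scalar C sketch of the schedule that these XMM registers compute several words at a time:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* Small sigma0 and sigma1 of the SHA-256 message schedule. */
static inline uint32_t sha256_s0(uint32_t x)
{
	return rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3);
}

static inline uint32_t sha256_s1(uint32_t x)
{
	return rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10);
}

/* W[t] = W[t-16] + s0(W[t-15]) + W[t-7] + s1(W[t-2]) for t = 16..63;
 * the vector code accumulates the first three terms as "W[-16] + W[-7] + s0". */
static void sha256_expand_schedule(uint32_t w[64])
{
	for (int t = 16; t < 64; t++)
		w[t] = w[t - 16] + sha256_s0(w[t - 15]) + w[t - 7] + sha256_s1(w[t - 2]);
}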
|
/linux-6.12.1/drivers/soc/bcm/brcmstb/pm/ |
D | s2-mips.S |
    24  sw s0, 4(sp)
    35  * s0: AON_CTRL base register
    44  lw s0, 0(t0)
    79  sw zero, AON_CTRL_PM_CTRL(s0)
    80  lw zero, AON_CTRL_PM_CTRL(s0)
    81  sw t0, AON_CTRL_PM_CTRL(s0)
    82  lw t0, AON_CTRL_PM_CTRL(s0)
    134 sw t1, AON_CTRL_HOST_MISC_CMDS(s0)
    135 lw t1, AON_CTRL_HOST_MISC_CMDS(s0)
    137 sw zero, AON_CTRL_PM_CTRL(s0)
    [all …]
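Each store to an AON_CTRL register above is immediately followed by a load from the same offset, which looks like the usual write-then-read-back idiom: the read forces the MMIO write to complete before the suspend sequence continues. A hedged C equivalent of that pattern, assuming kernel context; base corresponds to the AON_CTRL base address kept in s0, and AON_CTRL_PM_CTRL is the offset name taken from the listing:

#include <linux/io.h>

/* Sketch only: write a PM control value and read it back so the write has
 * landed before we go on (mirrors the sw/lw pairs in the assembly). */
static void aon_ctrl_write_posted(void __iomem *base, u32 val)
{
	__raw_writel(val, base + AON_CTRL_PM_CTRL);
	(void)__raw_readl(base + AON_CTRL_PM_CTRL);	/* read back to flush the write */
}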
|
/linux-6.12.1/scripts/selinux/ |
D | install_policy.sh |
    45 echo "__default__:user_u:s0" > /etc/selinux/dummy/seusers
    46 echo "base_r:base_t:s0" > /etc/selinux/dummy/contexts/failsafe_context
    47 echo "base_r:base_t:s0 base_r:base_t:s0" > /etc/selinux/dummy/default_contexts
    49 client * user_u:base_r:base_t:s0
    50 property * user_u:object_r:base_t:s0
    51 extension * user_u:object_r:base_t:s0
    52 selection * user_u:object_r:base_t:s0
    53 event * user_u:object_r:base_t:s0
|
/linux-6.12.1/net/dccp/ccids/lib/ |
D | packet_history.c |
    154 u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, in __do_track_loss() local
    157 if (!dccp_loss_free(s0, s1, n1)) { /* gap between S0 and S1 */ in __do_track_loss()
    165 u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, in __one_after_loss() local
    175 /* S0 < S2 < S1 */ in __one_after_loss()
    177 if (dccp_loss_free(s0, s2, n2)) { in __one_after_loss()
    181 /* hole is filled: S0, S2, and S1 are consecutive */ in __one_after_loss()
    188 } else { /* gap between S0 and S2 */ in __one_after_loss()
    190 * Reorder history to insert S2 between S0 and S1 in __one_after_loss()
    202 u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, in __two_after_loss() local
    225 /* S0 < S3 < S1 */ in __two_after_loss()
    [all …]
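The S0/S1/S2/S3 notation here is TFRC's loss-detection window: S0 is the last sequence number seen before a gap, and dccp_loss_free() decides whether the hole up to the next packet can be explained by non-data packets (the NDP count) rather than loss. A hedged sketch of that check, assuming DCCP's 48-bit circular sequence arithmetic and the RFC 4340 rule that a hole is loss-free when its length does not exceed the NDP count plus one:

#include <stdbool.h>
#include <stdint.h>

/* Distance from s1 to s2 in 48-bit circular sequence space (sketch). */
static uint64_t dccp_delta_seqno_sketch(uint64_t s1, uint64_t s2)
{
	return (s2 - s1) & ((1ULL << 48) - 1);
}

/* True if every sequence number between s1 and s2 is accounted for by the
 * NDP count, i.e. the gap hides no lost data packets. */
static bool dccp_loss_free_sketch(uint64_t s1, uint64_t s2, uint64_t ndp)
{
	return dccp_delta_seqno_sketch(s1, s2) <= ndp + 1;
}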
|
/linux-6.12.1/arch/arm/boot/dts/aspeed/ |
D | aspeed-bmc-ampere-mtjade.dts |
    117 channels = "s0", "s1";
    126 channels = "s0", "s1";
    135 channels = "s0", "s1";
    144 channels = "s0", "s1";
    153 channels = "s0", "s1";
    162 channels = "s0", "s1";
    171 channels = "s0", "s1";
    180 channels = "s0", "s1";
    189 channels = "s0", "s1";
    198 channels = "s0", "s1";
    [all …]
|
D | aspeed-bmc-ampere-mtmitchell.dts |
    168 channels = "s0", "s1";
    178 channels = "s0", "s1";
    188 channels = "s0", "s1";
    198 channels = "s0", "s1";
    208 channels = "s0", "s1";
    218 channels = "s0", "s1";
    228 channels = "s0", "s1";
    238 channels = "s0", "s1";
    248 channels = "s0", "s1";
    258 channels = "s0", "s1";
    [all …]
|
/linux-6.12.1/arch/arm64/crypto/ |
D | sm3-ce-core.S |
    44 .macro round, ab, s0, t0, t1, i
    49 sm3tt2\ab v9.4s, v5.4s, \s0\().4s, \i
    52 .macro qround, ab, s0, s1, s2, s3, s4
    55 ext v6.16b, \s0\().16b, \s1\().16b, #12
    57 sm3partw1 \s4\().4s, \s0\().4s, \s3\().4s
    60 eor v10.16b, \s0\().16b, \s1\().16b
    62 round \ab, \s0, v11, v12, 0
    63 round \ab, \s0, v12, v11, 1
    64 round \ab, \s0, v11, v12, 2
    65 round \ab, \s0, v12, v11, 3
|
D | sm4-neon-core.S |
    40 #define transpose_4x4(s0, s1, s2, s3) \ argument
    41 zip1 RTMP0.4s, s0.4s, s1.4s; \
    43 zip2 RTMP2.4s, s0.4s, s1.4s; \
    45 zip1 s0.2d, RTMP0.2d, RTMP1.2d; \
    50 #define transpose_4x4_2x(s0, s1, s2, s3, s4, s5, s6, s7) \ argument
    51 zip1 RTMP0.4s, s0.4s, s1.4s; \
    53 zip2 RTMP2.4s, s0.4s, s1.4s; \
    59 zip1 s0.2d, RTMP0.2d, RTMP1.2d; \
    68 #define rotate_clockwise_4x4(s0, s1, s2, s3) \ argument
    69 zip1 RTMP0.4s, s1.4s, s0.4s; \
    [all …]
|
D | sha1-ce-core.S |
    34 .macro add_only, op, ev, rc, s0, dg1
    36 add t1.4s, v\s0\().4s, \rc\().4s
    44 .ifnb \s0
    45 add t0.4s, v\s0\().4s, \rc\().4s
    52 .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
    53 sha1su0 v\s0\().4s, v\s1\().4s, v\s2\().4s
    55 sha1su1 v\s0\().4s, v\s3\().4s
|
D | sha2-ce-core.S |
    29 .macro add_only, ev, rc, s0 argument
    32 add t1.4s, v\s0\().4s, \rc\().4s
    36 .ifnb \s0
    37 add t0.4s, v\s0\().4s, \rc\().4s
    44 .macro add_update, ev, rc, s0, s1, s2, s3
    45 sha256su0 v\s0\().4s, v\s1\().4s
    47 sha256su1 v\s0\().4s, v\s2\().4s, v\s3\().4s
|
/linux-6.12.1/arch/mips/kernel/ |
D | relocate_kernel.S |
    22  PTR_L s0, kexec_indirection_page
    26  PTR_L s2, (s0)
    27  PTR_ADDIU s0, s0, SZREG
    42  /* indirection page, update s0 */
    45  and s0, s2, ~0x2
    126 1: LONG_L s0, (t0)
    127 bne s0, zero,1b
|
/linux-6.12.1/arch/arm/crypto/ |
D | sha2-ce-core.S |
    32 .macro add_only, ev, s0 argument
    34 .ifnb \s0
    39 .ifnb \s0
    40 vadd.u32 ta\ev, q\s0, k\ev
    44 .macro add_update, ev, s0, s1, s2, s3
    45 sha256su0.32 q\s0, q\s1
    47 sha256su1.32 q\s0, q\s2, q\s3
|
D | sha1-ce-core.S |
    36 .macro add_only, op, ev, rc, s0, dg1
    37 .ifnb \s0
    38 vadd.u32 tb\ev, q\s0, \rc
    48 .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
    49 sha1su0.32 q\s0, q\s1, q\s2
    51 sha1su1.32 q\s0, q\s3
|
/linux-6.12.1/arch/x86/include/asm/ |
D | string_64.h |
    33 const __auto_type s0 = s; in memset16() local
    40 return s0; in memset16()
    46 const __auto_type s0 = s; in memset32() local
    53 return s0; in memset32()
    59 const __auto_type s0 = s; in memset64() local
    66 return s0; in memset64()
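In these x86 helpers the destination pointer is saved in s0 before the rep-stos style fill, because the fill advances the destination register while memset16/32/64 must still return the original pointer. A portable C sketch of the same shape (a plain loop standing in for the inline assembly; the function name is illustrative):

#include <stddef.h>
#include <stdint.h>

/* Portable sketch of memset16(): keep the original destination in s0 so it
 * can be returned after the fill, mirroring the x86 inline-asm version. */
static void *memset16_sketch(uint16_t *s, uint16_t v, size_t n)
{
	uint16_t *s0 = s;	/* remember the original destination */

	while (n--)
		*s++ = v;
	return s0;
}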
|
/linux-6.12.1/arch/mips/include/asm/mach-loongson64/ |
D | kernel-entry-init.h |
    80 /* s0:prid s1:initfn */
    87 mfc0 s0, CP0_PRID
    88 andi s0, s0, (PRID_IMP_MASK | PRID_REV_MASK)
    89 beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R1), 1f
    90 beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R2), 1f
|
/linux-6.12.1/drivers/gpu/drm/amd/display/dc/spl/ |
D | dc_spl.c |
    1278 dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512 in spl_set_easf_data()
    1281 dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20 in spl_set_easf_data()
    1284 dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2 in spl_set_easf_data()
    1287 dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3 in spl_set_easf_data()
    1290 dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4 in spl_set_easf_data()
    1293 dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5 in spl_set_easf_data()
    1296 dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6 in spl_set_easf_data()
    1299 dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7 in spl_set_easf_data()
    1303 dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0 in spl_set_easf_data()
    1307 dscl_prog_data->easf_v_bf3_pwl_base_set1 = 62; // S0.6, BF3 Base PWL Segment 1 in spl_set_easf_data()
    [all …]
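The S0.10 and S0.6 annotations describe signed fixed-point register fields (a sign bit plus 10 or 6 value bits, stored as two's complement), which is how 0x600 decodes to -512 and 0x7EC to -20 in the comments above. A small C sketch of that decoding; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Decode an N-bit two's-complement register field (sign bit plus N-1 value
 * bits, e.g. N = 11 for the S0.10 fields above). */
static int32_t decode_signed_field(uint32_t raw, unsigned int bits)
{
	uint32_t sign = 1u << (bits - 1);

	raw &= (1u << bits) - 1;
	return (raw & sign) ? (int32_t)raw - (int32_t)(1u << bits) : (int32_t)raw;
}

int main(void)
{
	printf("%d\n", decode_signed_field(0x600, 11));	/* -512, BF1 PWL segment 0 */
	printf("%d\n", decode_signed_field(0x7EC, 11));	/* -20,  BF1 PWL segment 1 */
	return 0;
}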
|