/linux-6.12.1/tools/testing/selftests/tc-testing/tc-tests/actions/ |
D | police.json | 20 "cmdUnderTest": "$TC actions add action police rate 1kbit burst 10k index 1", 23 "matchPattern": "action order [0-9]*: police 0x1 rate 1Kbit burst 10Kb", 46 "$TC actions add action police rate 4Mbit burst 120k index 9" 48 "cmdUnderTest": "$TC actions add action police rate 8kbit burst 24k index 9", 51 "matchPattern": "action order [0-9]*: police 0x9", 75 "cmdUnderTest": "$TC actions add action police rate 90kbit burst 10k mtu 1k index 98", 78 "matchPattern": "action order [0-9]*: police 0x62 rate 90Kbit burst 10Kb mtu 1Kb", 102 …"cmdUnderTest": "$TC actions add action police rate 90kbit burst 10k mtu 2kb peakrate 100kbit inde… 105 …"matchPattern": "action order [0-9]*: police 0x3 rate 90Kbit burst 10Kb mtu 2Kb peakrate 100Kbit", 129 … "cmdUnderTest": "$TC actions add action police rate 5kbit burst 6kb peakrate 10kbit index 9", [all …]
|
/linux-6.12.1/drivers/media/platform/verisilicon/ |
D | hantro_vp8.c | 42 { 0, -9, 93, 50, -6, 0 }, 44 { 0, -6, 50, 93, -9, 0 }, 53 u32 i, j, k; in hantro_vp8_prob_update() local 84 dst[4] = entropy->mv_probs[0][8 + 9]; in hantro_vp8_prob_update() 85 dst[5] = entropy->mv_probs[0][9 + 9]; in hantro_vp8_prob_update() 86 dst[6] = entropy->mv_probs[1][8 + 9]; in hantro_vp8_prob_update() 87 dst[7] = entropy->mv_probs[1][9 + 9]; in hantro_vp8_prob_update() 91 dst[0] = entropy->mv_probs[i][j + 9 + 0]; in hantro_vp8_prob_update() 92 dst[1] = entropy->mv_probs[i][j + 9 + 1]; in hantro_vp8_prob_update() 93 dst[2] = entropy->mv_probs[i][j + 9 + 2]; in hantro_vp8_prob_update() [all …]
|
/linux-6.12.1/drivers/ata/pata_parport/ |
D | ktti.c | 42 w0(r); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9); in ktti_read_regr() 43 a = r1(); w2(0xc); b = r1(); w2(9); w2(0xc); w2(9); in ktti_read_regr() 49 int k, a, b; in ktti_read_block() local 51 for (k = 0; k < count / 2; k++) { in ktti_read_block() 52 w0(0x10); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9); in ktti_read_block() 53 a = r1(); w2(0xc); b = r1(); w2(9); in ktti_read_block() 54 buf[2*k] = j44(a, b); in ktti_read_block() 55 a = r1(); w2(0xc); b = r1(); w2(9); in ktti_read_block() 56 buf[2*k+1] = j44(a, b); in ktti_read_block() 62 int k; in ktti_write_block() local [all …]
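The ktti protocol reads each data byte as two 4-bit status nibbles (the paired r1() calls) and merges them with j44(). As an illustration only — the bit positions below are an assumption, since each pata_parport adapter wires its status lines differently and defines its own j44() — here is a minimal sketch of that nibble-merge pattern:

#include <stdint.h>

/* Hypothetical nibble merge: low half from the first status read,
 * high half from the second.  The real j44() in each pata_parport
 * driver picks whichever status bits that adapter actually exposes. */
static inline uint8_t merge_nibbles(uint8_t first, uint8_t second)
{
	return ((first >> 4) & 0x0f) | (second & 0xf0);
}

/* Read 'count' bytes, two status reads per byte, mirroring the shape
 * of ktti_read_block() above (port handshaking omitted). */
static void read_block_demo(uint8_t *buf, int count,
			    uint8_t (*read_status)(void))
{
	for (int k = 0; k < count / 2; k++) {
		uint8_t a = read_status(), b = read_status();
		buf[2 * k] = merge_nibbles(a, b);
		a = read_status(), b = read_status();
		buf[2 * k + 1] = merge_nibbles(a, b);
	}
}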
|
D | dstr.c | 117 int k, a, b; in dstr_read_block() local 123 w0(9); in dstr_read_block() 128 for (k = 0; k < count; k++) { in dstr_read_block() 131 buf[k] = j44(a, b); in dstr_read_block() 136 for (k = 0; k < count; k++) { in dstr_read_block() 138 buf[k] = r0(); in dstr_read_block() 145 for (k = 0; k < count; k++) in dstr_read_block() 146 buf[k] = r4(); in dstr_read_block() 151 for (k = 0; k < count / 2; k++) in dstr_read_block() 152 ((u16 *)buf)[k] = r4w(); in dstr_read_block() [all …]
|
/linux-6.12.1/include/uapi/linux/ |
D | keyboard.h | 18 #define NR_SHIFT 9 38 #define KT_ASCII 9 45 #define K(t,v) (((t)<<8)|(v)) macro 49 #define K_F1 K(KT_FN,0) 50 #define K_F2 K(KT_FN,1) 51 #define K_F3 K(KT_FN,2) 52 #define K_F4 K(KT_FN,3) 53 #define K_F5 K(KT_FN,4) 54 #define K_F6 K(KT_FN,5) 55 #define K_F7 K(KT_FN,6) [all …]
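Every keymap entry is a 16-bit code built by K(t,v): the key type in the high byte, the value in the low byte, so K_F1 = K(KT_FN,0) and constants like KT_ASCII (9) select how the low byte is interpreted. A minimal sketch of composing and decomposing such a code — the KTYP/KVAL helpers here are local stand-ins, not necessarily spelled the way the header spells them:

#include <stdio.h>

#define K(t, v)   (((t) << 8) | (v))   /* same shape as the uapi macro    */
#define KTYP(x)   ((x) >> 8)           /* key type lives in the high byte */
#define KVAL(x)   ((x) & 0xff)         /* key value lives in the low byte */

int main(void)
{
	unsigned short code = K(9, 3);   /* e.g. type KT_ASCII (9), value 3 */

	printf("code=0x%04x type=%u val=%u\n",
	       code, KTYP(code), KVAL(code));   /* code=0x0903 type=9 val=3 */
	return 0;
}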
|
/linux-6.12.1/fs/bcachefs/ |
D | fs-io.c | 226 subvol, 0, k, ({ in range_has_data() 227 bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k); in range_has_data() 294 for (i = round_up(start_offset, block_bytes(c)) >> 9; in __bch2_truncate_folio() 295 i < round_down(end_offset, block_bytes(c)) >> 9; in __bch2_truncate_folio() 520 block_start >> 9, block_end >> 9, in bchfs_fpunch() 565 ret = bch2_fcollapse_finsert(c, inode_inum(inode), offset >> 9, len >> 9, in bchfs_fcollapse_finsert() 593 struct bkey_s_c k; in __bchfs_fallocate() local 611 k = bch2_btree_iter_peek_slot(&iter); in __bchfs_fallocate() 612 if ((ret = bkey_err(k))) in __bchfs_fallocate() 616 hole_end = bpos_min(k.k->p, end_pos).offset; in __bchfs_fallocate() [all …]
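The truncate-folio loop only touches filesystem blocks that lie entirely inside [start_offset, end_offset): it rounds the start up and the end down to the block size, then shifts by 9 to get 512-byte sector indices. A small sketch of that bound computation, assuming a power-of-two block size (names are illustrative, not the bcachefs helpers):

#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

/* Round to a power-of-two boundary. */
static inline uint64_t round_up_pow2(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static inline uint64_t round_down_pow2(uint64_t x, uint64_t a) { return x & ~(a - 1); }

/* Sector range of the blocks fully contained in [start, end), byte offsets. */
static void contained_block_sectors(uint64_t start, uint64_t end,
				    uint64_t block_bytes,
				    uint64_t *first, uint64_t *last)
{
	*first = round_up_pow2(start, block_bytes) >> SECTOR_SHIFT;
	*last  = round_down_pow2(end, block_bytes) >> SECTOR_SHIFT;
	/* then iterate i = *first; i < *last; stepping one block of sectors at a time */
}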
|
D | io_read.c | 93 static inline int should_promote(struct bch_fs *c, struct bkey_s_c k, in should_promote() argument 105 if (bch2_bkey_has_target(c, k, opts.promote_target)) in should_promote() 108 if (bkey_extent_is_unwritten(k)) in should_promote() 165 struct bkey_s_c k, in __promote_alloc() argument 206 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) { in __promote_alloc() 233 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in __promote_alloc() 246 btree_id, k); in __promote_alloc() 274 struct bkey_s_c k, in promote_alloc() argument 296 ? bkey_start_pos(k.k) in promote_alloc() 297 : POS(k.k->p.inode, iter.bi_sector); in promote_alloc() [all …]
|
D | io_write.c | 168 new->k.p, BTREE_ITER_slots, old, ret) { in bch2_sum_sector_overwrites() 169 s64 sectors = min(new->k.p.offset, old.k->p.offset) - in bch2_sum_sector_overwrites() 170 max(bkey_start_offset(&new->k), in bch2_sum_sector_overwrites() 171 bkey_start_offset(old.k)); in bch2_sum_sector_overwrites() 174 (bkey_extent_is_allocation(&new->k) - in bch2_sum_sector_overwrites() 175 bkey_extent_is_allocation(old.k)); in bch2_sum_sector_overwrites() 178 *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot in bch2_sum_sector_overwrites() 183 (new->k.p.snapshot != old.k->p.snapshot || in bch2_sum_sector_overwrites() 188 if (bkey_ge(old.k->p, new->k.p)) in bch2_sum_sector_overwrites() 215 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, in bch2_extent_update_i_size_sectors() local [all …]
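bch2_sum_sector_overwrites() measures how many sectors of each existing extent a new extent overwrites: the overlap of two ranges is min of the ends minus max of the starts, clamped to zero when they do not intersect. A minimal sketch of that interval arithmetic (the function name is illustrative, not the bcachefs API):

#include <stdint.h>

/* Sectors shared by [start_a, end_a) and [start_b, end_b); 0 if disjoint. */
static int64_t overlap_sectors(uint64_t start_a, uint64_t end_a,
			       uint64_t start_b, uint64_t end_b)
{
	uint64_t lo = start_a > start_b ? start_a : start_b;	/* max of the starts */
	uint64_t hi = end_a < end_b ? end_a : end_b;		/* min of the ends   */

	return hi > lo ? (int64_t)(hi - lo) : 0;
}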
|
D | fs-io-pagecache.c | 146 static unsigned bkey_to_sector_state(struct bkey_s_c k) in bkey_to_sector_state() argument 148 if (bkey_extent_is_reservation(k)) in bkey_to_sector_state() 150 if (bkey_extent_is_allocation(k.k)) in bkey_to_sector_state() 205 inum.subvol, BTREE_ITER_slots, k, ({ in bch2_folio_set() 206 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k); in bch2_folio_set() 207 unsigned state = bkey_to_sector_state(k); in bch2_folio_set() 213 unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - in bch2_folio_set() 215 unsigned folio_len = min(k.k->p.offset, folio_end) - in bch2_folio_set() 218 BUG_ON(k.k->p.offset < folio_start); in bch2_folio_set() 219 BUG_ON(bkey_start_offset(k.k) > folio_end); in bch2_folio_set() [all …]
|
D | move.c | 39 static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k, in trace_move_extent2() argument 46 bch2_bkey_val_to_text(&buf, c, k); in trace_move_extent2() 54 static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k) in trace_move_extent_read2() argument 59 bch2_bkey_val_to_text(&buf, c, k); in trace_move_extent_read2() 124 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k)); in move_write() 246 struct bkey_s_c k, in bch2_move_extent() argument 252 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_move_extent() 256 unsigned sectors = k.k->size, pages; in bch2_move_extent() 259 trace_move_extent2(c, k, &io_opts, &data_opts); in bch2_move_extent() 264 bch2_data_update_opts_normalize(k, &data_opts); in bch2_move_extent() [all …]
|
D | io_misc.c | 35 struct bkey_s_c k; in bch2_extent_fallocate() local 46 k = bch2_btree_iter_peek_slot(iter); in bch2_extent_fallocate() 47 ret = bkey_err(k); in bch2_extent_fallocate() 51 sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset); in bch2_extent_fallocate() 53 (int) bch2_bkey_nr_ptrs_fully_allocated(k)); in bch2_extent_fallocate() 63 bch2_bkey_buf_reassemble(&old, c, k); in bch2_extent_fallocate() 69 reservation = bkey_reservation_init(new.k); in bch2_extent_fallocate() 70 reservation->k.p = iter->pos; in bch2_extent_fallocate() 71 bch2_key_resize(&reservation->k, sectors); in bch2_extent_fallocate() 82 e = bkey_extent_init(new.k); in bch2_extent_fallocate() [all …]
|
D | btree_io.c | 82 struct bkey_packed *k, *p; in verify_no_dups() local 87 for (p = start, k = bkey_p_next(start); in verify_no_dups() 88 k != end; in verify_no_dups() 89 p = k, k = bkey_p_next(k)) { in verify_no_dups() 91 struct bkey r = bkey_unpack_key(b, k); in verify_no_dups() 100 struct bkey_packed *k; in set_needs_whiteout() local 102 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) in set_needs_whiteout() 103 k->needs_whiteout = v; in set_needs_whiteout() 172 struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k; in bch2_sort_whiteouts() local 183 for (k = unwritten_whiteouts_start(b); in bch2_sort_whiteouts() [all …]
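verify_no_dups() walks the packed keys pairwise — p trailing one step behind k — and checks each key against the previous one. The same trailing-pointer pattern on a plain sorted array, as a rough sketch:

#include <assert.h>
#include <stddef.h>

/* Assert that 'keys' is strictly increasing, i.e. sorted with no duplicates. */
static void verify_no_dups_demo(const int *keys, size_t n)
{
	for (size_t i = 1; i < n; i++) {
		const int *p = &keys[i - 1];	/* previous key */
		const int *k = &keys[i];	/* current key  */

		assert(*p < *k);
	}
}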
|
/linux-6.12.1/arch/powerpc/include/asm/book3s/64/ |
D | hash-4k.h | 5 #define H_PTE_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 4KB = 2MB 7 #define H_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 256MB = 128GB 8 #define H_PGD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 128GB = 64TB 11 * Each context is 512TB. But on 4k we restrict our max TASK size to 64TB 51 * Not supported by 4k linux page size 72 * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() 78 * With 4K page size the real_pte machinery is all nops. 98 * 4K PTE format is different from 64K PTE format. Saving the hash_slot is just 99 * a matter of returning the PTE bits that need to be modified. On 64K PTE,
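Each index size n means that level holds 2^n eight-byte entries (an 8B << n table) and multiplies the coverage of the level below by 2^n, which is where the 2MB/128GB/64TB figures in the comments come from. A short check of that arithmetic for the 4K values shown, done with plain shifts:

#include <stdio.h>

int main(void)
{
	unsigned long long page = 4ULL << 10;		/* 4KB base page          */
	unsigned long long pte  = page << 9;		/* 2^9 x 4KB   = 2MB      */
	/* (the PMD level sits in between; it is not in the matched lines above) */
	unsigned long long pud_span = 256ULL << 20;	/* per-PUD-entry span from the comment */
	unsigned long long pud  = pud_span << 9;	/* 2^9 x 256MB = 128GB    */
	unsigned long long pgd  = pud << 9;		/* 2^9 x 128GB = 64TB     */

	printf("PTE level maps %lluMB\n", pte >> 20);	/* 2   */
	printf("PUD level maps %lluGB\n", pud >> 30);	/* 128 */
	printf("PGD level maps %lluTB\n", pgd >> 40);	/* 64  */
	return 0;
}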
|
D | radix-4k.h | 6 * For 4K page size supported index is 13/9/9/9 8 #define RADIX_PTE_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 4K = 2MB 9 #define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB 10 #define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
|
D | radix-64k.h | 6 * For 64K page size supported index is 13/9/9/5 8 #define RADIX_PTE_INDEX_SIZE 5 // size: 8B << 5 = 256B, maps 2^5 x 64K = 2MB 9 #define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB 10 #define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
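Note how the bottom-level index shrinks from 9 to 5 when the base page grows from 4K to 64K: 2^5 entries of 64K is still 2MB, so a PMD entry spans the same 2MB on both configurations and only the leaf table gets smaller (8B << 5 = 256B). A quick sketch of that equivalence:

#include <assert.h>

int main(void)
{
	unsigned long long span_4k  = (4ULL  << 10) << 9;  /* 2^9 x 4K  */
	unsigned long long span_64k = (64ULL << 10) << 5;  /* 2^5 x 64K */

	assert(span_4k == span_64k);		/* both are 2MB */
	assert(span_4k == 2ULL << 20);
	return 0;
}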
|
/linux-6.12.1/net/ceph/ |
D | ceph_hash.c | 25 const unsigned char *k = (const unsigned char *)str; in ceph_str_hash_rjenkins() local 37 a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) + in ceph_str_hash_rjenkins() 38 ((__u32)k[3] << 24)); in ceph_str_hash_rjenkins() 39 b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) + in ceph_str_hash_rjenkins() 40 ((__u32)k[7] << 24)); in ceph_str_hash_rjenkins() 41 c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) + in ceph_str_hash_rjenkins() 42 ((__u32)k[11] << 24)); in ceph_str_hash_rjenkins() 44 k = k + 12; in ceph_str_hash_rjenkins() 52 c = c + ((__u32)k[10] << 24); in ceph_str_hash_rjenkins() 55 c = c + ((__u32)k[9] << 16); in ceph_str_hash_rjenkins() [all …]
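The Jenkins mix consumes the string 12 bytes at a time, assembling each group of four bytes into a little-endian 32-bit word before adding it to a, b or c — that is what the k[0] + (k[1] << 8) + (k[2] << 16) + (k[3] << 24) expressions do. A minimal sketch of that assembly step:

#include <stdint.h>

/* Build a 32-bit word from four bytes, least-significant byte first,
 * independent of the host's endianness. */
static inline uint32_t load_le32(const unsigned char *k)
{
	return (uint32_t)k[0]         |
	       ((uint32_t)k[1] << 8)  |
	       ((uint32_t)k[2] << 16) |
	       ((uint32_t)k[3] << 24);
}

/* One 12-byte block feeds the three accumulators, as in ceph_str_hash_rjenkins(). */
static void mix_block_demo(const unsigned char *k,
			   uint32_t *a, uint32_t *b, uint32_t *c)
{
	*a += load_le32(k);
	*b += load_le32(k + 4);
	*c += load_le32(k + 8);
	/* ...followed by the hash's mixing rounds on a, b, c */
}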
|
/linux-6.12.1/crypto/ |
D | serpent_generic.c | 25 ({ b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b, 11); k[j] = b; }) 28 ({ x0 = k[i]; x1 = k[i+1]; x2 = k[i+2]; x3 = k[i+3]; }) 31 ({ k[i] = x0; k[i+1] = x1; k[i+2] = x2; k[i+3] = x3; }) 36 #define K(x0, x1, x2, x3, i) ({ \ macro 37 x3 ^= k[4*(i)+3]; x2 ^= k[4*(i)+2]; \ 38 x1 ^= k[4*(i)+1]; x0 ^= k[4*(i)+0]; \ 48 x0 ^= x3; x2 ^= x4; x3 ^= k[4*i+3]; \ 49 x1 ^= k[4*i+1]; x0 = rol32(x0, 5); x2 = rol32(x2, 22);\ 50 x0 ^= k[4*i+0]; x2 ^= k[4*i+2]; \ 54 x0 ^= k[4*i+0]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; \ [all …]
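The keyiter() macro is one step of Serpent's prekey expansion: each new word is the XOR of the words 8, 5, 3 and 1 positions back, the golden-ratio constant PHI and the prekey's own index, rotated left by 11. A rough standalone sketch of that recurrence, written from the published algorithm rather than lifted from the kernel's macro plumbing:

#include <stdint.h>

#define PHI 0x9e3779b9U	/* golden-ratio constant used by Serpent */

static inline uint32_t rol32(uint32_t x, unsigned r)
{
	return (x << r) | (x >> (32 - r));
}

/* Expand w[0..7] (the padded user key) into prekey words w[8..n-1];
 * the prekey index in the spec starts at 0, hence the (i - 8). */
static void serpent_prekeys_demo(uint32_t *w, unsigned n)
{
	for (unsigned i = 8; i < n; i++)
		w[i] = rol32(w[i - 8] ^ w[i - 5] ^ w[i - 3] ^ w[i - 1] ^
			     PHI ^ (i - 8), 11);
}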
|
D | sm3.c | 15 static const u32 ____cacheline_aligned K[64] = { variable 44 b = rol32((b), 9); \ 61 #define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17)) 67 ^ W[(i-9) & 0x0f] \ 85 R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); in sm3_transform() 86 R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); in sm3_transform() 87 R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); in sm3_transform() 88 R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); in sm3_transform() 89 R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); in sm3_transform() 90 R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); in sm3_transform() [all …]
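P0 is SM3's permutation applied when updating the state (x ^ x<<<9 ^ x<<<17), and the W[(i-9) & 0x0f] term shows the message schedule being kept in a 16-word circular buffer instead of a full 68-word array. A small sketch of the two permutations — P1, used only in the expansion and not visible in the matched lines, is taken from the SM3 specification and should be read as such:

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, unsigned r)
{
	return (x << r) | (x >> (32 - r));
}

/* P0: applied to the TT2 value when producing the new state word. */
static inline uint32_t sm3_p0(uint32_t x)
{
	return x ^ rol32(x, 9) ^ rol32(x, 17);
}

/* P1: used by the message expansion (per the SM3 spec). */
static inline uint32_t sm3_p1(uint32_t x)
{
	return x ^ rol32(x, 15) ^ rol32(x, 23);
}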
|
D | cast5_generic.c | 324 * Rounds 3, 6, 9, 12, and 15 use f function Type 3. in __cast5_encrypt() 336 t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); in __cast5_encrypt() 379 t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); in __cast5_decrypt() 400 static void key_schedule(u32 *x, u32 *z, u32 *k) in key_schedule() argument 411 s5[xi(9)]; in key_schedule() 412 z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^ in key_schedule() 414 k[0] = s5[zi(8)] ^ s6[zi(9)] ^ s7[zi(7)] ^ sb8[zi(6)] ^ s5[zi(2)]; in key_schedule() 415 k[1] = s5[zi(10)] ^ s6[zi(11)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ in key_schedule() 417 k[2] = s5[zi(12)] ^ s6[zi(13)] ^ s7[zi(3)] ^ sb8[zi(2)] ^ in key_schedule() 418 s7[zi(9)]; in key_schedule() [all …]
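The repeated `t = l; l = r; r = t ^ F1(r, Km[i], Kr[i])` line is a textbook Feistel step: the halves swap and the new right half is the old left half XORed with the round function of the old right half. A generic sketch of one such round, with a placeholder standing in for CAST5's F1/F2/F3:

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, unsigned r)
{
	return r ? (x << r) | (x >> (32 - r)) : x;
}

/* Placeholder round function; the real F1/F2/F3 mix Km, Kr and the S-boxes. */
static uint32_t f_demo(uint32_t r, uint32_t km, uint8_t kr)
{
	return rol32(r ^ km, kr & 31);
}

/* One Feistel round: halves swap, new right = old left ^ F(old right). */
static void feistel_round(uint32_t *l, uint32_t *r, uint32_t km, uint8_t kr)
{
	uint32_t t = *l;

	*l = *r;
	*r = t ^ f_demo(*r, km, kr);
}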
|
D | md4.c | 63 #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) argument 64 #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s)) argument 65 #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s)) argument 85 ROUND1(d, a, b, c, in[9], 7); in md4_transform() 95 ROUND2(c, d, a, b, in[8], 9); in md4_transform() 99 ROUND2(c, d, a, b, in[9], 9); in md4_transform() 103 ROUND2(c, d, a, b, in[10], 9); in md4_transform() 107 ROUND2(c, d, a, b, in[11], 9); in md4_transform() 111 ROUND3(d, a, b, c, in[8], 9); in md4_transform() 115 ROUND3(d, a, b, c, in[10], 9); in md4_transform() [all …]
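MD4's three rounds differ only in the boolean function and the additive constant: round 1 uses F (select) with no constant, round 2 uses G (majority) with 0x5A827999, round 3 uses H (parity) with 0x6ED9EBA1, and each step left-rotates the running sum (the kernel's lshift helper). A compact sketch of those pieces:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, unsigned s)
{
	return (x << s) | (x >> (32 - s));
}

/* MD4 boolean functions. */
static inline uint32_t F(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | (~x & z); }          /* select   */
static inline uint32_t G(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | (x & z) | (y & z); } /* majority */
static inline uint32_t H(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; }                   /* parity   */

/* One round-2 step, matching the ROUND2(a,b,c,d,k,s) macro above. */
static inline uint32_t md4_round2_step(uint32_t a, uint32_t b, uint32_t c,
				       uint32_t d, uint32_t k, unsigned s)
{
	return rotl32(a + G(b, c, d) + k + 0x5A827999U, s);
}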
|
/linux-6.12.1/tools/testing/selftests/bpf/progs/ |
D | test_jhash.h | 39 const unsigned char *k = key; in jhash() local 44 a += *(volatile u32 *)(k); in jhash() 45 b += *(volatile u32 *)(k + 4); in jhash() 46 c += *(volatile u32 *)(k + 8); in jhash() 49 k += 12; in jhash() 52 case 12: c += (u32)k[11]<<24; in jhash() 53 case 11: c += (u32)k[10]<<16; in jhash() 54 case 10: c += (u32)k[9]<<8; in jhash() 55 case 9: c += k[8]; in jhash() 56 case 8: b += (u32)k[7]<<24; in jhash() [all …]
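The break-less switch is deliberate: case 12 adds the twelfth trailing byte and then falls through to case 11, and so on down, so whatever tail is left after the 12-byte blocks gets folded into a, b and c. A rough sketch of the same fallthrough accumulation on a shorter (up to 3-byte) tail:

#include <stdint.h>

/* Fold the last 'tail_len' (< 4) bytes into an accumulator by falling
 * through the cases, mirroring the jhash tail switch above. */
static uint32_t fold_tail_demo(const unsigned char *k, unsigned tail_len,
			       uint32_t acc)
{
	switch (tail_len) {
	case 3: acc += (uint32_t)k[2] << 16;	/* fall through */
	case 2: acc += (uint32_t)k[1] << 8;	/* fall through */
	case 1: acc += k[0];
	case 0: break;
	}
	return acc;
}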
|
/linux-6.12.1/include/trace/events/ |
D | bcache.h | 30 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 42 TP_PROTO(struct bkey *k), 43 TP_ARGS(k), 53 __entry->inode = KEY_INODE(k); 54 __entry->offset = KEY_OFFSET(k); 55 __entry->size = KEY_SIZE(k); 56 __entry->dirty = KEY_DIRTY(k); 104 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 139 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 170 __entry->nr_sector = bio->bi_iter.bi_size >> 9; [all …]
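The recurring `bio->bi_iter.bi_size >> 9` converts a byte count into 512-byte sectors, since bi_size is in bytes and the trace fields record sectors; the bkey fields come from the KEY_INODE/KEY_OFFSET/KEY_SIZE accessors. A trivial sketch of the byte-to-sector conversion:

#include <stdint.h>

#define SECTOR_SHIFT 9		/* 1 sector = 512 bytes */

static inline uint64_t bytes_to_sectors(uint64_t bytes)
{
	return bytes >> SECTOR_SHIFT;	/* e.g. 4096 bytes -> 8 sectors */
}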
|
/linux-6.12.1/drivers/media/usb/pwc/ |
D | pwc-dec23.c | 92 int compression_mode, j, k, bit, pw; in build_table_color() local 104 for (k = 0; k < 16; k++) { in build_table_color() 105 if (k == 0) in build_table_color() 107 else if (k >= 1 && k < 3) in build_table_color() 109 else if (k >= 3 && k < 6) in build_table_color() 111 else if (k >= 6 && k < 10) in build_table_color() 112 bit = (r[0] >> 9) & 7; in build_table_color() 113 else if (k >= 10 && k < 13) in build_table_color() 115 else if (k >= 13 && k < 15) in build_table_color() 119 if (k == 0) in build_table_color() [all …]
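Each branch of that k-indexed ladder pulls a small bit field out of the packed value r[0] — `(r[0] >> 9) & 7`, for example, reads the 3-bit field that starts at bit 9. A one-helper sketch of the general extraction:

#include <stdint.h>

/* Extract 'width' bits (width < 32) starting at bit 'pos' (LSB = bit 0). */
static inline uint32_t get_bits(uint32_t v, unsigned pos, unsigned width)
{
	return (v >> pos) & ((1U << width) - 1);   /* get_bits(v, 9, 3) == (v >> 9) & 7 */
}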
|
/linux-6.12.1/fs/smb/common/ |
D | cifs_md4.c | 51 #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) argument 52 #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s)) argument 53 #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s)) argument 73 ROUND1(d, a, b, c, in[9], 7); in md4_transform() 83 ROUND2(c, d, a, b, in[8], 9); in md4_transform() 87 ROUND2(c, d, a, b, in[9], 9); in md4_transform() 91 ROUND2(c, d, a, b, in[10], 9); in md4_transform() 95 ROUND2(c, d, a, b, in[11], 9); in md4_transform() 99 ROUND3(d, a, b, c, in[8], 9); in md4_transform() 103 ROUND3(d, a, b, c, in[10], 9); in md4_transform() [all …]
|
/linux-6.12.1/drivers/md/dm-vdo/ |
D | murmurhash3.c | 22 static __always_inline u64 fmix64(u64 k) in fmix64() argument 24 k ^= k >> 33; in fmix64() 25 k *= 0xff51afd7ed558ccdLLU; in fmix64() 26 k ^= k >> 33; in fmix64() 27 k *= 0xc4ceb9fe1a85ec53LLU; in fmix64() 28 k ^= k >> 33; in fmix64() 30 return k; in fmix64() 100 k2 ^= ((u64)tail[9]) << 8; in murmurhash3_128() 102 case 9: in murmurhash3_128()
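fmix64() is MurmurHash3's 64-bit finalizer: three xor-shift-by-33 steps interleaved with two multiplies by fixed odd constants, diffusing every input bit across the whole word before the hash is returned. It is self-contained enough to lift almost verbatim; a standalone version with a tiny usage example:

#include <stdint.h>
#include <stdio.h>

/* MurmurHash3 64-bit finalization mix (avalanche step). */
static inline uint64_t fmix64(uint64_t k)
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdULL;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53ULL;
	k ^= k >> 33;

	return k;
}

int main(void)
{
	/* Nearby inputs end up far apart after the mix. */
	printf("%016llx\n", (unsigned long long)fmix64(1));
	printf("%016llx\n", (unsigned long long)fmix64(2));
	return 0;
}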
|