Searched full:partial (Results 1 – 25 of 1351) sorted by relevance

/linux-6.12.1/fs/minix/
itree_common.c
158 Indirect *partial; in get_block() local
166 partial = get_branch(inode, depth, offsets, chain, &err); in get_block()
169 if (!partial) { in get_block()
173 partial = chain+depth-1; /* the whole chain */ in get_block()
180 while (partial > chain) { in get_block()
181 brelse(partial->bh); in get_block()
182 partial--; in get_block()
196 left = (chain + depth) - partial; in get_block()
197 err = alloc_branch(inode, left, offsets+(partial-chain), partial); in get_block()
201 if (splice_branch(inode, chain, partial, left) < 0) in get_block()
[all …]
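
The minix get_block() hits above show the indirect-block walk shared by several filesystems in these results (sysv, ext2 and ext4 below follow the same shape): get_branch() returns NULL once the whole chain is mapped, or a pointer to the first unmapped ("partial") triple, and the caller either releases the buffer heads it read on the error path or allocates the `left` missing levels. A minimal userspace sketch of that pointer arithmetic, with hypothetical Indirect/brelse stand-ins and a fixed depth:

    #include <stdio.h>
    #include <stddef.h>

    struct buffer_head;               /* opaque stand-in */
    typedef struct {
        unsigned key;                 /* block number found at this level */
        struct buffer_head *bh;       /* buffer the key was read from */
    } Indirect;

    static void brelse(struct buffer_head *bh) { (void)bh; /* release stand-in */ }

    int main(void)
    {
        enum { DEPTH = 3 };
        Indirect chain[DEPTH] = {0};
        /* Suppose get_branch() stopped at level 1: level 0 is mapped. */
        Indirect *partial = chain + 1;

        /* Levels still to allocate, as passed to alloc_branch(). */
        ptrdiff_t left = (chain + DEPTH) - partial;
        printf("levels to allocate: %td\n", left);    /* prints 2 */

        /* Error path: drop every buffer read so far, back to chain[0]. */
        while (partial > chain) {
            brelse(partial->bh);
            partial--;
        }
        return 0;
    }
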
/linux-6.12.1/include/crypto/
sha1_base.h
41 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_base_do_update() local
45 if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) { in sha1_base_do_update()
48 if (partial) { in sha1_base_do_update()
49 int p = SHA1_BLOCK_SIZE - partial; in sha1_base_do_update()
51 memcpy(sctx->buffer + partial, data, p); in sha1_base_do_update()
65 partial = 0; in sha1_base_do_update()
68 memcpy(sctx->buffer + partial, data, len); in sha1_base_do_update()
79 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_base_do_finalize() local
81 sctx->buffer[partial++] = 0x80; in sha1_base_do_finalize()
82 if (partial > bit_offset) { in sha1_base_do_finalize()
[all …]
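
Every *_base_do_update() hit in this header family (the sm3, sha256 and sha512 results follow below) is the same partial-block buffering scheme: `count % BLOCK_SIZE` bytes sit in the context buffer; new data first tops that buffer up to a full block, whole blocks are then hashed straight from the caller's pointer, and the tail is stashed for the next call. A generic sketch, assuming a 64-byte block and a stubbed compression function:

    #include <string.h>
    #include <stddef.h>

    #define BLOCK_SIZE 64   /* SHA-1, SHA-256 and SM3 all use 64-byte blocks */

    struct hash_state {
        unsigned long long count;            /* total bytes seen */
        unsigned char buffer[BLOCK_SIZE];    /* partial block */
    };

    /* Stand-in for the real compression function. */
    static void process_blocks(struct hash_state *s,
                               const unsigned char *data, size_t nblocks)
    {
        (void)s; (void)data; (void)nblocks;
    }

    static void do_update(struct hash_state *sctx,
                          const unsigned char *data, size_t len)
    {
        unsigned int partial = sctx->count % BLOCK_SIZE;

        sctx->count += len;
        if (partial + len >= BLOCK_SIZE) {
            if (partial) {
                /* Top the half-filled buffer up to a full block first. */
                unsigned int p = BLOCK_SIZE - partial;
                memcpy(sctx->buffer + partial, data, p);
                process_blocks(sctx, sctx->buffer, 1);
                data += p;
                len -= p;
            }
            /* Hash whole blocks directly from the caller's data. */
            if (len >= BLOCK_SIZE) {
                process_blocks(sctx, data, len / BLOCK_SIZE);
                data += len - len % BLOCK_SIZE;
                len %= BLOCK_SIZE;
            }
            partial = 0;
        }
        /* Less than a block left: buffer it for the next call. */
        memcpy(sctx->buffer + partial, data, len);
    }

    int main(void)
    {
        struct hash_state s = {0};
        unsigned char msg[150] = {0};
        do_update(&s, msg, 100);   /* hashes one block, buffers 36 bytes */
        do_update(&s, msg, 50);    /* completes a block, buffers 22 */
        return 0;
    }
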
sm3_base.h
44 unsigned int partial = sctx->count % SM3_BLOCK_SIZE; in sm3_base_do_update() local
48 if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { in sm3_base_do_update()
51 if (partial) { in sm3_base_do_update()
52 int p = SM3_BLOCK_SIZE - partial; in sm3_base_do_update()
54 memcpy(sctx->buffer + partial, data, p); in sm3_base_do_update()
68 partial = 0; in sm3_base_do_update()
71 memcpy(sctx->buffer + partial, data, len); in sm3_base_do_update()
82 unsigned int partial = sctx->count % SM3_BLOCK_SIZE; in sm3_base_do_finalize() local
84 sctx->buffer[partial++] = 0x80; in sm3_base_do_finalize()
85 if (partial > bit_offset) { in sm3_base_do_finalize()
[all …]
sha256_base.h
42 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in lib_sha256_base_do_update() local
46 if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { in lib_sha256_base_do_update()
49 if (partial) { in lib_sha256_base_do_update()
50 int p = SHA256_BLOCK_SIZE - partial; in lib_sha256_base_do_update()
52 memcpy(sctx->buf + partial, data, p); in lib_sha256_base_do_update()
66 partial = 0; in lib_sha256_base_do_update()
69 memcpy(sctx->buf + partial, data, len); in lib_sha256_base_do_update()
89 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in lib_sha256_base_do_finalize() local
91 sctx->buf[partial++] = 0x80; in lib_sha256_base_do_finalize()
92 if (partial > bit_offset) { in lib_sha256_base_do_finalize()
[all …]
sha512_base.h
62 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_base_do_update() local
68 if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) { in sha512_base_do_update()
71 if (partial) { in sha512_base_do_update()
72 int p = SHA512_BLOCK_SIZE - partial; in sha512_base_do_update()
74 memcpy(sctx->buf + partial, data, p); in sha512_base_do_update()
88 partial = 0; in sha512_base_do_update()
91 memcpy(sctx->buf + partial, data, len); in sha512_base_do_update()
102 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_base_do_finalize() local
104 sctx->buf[partial++] = 0x80; in sha512_base_do_finalize()
105 if (partial > bit_offset) { in sha512_base_do_finalize()
[all …]
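
The matching *_do_finalize() helpers (visible in all four excerpts above) append the 0x80 terminator at offset `partial`; if that lands past `bit_offset`, the slot reserved for the message length at the end of the block, the padding spills into one extra block. A sketch of that tail handling for a 64-byte block and a 64-bit big-endian length (SHA-512 itself uses a 128-byte block and a 128-bit length):

    #include <string.h>
    #include <stddef.h>

    #define BLOCK_SIZE 64
    #define BIT_OFFSET (BLOCK_SIZE - sizeof(unsigned long long))

    struct hash_state {
        unsigned long long count;
        unsigned char buffer[BLOCK_SIZE];
    };

    static void process_blocks(struct hash_state *s,
                               const unsigned char *data, size_t nblocks)
    {
        (void)s; (void)data; (void)nblocks;   /* compression stand-in */
    }

    static void do_finalize(struct hash_state *sctx)
    {
        unsigned int partial = sctx->count % BLOCK_SIZE;

        sctx->buffer[partial++] = 0x80;        /* terminator bit */
        if (partial > BIT_OFFSET) {
            /* No room left for the length: pad out, emit an extra block. */
            memset(sctx->buffer + partial, 0, BLOCK_SIZE - partial);
            process_blocks(sctx, sctx->buffer, 1);
            partial = 0;
        }
        memset(sctx->buffer + partial, 0, BIT_OFFSET - partial);

        /* Message length in bits, big-endian, in the final 8 bytes. */
        unsigned long long bits = sctx->count << 3;
        for (int i = 0; i < 8; i++)
            sctx->buffer[BLOCK_SIZE - 1 - i] = (unsigned char)(bits >> (8 * i));
        process_blocks(sctx, sctx->buffer, 1);
    }

    int main(void) { struct hash_state s = {0}; do_finalize(&s); return 0; }
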
/linux-6.12.1/fs/sysv/
itree.c
214 Indirect *partial; in get_block() local
222 partial = get_branch(inode, depth, offsets, chain, &err); in get_block()
225 if (!partial) { in get_block()
230 partial = chain+depth-1; /* the whole chain */ in get_block()
237 while (partial > chain) { in get_block()
238 brelse(partial->bh); in get_block()
239 partial--; in get_block()
253 left = (chain + depth) - partial; in get_block()
254 err = alloc_branch(inode, left, offsets+(partial-chain), partial); in get_block()
258 if (splice_branch(inode, chain, partial, left) < 0) in get_block()
[all …]
/linux-6.12.1/fs/ext4/
indirect.c
244 * @partial: pointer to the last triple within a chain
252 Indirect *partial) in ext4_find_goal() argument
260 goal = ext4_find_near(inode, partial); in ext4_find_goal()
316 * we had read the existing part of chain and partial points to the last
538 Indirect *partial; in ext4_ind_map_blocks() local
554 partial = ext4_get_branch(inode, depth, offsets, chain, &err); in ext4_ind_map_blocks()
557 if (!partial) { in ext4_ind_map_blocks()
580 * Count number blocks in a subtree under 'partial'. At each in ext4_ind_map_blocks()
586 for (i = partial - chain + 1; i < depth; i++) in ext4_ind_map_blocks()
620 ar.goal = ext4_find_goal(inode, map->m_lblk, partial); in ext4_ind_map_blocks()
[all …]
/linux-6.12.1/drivers/crypto/
padlock-sha.c
284 unsigned int partial, done; in padlock_sha1_update_nano() local
291 partial = sctx->count & 0x3f; in padlock_sha1_update_nano()
297 if ((partial + len) >= SHA1_BLOCK_SIZE) { in padlock_sha1_update_nano()
300 if (partial) { in padlock_sha1_update_nano()
301 done = -partial; in padlock_sha1_update_nano()
302 memcpy(sctx->buffer + partial, data, in padlock_sha1_update_nano()
321 partial = 0; in padlock_sha1_update_nano()
324 memcpy(sctx->buffer + partial, src, len - done); in padlock_sha1_update_nano()
332 unsigned int partial, padlen; in padlock_sha1_final_nano() local
339 partial = state->count & 0x3f; in padlock_sha1_final_nano()
[all …]
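
padlock_sha1_update_nano() — like the powerpc and octeon sha1/sha256 results further down — uses a slightly different idiom: a signed running offset that starts at `done = -partial`, so that `done + 64` is exactly the number of bytes needed to top the buffer up, and `data + done` always points at the first unconsumed input byte. A userspace sketch of that negative-offset walk, with a stubbed block function:

    #include <string.h>
    #include <stddef.h>

    #define BLOCK 64

    struct state {
        unsigned char buffer[BLOCK];
    };

    static void process_block(const unsigned char *src) { (void)src; }

    /* 'partial' bytes already buffered; consume 'len' new bytes. */
    static void update(struct state *s, unsigned int partial,
                       const unsigned char *data, unsigned int len)
    {
        const unsigned char *src = data;
        int done = 0;

        if (partial + len > BLOCK - 1) {
            if (partial) {
                /* done = -partial: done + BLOCK is the top-up size, and
                 * data + done stays the first unconsumed position. */
                done = -(int)partial;
                memcpy(s->buffer + partial, data, done + BLOCK);
                src = s->buffer;
            }
            do {
                process_block(src);
                done += BLOCK;
                src = data + done;
            } while (done + (BLOCK - 1) < (int)len);
            partial = 0;
        }
        memcpy(s->buffer + partial, src, len - done);
    }

    int main(void)
    {
        struct state s = {{0}};
        unsigned char msg[200] = {0};
        update(&s, 10, msg, 100);   /* tops up 54, hashes 1 block, keeps 46 */
        return 0;
    }
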
/linux-6.12.1/arch/arm64/crypto/
sha3-ce-glue.c
43 if ((sctx->partial + len) >= sctx->rsiz) { in sha3_update()
46 if (sctx->partial) { in sha3_update()
47 int p = sctx->rsiz - sctx->partial; in sha3_update()
49 memcpy(sctx->buf + sctx->partial, data, p); in sha3_update()
56 sctx->partial = 0; in sha3_update()
75 memcpy(sctx->buf + sctx->partial, data, len); in sha3_update()
76 sctx->partial += len; in sha3_update()
91 sctx->buf[sctx->partial++] = 0x06; in sha3_final()
92 memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial); in sha3_final()
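
The arm64 SHA-3 glue keeps `partial` in the state itself; there is no message-length trailer, since the sponge absorbs `rsiz`-byte blocks (rsiz being the rate). Finalization appends the SHA-3 domain byte 0x06, zero-fills the rest of the rate block, and sets the closing bit of the pad10*1 rule in the block's last byte. A sketch of that padding step, with a stubbed Keccak absorb:

    #include <string.h>

    struct sha3_state {
        unsigned int partial;        /* bytes buffered so far */
        unsigned int rsiz;           /* rate in bytes, 136 for SHA3-256 */
        unsigned char buf[200];      /* up to the 1600-bit Keccak width */
    };

    static void keccak_absorb(struct sha3_state *s) { (void)s; /* stand-in */ }

    static void sha3_pad_final(struct sha3_state *sctx)
    {
        /* Domain-separation byte, then zero fill up to the rate... */
        sctx->buf[sctx->partial++] = 0x06;
        memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial);
        /* ...and the closing bit of the pad10*1 rule. */
        sctx->buf[sctx->rsiz - 1] |= 0x80;
        keccak_absorb(sctx);
    }

    int main(void)
    {
        struct sha3_state s = { .partial = 5, .rsiz = 136, .buf = {0} };
        sha3_pad_final(&s);
        return 0;
    }
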
/linux-6.12.1/arch/powerpc/crypto/
sha1.c
30 unsigned int partial, done; in powerpc_sha1_update() local
33 partial = sctx->count & 0x3f; in powerpc_sha1_update()
38 if ((partial + len) > 63) { in powerpc_sha1_update()
40 if (partial) { in powerpc_sha1_update()
41 done = -partial; in powerpc_sha1_update()
42 memcpy(sctx->buffer + partial, data, done + 64); in powerpc_sha1_update()
52 partial = 0; in powerpc_sha1_update()
54 memcpy(sctx->buffer + partial, src, len - done); in powerpc_sha1_update()
/linux-6.12.1/Documentation/devicetree/bindings/fpga/
fpga-region.yaml
25 FPGA Regions represent FPGA's and partial reconfiguration regions of FPGA's in
41 Partial Reconfiguration (PR)
46 Partial Reconfiguration Region (PRR)
58 * Also called a "partial bit stream"
71 * During Partial Reconfiguration of a specific region, that region's bridge
86 * A base image may set up a set of partial reconfiguration regions that may
157 For partial reconfiguration (PR), each PR region will have an FPGA Region.
194 * Partial reconfiguration with bridges in the FPGA
198 region while the buses are enabled for other sections. Before any partial
207 constraints required to make partial reconfiguration work[1] [2] [3], but a few
[all …]
/linux-6.12.1/fs/ext2/
inode.c
325 * @partial: pointer to the last triple within a chain
331 Indirect *partial) in ext2_find_goal() argument
346 return ext2_find_near(inode, partial); in ext2_find_goal()
466 * we had read the existing part of chain and partial points to the last
632 Indirect *partial; in ext2_get_blocks() local
648 partial = ext2_get_branch(inode, depth, offsets, chain, &err); in ext2_get_blocks()
650 if (!partial) { in ext2_get_blocks()
666 partial = chain + depth - 1; in ext2_get_blocks()
696 if (err == -EAGAIN || !verify_chain(chain, partial)) { in ext2_get_blocks()
697 while (partial > chain) { in ext2_get_blocks()
[all …]
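
The ext2 hit adds the concurrency angle to the chain walk: each Indirect caches both the key and the address `p` it was read from, and verify_chain() re-checks that nothing changed underneath (a racing truncate or allocation would have rewritten *p); on -EAGAIN the partial tail is released and the lookup restarts. A sketch of the check itself, closely following fs/ext2/inode.c:

    #include <stdbool.h>

    struct buffer_head;
    typedef struct {
        unsigned *p;      /* where in the indirect block the key was read */
        unsigned key;     /* the block number seen there at read time */
        struct buffer_head *bh;
    } Indirect;

    /* The chain is still valid while every cached key matches the slot it
     * came from; any mismatch means a writer raced with this walk. */
    static bool verify_chain(const Indirect *from, const Indirect *to)
    {
        while (from <= to && from->key == *from->p)
            from++;
        return from > to;
    }

    int main(void)
    {
        unsigned slot0 = 7, slot1 = 42;
        Indirect chain[2] = { { &slot0, 7, 0 }, { &slot1, 42, 0 } };
        bool ok = verify_chain(chain, chain + 1);    /* true */
        slot1 = 43;                                  /* simulated race */
        ok = verify_chain(chain, chain + 1);         /* now false */
        return ok ? 1 : 0;
    }
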
/linux-6.12.1/arch/sparc/crypto/
sha1_glue.c
31 unsigned int len, unsigned int partial) in __sha1_sparc64_update() argument
36 if (partial) { in __sha1_sparc64_update()
37 done = SHA1_BLOCK_SIZE - partial; in __sha1_sparc64_update()
38 memcpy(sctx->buffer + partial, data, done); in __sha1_sparc64_update()
55 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_sparc64_update() local
58 if (partial + len < SHA1_BLOCK_SIZE) { in sha1_sparc64_update()
60 memcpy(sctx->buffer + partial, data, len); in sha1_sparc64_update()
62 __sha1_sparc64_update(sctx, data, len, partial); in sha1_sparc64_update()
md5_glue.c
47 unsigned int len, unsigned int partial) in __md5_sparc64_update() argument
52 if (partial) { in __md5_sparc64_update()
53 done = MD5_HMAC_BLOCK_SIZE - partial; in __md5_sparc64_update()
54 memcpy((u8 *)sctx->block + partial, data, done); in __md5_sparc64_update()
71 unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; in md5_sparc64_update() local
74 if (partial + len < MD5_HMAC_BLOCK_SIZE) { in md5_sparc64_update()
76 memcpy((u8 *)sctx->block + partial, data, len); in md5_sparc64_update()
78 __md5_sparc64_update(sctx, data, len, partial); in md5_sparc64_update()
sha512_glue.c
30 unsigned int len, unsigned int partial) in __sha512_sparc64_update() argument
36 if (partial) { in __sha512_sparc64_update()
37 done = SHA512_BLOCK_SIZE - partial; in __sha512_sparc64_update()
38 memcpy(sctx->buf + partial, data, done); in __sha512_sparc64_update()
55 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_sparc64_update() local
58 if (partial + len < SHA512_BLOCK_SIZE) { in sha512_sparc64_update()
61 memcpy(sctx->buf + partial, data, len); in sha512_sparc64_update()
63 __sha512_sparc64_update(sctx, data, len, partial); in sha512_sparc64_update()
sha256_glue.c
31 unsigned int len, unsigned int partial) in __sha256_sparc64_update() argument
36 if (partial) { in __sha256_sparc64_update()
37 done = SHA256_BLOCK_SIZE - partial; in __sha256_sparc64_update()
38 memcpy(sctx->buf + partial, data, done); in __sha256_sparc64_update()
55 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in sha256_sparc64_update() local
58 if (partial + len < SHA256_BLOCK_SIZE) { in sha256_sparc64_update()
60 memcpy(sctx->buf + partial, data, len); in sha256_sparc64_update()
62 __sha256_sparc64_update(sctx, data, len, partial); in sha256_sparc64_update()
/linux-6.12.1/Documentation/ABI/testing/
sysfs-kernel-slab
95 allocation from a partial or new slab. It can be written to
178 The deactivate_to_head file shows how many times a partial cpu
179 slab was deactivated and added to the head of its node's partial
189 The deactivate_to_tail file shows how many times a partial cpu
190 slab was deactivated and added to the tail of its node's partial
211 partial list. It can be written to clear the current count.
254 its node's partial list. It can be written to clear the current
276 using the slow path (i.e. to a full or partial slab). It can
296 remain on a node's partial list to avoid the overhead of
325 objects are on partial slabs and from which nodes they are
[all …]
/linux-6.12.1/drivers/crypto/stm32/
stm32-crc32.c
69 u32 partial; /* crc32c: partial in first 4 bytes of that struct */ member
136 /* Store partial result */ in stm32_crc_init()
137 ctx->partial = readl_relaxed(crc->regs + CRC_DR); in stm32_crc_init()
163 ctx->partial = crc32_le(ctx->partial, d8, length); in burst_update()
165 ctx->partial = __crc32c_le(ctx->partial, d8, length); in burst_update()
176 writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); in burst_update()
205 /* Store partial result */ in burst_update()
206 ctx->partial = readl_relaxed(crc->regs + CRC_DR); in burst_update()
249 ~ctx->partial : ctx->partial, out); in stm32_crc_final()
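
The stm32 driver keeps the running CRC in ctx->partial and falls back to the software helpers crc32_le()/__crc32c_le() for unaligned bytes before reseeding the peripheral. The property it relies on is that the partial value is the complete state: folding a buffer in chunks gives the same result as one pass. A standalone sketch with a bitwise CRC-32 step using the same seed-in/seed-out convention as the kernel's crc32_le() (the caller applies the initial and final inversions):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Bitwise reflected CRC-32 (polynomial 0xEDB88320), no final XOR. */
    static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
    }

    int main(void)
    {
        const uint8_t msg[] = "123456789";
        /* Chunk boundaries don't matter: 'partial' carries full state. */
        uint32_t partial = ~0u;
        partial = crc32_le(partial, msg, 4);
        partial = crc32_le(partial, msg + 4, 5);
        printf("%08x\n", ~partial);    /* cbf43926, the CRC-32 check value */
        return 0;
    }
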
/linux-6.12.1/arch/mips/cavium-octeon/crypto/
octeon-sha1.c
78 unsigned int partial; in __octeon_sha1_update() local
82 partial = sctx->count % SHA1_BLOCK_SIZE; in __octeon_sha1_update()
87 if ((partial + len) >= SHA1_BLOCK_SIZE) { in __octeon_sha1_update()
88 if (partial) { in __octeon_sha1_update()
89 done = -partial; in __octeon_sha1_update()
90 memcpy(sctx->buffer + partial, data, in __octeon_sha1_update()
101 partial = 0; in __octeon_sha1_update()
103 memcpy(sctx->buffer + partial, src, len - done); in __octeon_sha1_update()
octeon-sha256.c
70 unsigned int partial; in __octeon_sha256_update() local
74 partial = sctx->count % SHA256_BLOCK_SIZE; in __octeon_sha256_update()
79 if ((partial + len) >= SHA256_BLOCK_SIZE) { in __octeon_sha256_update()
80 if (partial) { in __octeon_sha256_update()
81 done = -partial; in __octeon_sha256_update()
82 memcpy(sctx->buf + partial, data, in __octeon_sha256_update()
93 partial = 0; in __octeon_sha256_update()
95 memcpy(sctx->buf + partial, src, len - done); in __octeon_sha256_update()
/linux-6.12.1/arch/x86/include/asm/
unwind.h
69 * If 'partial' returns true, only the iret frame registers are valid.
72 bool *partial) in unwind_get_entry_regs() argument
77 if (partial) { in unwind_get_entry_regs()
79 *partial = !state->full_regs; in unwind_get_entry_regs()
81 *partial = false; in unwind_get_entry_regs()
89 bool *partial) in unwind_get_entry_regs() argument
/linux-6.12.1/Documentation/driver-api/md/
raid5-ppl.rst
2 Partial Parity Log
5 Partial Parity Log (PPL) is a feature available for RAID5 arrays. The issue
15 Partial parity for a write operation is the XOR of stripe data chunks not
17 write hole. XORing partial parity with the modified chunks produces parity for
26 When handling a write request PPL writes partial parity before new data and
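
In PPL terms: for a partial-stripe write, partial parity is the XOR of the data chunks not being modified, which is the full parity with the modified chunks XORed back out; combining it with the new data reproduces the new parity, which is what closes the write hole after a dirty shutdown. A one-byte toy example of that identity:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Toy stripe: three data chunks of one byte each, plus parity. */
        uint8_t d0 = 0x12, d1 = 0x34, d2 = 0x56;

        /* A write modifies d1 only: partial parity covers the others. */
        uint8_t pp = d0 ^ d2;

        uint8_t new_d1 = 0x99;
        uint8_t new_parity = pp ^ new_d1;

        /* Same as recomputing parity over the whole stripe. */
        printf("%02x == %02x\n", new_parity, (uint8_t)(d0 ^ new_d1 ^ d2));
        (void)d1;
        return 0;
    }
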
/linux-6.12.1/tools/mm/
slabinfo.c
36 unsigned long partial, objects, slabs, objects_partial, objects_total; member
128 "-P|--partial Sort by number of partial slabs\n" in usage()
152 "\nSorting options (--Loss, --Size, --Partial) are mutually exclusive\n" in usage()
416 printf("%-21s ", "Partial slabs"); in slab_numa()
500 printf("Add partial %8lu %8lu %3lu %3lu\n", in slab_stats()
505 printf("Remove partial %8lu %8lu %3lu %3lu\n", in slab_stats()
510 printf("Cpu partial list %8lu %8lu %3lu %3lu\n", in slab_stats()
535 printf("Moved to head of partial list %7lu %3lu%%\n", in slab_stats()
537 printf("Moved to tail of partial list %7lu %3lu%%\n", in slab_stats()
576 s->slab_size, s->slabs - s->partial - s->cpu_slabs, in report()
[all …]
/linux-6.12.1/crypto/
sm3.c
180 unsigned int partial = sctx->count % SM3_BLOCK_SIZE; in sm3_update() local
185 if ((partial + len) >= SM3_BLOCK_SIZE) { in sm3_update()
188 if (partial) { in sm3_update()
189 int p = SM3_BLOCK_SIZE - partial; in sm3_update()
191 memcpy(sctx->buffer + partial, data, p); in sm3_update()
208 partial = 0; in sm3_update()
211 memcpy(sctx->buffer + partial, data, len); in sm3_update()
220 unsigned int partial = sctx->count % SM3_BLOCK_SIZE; in sm3_final() local
224 sctx->buffer[partial++] = 0x80; in sm3_final()
225 if (partial > bit_offset) { in sm3_final()
[all …]
/linux-6.12.1/net/smc/
smc_rx.c
154 struct partial_page *partial; in smc_rx_splice() local
167 partial = kcalloc(nr_pages, sizeof(*partial), GFP_KERNEL); in smc_rx_splice()
168 if (!partial) in smc_rx_splice()
184 partial[0].offset = src - (char *)smc->conn.rmb_desc->cpu_addr; in smc_rx_splice()
185 partial[0].len = len; in smc_rx_splice()
186 partial[0].private = (unsigned long)priv[0]; in smc_rx_splice()
197 partial[i].offset = offset; in smc_rx_splice()
198 partial[i].len = size; in smc_rx_splice()
199 partial[i].private = (unsigned long)priv[i]; in smc_rx_splice()
208 spd.partial = partial; in smc_rx_splice()
[all …]
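
smc_rx_splice() fills a kcalloc'd array of struct partial_page entries (offset/len/private per page) for splice_pipe_desc; more than one entry is needed when the readable region wraps around the receive ring. A small userspace model of that descriptor fill, with the struct shape taken from include/linux/splice.h and hypothetical sizes:

    #include <stdio.h>
    #include <stdlib.h>

    /* Shape of include/linux/splice.h's partial_page. */
    struct partial_page {
        unsigned int offset;
        unsigned int len;
        unsigned long private;
    };

    int main(void)
    {
        /* Readable data wraps: 'bytes' to read starting at 'head' in a
         * ring of 'buf_size' bytes, so the splice needs two segments. */
        const unsigned int buf_size = 4096, head = 3500, bytes = 1000;
        const unsigned int first = buf_size - head;   /* 596 before wrap */

        struct partial_page *partial = calloc(2, sizeof(*partial));
        if (!partial)
            return 1;

        partial[0].offset = head;
        partial[0].len = first;
        partial[1].offset = 0;
        partial[1].len = bytes - first;

        printf("seg0 off=%u len=%u\n", partial[0].offset, partial[0].len);
        printf("seg1 off=%u len=%u\n", partial[1].offset, partial[1].len);
        free(partial);
        return 0;
    }
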
