
Searched full:reserve (Results 1 – 25 of 1567) sorted by relevance


/linux-6.12.1/tools/testing/selftests/mm/
charge_reserved_hugetlb.sh  161 local reserve="$9"
187 "$cgroup" "$path" "$method" "$private" "-l" "$reserve" 2>&1 | tee $output &
209 elif [[ "$reserve" != "-n" ]]; then
220 "$cgroup" "$path" "$method" "$private" "$reserve"
223 if [[ "$reserve" != "-n" ]]; then
279 local reserve="${10}"
297 "$reserve"
327 local reserve="${15}"
351 "$expect_failure" "$reserve"
368 "$expect_failure" "$reserve"
[all …]
write_to_hugetlbfs.c  77 int reserve = 1; in main() local
116 reserve = 0; in main()
161 if (!reserve) in main()
164 printf("RESERVE mapping.\n"); in main()
176 (reserve ? 0 : MAP_NORESERVE), in main()
190 (reserve ? 0 : MAP_NORESERVE), in main()
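
The write_to_hugetlbfs.c hits above toggle MAP_NORESERVE off a reserve flag. Below is a minimal standalone sketch of the same pattern, not the selftest itself; the hugetlbfs mount point, file name, "-n" flag and 2 MiB page size are assumptions for illustration.

/* Map a hugetlbfs file with or without a huge page reservation. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define LENGTH (2UL * 1024 * 1024)    /* one 2 MiB huge page, assumed */

int main(int argc, char **argv)
{
    /* "-n" means "do not reserve", mirroring the selftest's flag */
    int reserve = (argc < 2 || strcmp(argv[1], "-n") != 0);
    int fd = open("/mnt/huge/testfile", O_CREAT | O_RDWR, 0644);
    char *addr;

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* With MAP_NORESERVE no huge pages are reserved at mmap() time,
     * so a later write can SIGBUS if the pool has run dry. */
    addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
                MAP_SHARED | (reserve ? 0 : MAP_NORESERVE), fd, 0);
    if (addr == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    memset(addr, 0xab, LENGTH);       /* fault the pages in */
    munmap(addr, LENGTH);
    close(fd);
    return 0;
}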
write_hugetlb_memory.sh  14 reserve=$9
23 "$private" "$want_sleep" "$reserve"
/linux-6.12.1/fs/btrfs/
block-rsv.c  18 * currently reserved for this block reserve.
24 * -> Reserve
62 * to make our extent tree updates. This block reserve acts as an overflow
63 * buffer in case our delayed refs reserve does not reserve enough space to
79 * reservation. We use the transaction items and pre-reserve space for every
85 * the most dynamic block reserve in the system, we will try to refill this
86 * block reserve first with any excess returned by any other block reserve.
89 * This is the fallback block reserve to make us try to reserve space if we
92 * content to just reserve space from the space_info on demand.
96 * allocate a block reserve, set it to some size, and then truncate bytes
[all …]
block-rsv.h  38 /* Block reserve type, one of BTRFS_BLOCK_RSV_* */
46 * reserve.
103 * Fast path to check if the reserve is full, may be carefully used outside of
112 * Get the reserved mount of a block reserve in a context where getting a stale
128 * Get the size of a block reserve in a context where getting a stale value is
delalloc-space.c  22 * -> Reserve
78 * necessary, either by attempting to reserve more space, or freeing up excess
88 * -> reserve
151 /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */ in btrfs_check_data_free_space()
343 * over-reserve slightly, and clean up the mess when we are done. in btrfs_delalloc_reserve_metadata()
419 * When we reserve space we increase outstanding_extents for the extents we may
443 * Reserve data and metadata space for delalloc
453 * - reserve space in data space info for num bytes and reserve precious
457 * - reserve space for metadata space, based on the number of outstanding
458 * extents and how much csums will be needed also reserve metadata space in a
[all …]
/linux-6.12.1/arch/arm/mach-omap2/
board-generic.c  56 .reserve = omap_reserve,
73 .reserve = omap_reserve,
119 .reserve = rx51_reserve,
137 .reserve = omap_reserve,
154 .reserve = omap_reserve,
171 .reserve = omap_reserve,
187 .reserve = omap_reserve,
206 .reserve = omap_reserve,
223 .reserve = omap_reserve,
241 .reserve = omap_reserve,
[all …]
/linux-6.12.1/rust/kernel/alloc/
vec_ext.rs  64 /// v.reserve(10, GFP_KERNEL)?;
68 /// v.reserve(10, GFP_KERNEL)?;
74 fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError>; in reserve() method
80 <Self as VecExt<_>>::reserve(&mut v, capacity, flags)?; in with_capacity()
85 <Self as VecExt<_>>::reserve(self, 1, flags)?; in push()
91 // `reserve` above. in push()
100 <Self as VecExt<_>>::reserve(self, other.len(), flags)?; in extend_from_slice()
107 // of the previous call to `reserve` above. in extend_from_slice()
113 fn reserve(&mut self, additional: usize, _flags: Flags) -> Result<(), AllocError> { in reserve() method
114 Vec::reserve(self, additional); in reserve()
[all …]
/linux-6.12.1/arch/powerpc/platforms/pseries/
vio.c  81 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
94 struct vio_cmo_pool reserve; member
154 /* Determine the amount of free entitlement available in reserve */ in vio_cmo_alloc()
184 * the reserve pool is used to reduce device entitlement, the excess
185 * pool is used to increase the reserve pool toward the desired entitlement
211 /* Spare is a subset of the reserve pool, replenish it first. */ in vio_cmo_dealloc()
215 * Replenish the spare in the reserve pool from the excess pool. in vio_cmo_dealloc()
216 * This moves entitlement into the reserve pool. in vio_cmo_dealloc()
221 vio_cmo.reserve.size += tmp; in vio_cmo_dealloc()
229 * Replenish the spare in the reserve pool from the reserve pool. in vio_cmo_dealloc()
[all …]
/linux-6.12.1/drivers/scsi/
constants.c  44 "Reserve(6)",
68 "Reserve track", "Send OPC info", "Mode Select(10)",
69 /* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue",
71 /* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
72 "Persistent reserve out",
160 {0x0, "Persistent reserve in, read keys"},
161 {0x1, "Persistent reserve in, read reservation"},
162 {0x2, "Persistent reserve in, report capabilities"},
163 {0x3, "Persistent reserve in, read full status"},
168 {0x0, "Persistent reserve out, register"},
[all …]
/linux-6.12.1/fs/xfs/
xfs_fsops.c  218 * Reserve AG metadata blocks. ENOSPC here does not mean there in xfs_growfs_data_private()
350 * Reserve the requested number of blocks if available. Otherwise return
372 * retry if we end up trying to reserve more space than is available. in xfs_reserve_blocks()
399 * If the request is larger than the current reservation, reserve the in xfs_reserve_blocks()
400 * blocks before we update the reserve counters. Sample m_fdblocks and in xfs_reserve_blocks()
404 * fdblocks to stash in the reserve pool. This is a classic TOCTOU in xfs_reserve_blocks()
406 * m_sb_lock. Set the reserve size even if there's not enough free in xfs_reserve_blocks()
408 * reserve when it can. in xfs_reserve_blocks()
418 * here - we don't want to reserve the extra reserve blocks in xfs_reserve_blocks()
419 * from the reserve. in xfs_reserve_blocks()
[all …]
/linux-6.12.1/arch/x86/include/asm/trace/
irq_vectors.h  271 bool reserve),
273 TP_ARGS(irq, is_managed, can_reserve, reserve),
279 __field( bool, reserve )
286 __entry->reserve = reserve;
289 TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d",
291 __entry->reserve)
297 bool can_reserve, bool reserve), \
298 TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL); \
/linux-6.12.1/tools/testing/selftests/bpf/benchs/
run_bench_ringbufs.sh  34 header "Ringbuf back-to-back, reserve+commit vs output"
35 summarize "reserve" "$($RUN_RB_BENCH --rb-b2b rb-custom)"
38 header "Ringbuf sampled, reserve+commit vs output"
39 summarize "reserve-sampled" "$($RUN_RB_BENCH --rb-sampled rb-custom)"
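
The benchmark above contrasts the two BPF ring buffer submission styles: reserve+commit writes the record in place, output copies a finished record in. A minimal BPF-side sketch of both follows, assuming a libbpf/vmlinux.h build; the map name, record layout and tracepoints are hypothetical.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct event {
    u64 pid;
};

struct {
    __uint(type, BPF_MAP_TYPE_RINGBUF);
    __uint(max_entries, 256 * 1024);
} rb SEC(".maps");

SEC("tp/syscalls/sys_enter_getpid")
int reserve_commit(void *ctx)
{
    /* reserve+commit: get a slot and fill it directly in the ring buffer */
    struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);

    if (!e)
        return 0;
    e->pid = bpf_get_current_pid_tgid() >> 32;
    bpf_ringbuf_submit(e, 0);
    return 0;
}

SEC("tp/syscalls/sys_enter_getppid")
int output_copy(void *ctx)
{
    /* output: build the record on the stack, then copy it in */
    struct event e = {
        .pid = bpf_get_current_pid_tgid() >> 32,
    };

    bpf_ringbuf_output(&rb, &e, sizeof(e), 0);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";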
/linux-6.12.1/arch/x86/kernel/
ebda.c  27 * to reserve the EBDA area in the 'RAM size' value ...
30 * chipset: reserve a page before VGA to prevent PCI prefetch
90 * then also reserve everything from the EBDA start address up to in reserve_bios_regions()
96 /* Reserve all memory between bios_start and the 1MB mark: */ in reserve_bios_regions()
/linux-6.12.1/arch/powerpc/include/asm/
kdump.h  9 /* How many bytes to reserve at zero for kdump. The reserve limit should
11 * Reserve to the end of the FWNMI area, see head_64.S */
/linux-6.12.1/Documentation/mm/
hugetlbfs_reserv.rst  19 'reserve' huge pages at mmap() time to ensure that huge pages would be
21 describe how huge page reserve processing is done in the v4.10 kernel.
38 Reserve Map
39 A reserve map is described by the structure::
50 There is one reserve map for each huge page mapping in the system.
90 Specifically, vma->vm_private_data. This reserve map is created at the
239 The routine vma_commit_reservation() is then called to adjust the reserve
243 in the reserve map already existed so no change is made. However, if there
247 It is possible that the reserve map could have been changed between the call
254 a race is detected, the subpool and global reserve counts are adjusted to
[all …]
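
The document above stresses that hugetlbfs reservations are taken at mmap() time, before any fault. A small userspace sketch that makes this visible through HugePages_Rsvd in /proc/meminfo; it assumes at least one free 2 MiB huge page in the pool.

#include <stdio.h>
#include <sys/mman.h>

static long hugepages_rsvd(void)
{
    char line[128];
    long val = -1;
    FILE *f = fopen("/proc/meminfo", "r");

    if (!f)
        return -1;
    while (fgets(line, sizeof(line), f))
        if (sscanf(line, "HugePages_Rsvd: %ld", &val) == 1)
            break;
    fclose(f);
    return val;
}

int main(void)
{
    size_t len = 2UL * 1024 * 1024;    /* one 2 MiB huge page, assumed */
    long before = hugepages_rsvd();
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    /* Nothing has been touched yet, but the reserve count already moved. */
    printf("HugePages_Rsvd: %ld -> %ld\n", before, hugepages_rsvd());
    munmap(p, len);
    return 0;
}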
/linux-6.12.1/arch/um/kernel/
physmem.c  83 unsigned long reserve = reserve_end - start; in setup_physmem() local
84 long map_size = len - reserve; in setup_physmem()
89 reserve, len); in setup_physmem()
95 err = os_map_memory((void *) reserve_end, physmem_fd, reserve, in setup_physmem()
113 memblock_reserve(__pa(start), reserve); in setup_physmem()
/linux-6.12.1/include/linux/
relay.h  98 * NOTE: the client can reserve bytes at the beginning of the new
232 * relay_reserve - reserve slot in channel buffer
234 * @length: number of bytes to reserve
261 * subbuf_start_reserve - reserve bytes at the start of a sub-buffer
263 * @length: number of bytes to reserve
265 * Helper function used to reserve bytes at the beginning of
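
relay_reserve() above is the zero-copy counterpart of relay_write(): the caller is handed a slot in the current sub-buffer and fills it in place. A sketch of that write path, assuming a channel opened elsewhere with relay_open(); the record layout and helper name are illustrative.

#include <linux/relay.h>
#include <linux/types.h>

struct my_event {                  /* hypothetical record layout */
    u64 timestamp;
    u32 cpu;
};

static void log_event(struct rchan *chan, u64 ts, u32 cpu)
{
    /* Returns a pointer into the current sub-buffer, or NULL if it is
     * full and switching to the next sub-buffer failed. */
    struct my_event *slot = relay_reserve(chan, sizeof(*slot));

    if (!slot)
        return;
    slot->timestamp = ts;
    slot->cpu = cpu;
}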
/linux-6.12.1/include/drm/ttm/
ttm_execbuf_util.h  57 * @ticket: ww_acquire_ctx from reserve call
75 * Tries to reserve bos pointed to by the list entries for validation.
80 * reserve the same buffers in reverse order, all threads except one will
106 * @ticket: ww_acquire_ctx from reserve call
/linux-6.12.1/drivers/gpu/drm/tests/
drm_mm_test.c  49 "Expected to find no holes (after reserve), found %lu instead\n", count); in assert_no_holes()
168 KUNIT_FAIL(test, "failed to reserve whole drm_mm\n"); in drm_test_mm_init()
204 "failed to reserve node[0] {start=%lld, size=%lld)\n", in drm_test_mm_debug()
210 "failed to reserve node[0] {start=%lld, size=%lld)\n", in drm_test_mm_debug()
299 KUNIT_FAIL(test, "Could not reserve low node\n"); in drm_test_mm_once()
307 KUNIT_FAIL(test, "Could not reserve low node\n"); in drm_test_mm_once()
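
The tests above exercise drm_mm_reserve_node(), which carves a caller-chosen range out of a drm_mm address space instead of letting the allocator pick one. A minimal sketch, with an illustrative range and a zero-initialized node assumed.

#include <drm/drm_mm.h>

/* Claim a fixed range in @mm; fails with -ENOSPC if it is not free. */
static int reserve_low_range(struct drm_mm *mm, struct drm_mm_node *node)
{
    node->start = 0;       /* hypothetical range: first 4 KiB of the space */
    node->size = 4096;
    return drm_mm_reserve_node(mm, node);
}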
/linux-6.12.1/drivers/md/bcache/
alloc.c  334 /* Prios/gens are actually the most important reserve */ in bch_allocator_push()
419 long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait) in bch_bucket_alloc() argument
432 fifo_pop(&ca->free[reserve], r)) in bch_bucket_alloc()
436 trace_bcache_alloc_fail(ca, reserve); in bch_bucket_alloc()
448 !fifo_pop(&ca->free[reserve], r)); in bch_bucket_alloc()
455 trace_bcache_alloc(ca, reserve); in bch_bucket_alloc()
478 if (reserve <= RESERVE_PRIO) { in bch_bucket_alloc()
515 int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, in __bch_bucket_alloc_set() argument
530 b = bch_bucket_alloc(ca, reserve, wait); in __bch_bucket_alloc_set()
543 int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, in bch_bucket_alloc_set() argument
[all …]
/linux-6.12.1/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/
ctrl90f1.h  46 * [in] Page size (VA coverage) of the level to reserve.
53 * [in] First GPU virtual address of the range to reserve.
59 * [in] Last GPU virtual address of the range to reserve.
/linux-6.12.1/drivers/dax/
pmem.c  38 /* reserve the metadata area, device-dax will reserve the data */ in __dax_pmem_probe()
44 dev_warn(dev, "could not reserve metadata\n"); in __dax_pmem_probe()
/linux-6.12.1/arch/arm/mach-qcom/
Kconfig  19 bool "Reserve SMEM at the beginning of RAM"
21 Reserve 2MB at the beginning of the System RAM for shared mem.
/linux-6.12.1/drivers/gpu/drm/amd/pm/swsmu/inc/
smu_v14_0_2_pptable.h  126 …uint8_t reserve[3]; // Zero filled field r… member
165 …uint8_t reserve[143]; // Zero filled field reserved for future u… member
184 uint8_t reserve[3]; member
198 uint32_t reserve[8]; member
