
Searched refs:allocation (Results 1 – 25 of 355) sorted by relevance


/linux-6.12.1/drivers/acpi/acpica/
uttrack.c
32 *allocation);
94 struct acpi_debug_mem_block *allocation; in acpi_ut_allocate_and_track() local
105 allocation = in acpi_ut_allocate_and_track()
107 if (!allocation) { in acpi_ut_allocate_and_track()
118 acpi_ut_track_allocation(allocation, size, ACPI_MEM_MALLOC, in acpi_ut_allocate_and_track()
121 acpi_os_free(allocation); in acpi_ut_allocate_and_track()
135 return ((void *)&allocation->user_space); in acpi_ut_allocate_and_track()
157 struct acpi_debug_mem_block *allocation; in acpi_ut_allocate_zeroed_and_track() local
168 allocation = in acpi_ut_allocate_zeroed_and_track()
171 if (!allocation) { in acpi_ut_allocate_zeroed_and_track()
[all …]
utalloc.c
33 void *allocation; in acpi_os_allocate_zeroed() local
37 allocation = acpi_os_allocate(size); in acpi_os_allocate_zeroed()
38 if (allocation) { in acpi_os_allocate_zeroed()
42 memset(allocation, 0, size); in acpi_os_allocate_zeroed()
45 return (allocation); in acpi_os_allocate_zeroed()
/linux-6.12.1/drivers/md/dm-vdo/
physical-zone.c
483 static int allocate_and_lock_block(struct allocation *allocation) in allocate_and_lock_block() argument
488 VDO_ASSERT_LOG_ONLY(allocation->lock == NULL, in allocate_and_lock_block()
491 result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn); in allocate_and_lock_block()
495 result = vdo_attempt_physical_zone_pbn_lock(allocation->zone, allocation->pbn, in allocate_and_lock_block()
496 allocation->write_lock_type, &lock); in allocate_and_lock_block()
504 (unsigned long long) allocation->pbn, in allocate_and_lock_block()
510 allocation->lock = lock; in allocate_and_lock_block()
525 data_vio->allocation.wait_for_clean_slab = false; in retry_allocation()
526 data_vio->allocation.first_allocation_zone = data_vio->allocation.zone->zone_number; in retry_allocation()
541 struct allocation *allocation = &data_vio->allocation; in continue_allocating() local
[all …]
packer.c
258 struct allocation *allocation) in release_compressed_write_waiter() argument
261 .pbn = allocation->pbn, in release_compressed_write_waiter()
262 .zone = allocation->zone, in release_compressed_write_waiter()
266 vdo_share_compressed_write_lock(data_vio, allocation->lock); in release_compressed_write_waiter()
267 update_metadata_for_data_vio_write(data_vio, allocation->lock); in release_compressed_write_waiter()
289 release_compressed_write_waiter(client, &agent->allocation); in finish_compressed_write()
293 release_compressed_write_waiter(agent, &agent->allocation); in finish_compressed_write()
299 struct allocation *allocation = &agent->allocation; in handle_compressed_write_error() local
302 if (vdo_requeue_completion_if_needed(completion, allocation->zone->thread_id)) in handle_compressed_write_error()
307 (unsigned long long) allocation->pbn); in handle_compressed_write_error()
[all …]
data-vio.c
498 memset(&data_vio->allocation, 0, sizeof(data_vio->allocation)); in launch_data_vio()
1280 VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL, in finish_cleanup()
1363 (unsigned long long) data_vio->allocation.pbn, in enter_read_only_mode()
1407 struct allocation *allocation = &data_vio->allocation; in data_vio_allocate_data_block() local
1409 VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK), in data_vio_allocate_data_block()
1411 allocation->write_lock_type = write_lock_type; in data_vio_allocate_data_block()
1412 allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone); in data_vio_allocate_data_block()
1413 allocation->first_allocation_zone = allocation->zone->zone_number; in data_vio_allocate_data_block()
1427 struct allocation *allocation = &data_vio->allocation; in release_data_vio_allocation_lock() local
1428 physical_block_number_t locked_pbn = allocation->pbn; in release_data_vio_allocation_lock()
[all …]
dump.c
163 wait_on, data_vio, data_vio->allocation.pbn, data_vio->logical.lbn, in dump_vio_waiters()
169 data_vio, data_vio->allocation.pbn, data_vio->logical.lbn, in dump_vio_waiters()
239 data_vio->allocation.pbn, data_vio->logical.lbn, in dump_data_vio()
244 data_vio->allocation.pbn, data_vio->logical.lbn); in dump_data_vio()
data-vio.h
144 struct allocation { struct
209 struct allocation allocation; member
320 return (data_vio->allocation.pbn != VDO_ZERO_BLOCK); in data_vio_has_allocation()
430 thread_id_t expected = data_vio->allocation.zone->thread_id; in assert_data_vio_in_allocated_zone()
435 (unsigned long long) data_vio->allocation.pbn, thread_id, in assert_data_vio_in_allocated_zone()
443 data_vio->allocation.zone->thread_id); in set_data_vio_allocated_zone_callback()
/linux-6.12.1/tools/testing/selftests/resctrl/
mba_test.c
42 static int runs_per_allocation, allocation = 100; in mba_setup() local
53 if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX) in mba_setup()
56 sprintf(allocation_str, "%d", allocation); in mba_setup()
62 allocation -= ALLOCATION_STEP; in mba_setup()
75 int allocation, runs; in show_mba_info() local
80 for (allocation = 0; allocation < ALLOCATION_MAX / ALLOCATION_STEP; in show_mba_info()
81 allocation++) { in show_mba_info()
91 for (runs = NUM_OF_RUNS * allocation + 1; in show_mba_info()
92 runs < NUM_OF_RUNS * allocation + NUM_OF_RUNS ; runs++) { in show_mba_info()
106 ALLOCATION_MAX - ALLOCATION_STEP * allocation); in show_mba_info()
/linux-6.12.1/mm/
dmapool.c
57 unsigned int allocation; member
127 if ((dma - page->dma) < pool->allocation) in pool_find_page()
161 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); in pool_init_page()
228 size_t allocation; in dma_pool_create() local
245 allocation = max_t(size_t, size, PAGE_SIZE); in dma_pool_create()
248 boundary = allocation; in dma_pool_create()
252 boundary = min(boundary, allocation); in dma_pool_create()
266 retval->allocation = allocation; in dma_pool_create()
306 while (offset + pool->size <= pool->allocation) { in pool_initialise_page()
342 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
[all …]
/linux-6.12.1/include/linux/
dmapool.h
22 size_t size, size_t align, size_t allocation);
34 size_t size, size_t align, size_t allocation);
39 struct device *dev, size_t size, size_t align, size_t allocation) in dma_pool_create() argument
47 struct device *dev, size_t size, size_t align, size_t allocation) in dmam_pool_create() argument
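
For context, a minimal usage sketch of the dma_pool API these declarations belong to; dev is a hypothetical struct device pointer, and the block size, alignment, and boundary (0 = none) are illustrative:

    #include <linux/dmapool.h>

    static int demo_dma_pool(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t handle;
            void *vaddr;

            /* 64-byte blocks, 8-byte aligned, no boundary-crossing constraint */
            pool = dma_pool_create("demo_descs", dev, 64, 8, 0);
            if (!pool)
                    return -ENOMEM;

            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
            if (!vaddr) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }
            /* ... hand the bus address in handle to the device ... */
            dma_pool_free(pool, vaddr, handle);
            dma_pool_destroy(pool);
            return 0;
    }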
/linux-6.12.1/Documentation/core-api/
memory-allocation.rst
7 Linux provides a variety of APIs for memory allocation. You can
14 Most of the memory allocation APIs use GFP flags to express how that
16 pages", the underlying memory allocation function.
18 Diversity of the allocation APIs combined with the numerous GFP flags
26 Of course there are cases when other allocation APIs and different GFP
45 * If the allocation is performed from an atomic context, e.g interrupt
48 ``GFP_NOWAIT`` allocation is likely to fail. Users of this flag need
52 will be stressed unless allocation succeeds, you may use ``GFP_ATOMIC``.
67 example may be a hardware allocation that maps data directly into
94 * ``GFP_KERNEL & ~__GFP_RECLAIM`` - optimistic allocation without _any_
[all …]
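
As a quick illustration of the two most common flag choices this document describes (struct item is a hypothetical type):

    #include <linux/slab.h>

    struct item { int id; };

    /* Process context: GFP_KERNEL may sleep while memory is reclaimed */
    static struct item *item_alloc(void)
    {
            return kmalloc(sizeof(struct item), GFP_KERNEL);
    }

    /* Interrupt handler or under a spinlock: must not sleep */
    static struct item *item_alloc_atomic(void)
    {
            /* GFP_ATOMIC fails rather than sleeping; callers must check */
            return kmalloc(sizeof(struct item), GFP_ATOMIC);
    }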
genalloc.rst
4 There are a number of memory-allocation subsystems in the kernel, each
32 which NUMA node should be used for the allocation of the housekeeping
87 how the allocation functions choose which specific piece of memory to
107 - gen_pool_first_fit_align forces the allocation to have a specific
110 - gen_pool_first_fit_order_align aligns the allocation to the order of the
111 size. A 60-byte allocation will thus be 64-byte aligned, for example.
117 If the indicated memory is not available the allocation fails.
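
A minimal sketch of the genalloc calls described above, assuming a hypothetical driver-owned region (region, region_size):

    #include <linux/genalloc.h>
    #include <linux/log2.h>

    static int demo_gen_pool(unsigned long region, size_t region_size)
    {
            struct gen_pool *pool;
            unsigned long addr;

            pool = gen_pool_create(ilog2(64), -1);  /* 64-byte granules, any NUMA node */
            if (!pool)
                    return -ENOMEM;
            /* order-align: a 60-byte allocation comes back 64-byte aligned */
            gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
            if (gen_pool_add(pool, region, region_size, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            addr = gen_pool_alloc(pool, 60);
            if (addr)                               /* !addr: pool exhausted, no fallback */
                    gen_pool_free(pool, addr, 60);
            gen_pool_destroy(pool);
            return 0;
    }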
/linux-6.12.1/Documentation/trace/
events-kmem.rst
5 The kmem tracing system captures events related to object and page allocation
8 - Slab allocation of small objects of unknown type (kmalloc)
9 - Slab allocation of small objects of known type
10 - Page allocation
17 1. Slab allocation of small objects of unknown type
27 internal fragmented as a result of the allocation pattern. By correlating
29 the allocation sites were.
32 2. Slab allocation of small objects of known type
45 3. Page allocation
54 These four events deal with page allocation and freeing. mm_page_alloc is
[all …]
/linux-6.12.1/Documentation/mm/
allocation-profiling.rst
23 When set to "never", memory allocation profiling overhead is minimized and it
52 Memory allocation profiling builds off of code tagging, which is a library for
57 To add accounting for an allocation call, we replace it with a macro
61 - calls the real allocation function
66 do not properly belong to the outer allocation context and should be counted
70 Thus, proper usage requires determining which function in an allocation call
77 - switch its allocation call to the _noprof() version, e.g. kmalloc_noprof()
93 - Hook your data structure's init function, like any other allocation function.
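
A hedged sketch of the wrapper pattern described above, assuming the alloc_hooks() macro from <linux/alloc_tag.h>; the my_buf_alloc* names are hypothetical:

    #include <linux/alloc_tag.h>
    #include <linux/slab.h>

    /* The real allocation; accounting is deliberately not done at this level */
    static inline void *my_buf_alloc_noprof(size_t size, gfp_t gfp)
    {
            return kmalloc_noprof(size, gfp);
    }

    /* Callers use this; alloc_hooks() charges the allocation to the call site */
    #define my_buf_alloc(size, gfp) alloc_hooks(my_buf_alloc_noprof(size, gfp))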
page_frags.rst
11 simple allocation framework for page fragments. This is used by the
17 cache is needed. This provides a central point for the fragment allocation
20 which can be expensive at allocation time. However due to the nature of
23 to be disabled when executing the fragment allocation.
26 allocation. The netdev_alloc_cache is used by callers making use of the
41 avoid calling get_page per allocation.
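
A minimal sketch of the page fragment API mentioned above; demo_cache is a hypothetical cache (real users typically keep one per CPU):

    #include <linux/gfp.h>

    static struct page_frag_cache demo_cache;

    static void *demo_frag_alloc(void)
    {
            /* the caller is assumed to have interrupts disabled, per the text above */
            return page_frag_alloc(&demo_cache, 256, GFP_ATOMIC);
    }

    static void demo_frag_free(void *frag)
    {
            page_frag_free(frag);   /* page ref drops once its last fragment is freed */
    }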
split_page_table_lock.rst
30 PTE with pointer to its lock, or returns NULL if allocation failed;
62 must be called on PTE table allocation / freeing.
65 allocation: slab uses page->slab_cache for its pages.
72 allocation and pagetable_pmd_dtor() on freeing.
75 pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing
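
A hedged sketch of an architecture's PTE table allocation honoring the constructor contract described above, assuming the ptdesc helpers in this kernel version:

    #include <linux/mm.h>

    static pgtable_t my_pte_alloc_one(struct mm_struct *mm)
    {
            struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);

            if (!ptdesc)
                    return NULL;
            if (!pagetable_pte_ctor(ptdesc)) {      /* split-lock init can fail */
                    pagetable_free(ptdesc);
                    return NULL;
            }
            return ptdesc_page(ptdesc);
    }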
balance.rst
12 be that the caller is willing to fail the allocation without incurring the
14 allocation requests that have order-0 fallback options. In such cases,
17 __GFP_IO allocation requests are made to prevent file system deadlocks.
19 In the absence of non sleepable allocation requests, it seems detrimental
46 with a slight change in the allocation routine, it is possible to reduce
74 probably because all allocation requests are coming from intr context
88 watermark[WMARK_HIGH]. When low_on_memory is set, page allocation requests will
page_owner.rst
10 When allocation happens, information about allocation such as call stack
15 Although we already have tracepoint for tracing page allocation/free,
30 allocation and free operation.
38 the page allocator hotpath and if not enabled, then allocation is done
40 not affect to allocation performance, especially if the static keys jump
155 -a Sort by memory allocation time.
221 st stacktrace stack trace of the page allocation
234 st stacktrace stack trace of the page allocation
numa.rst
75 selected zone/node cannot satisfy the allocation request. This situation,
87 By default, Linux will attempt to satisfy memory allocation requests from the
90 for the node where the request originates. This is called "local allocation."
95 Local allocation will tend to keep subsequent access to the allocated memory
110 allocation behavior using Linux NUMA memory policy. [see
127 Some kernel allocations do not want or cannot tolerate this allocation fallback
132 A typical model for making such an allocation is to obtain the node id of the
135 the node id returned. When such an allocation fails, the requesting subsystem
138 itself on allocation failure. The kernel profiling subsystem is an example of
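
A minimal sketch of the local-allocation-with-fallback model just described; alloc_on_local_node() is a hypothetical helper:

    #include <linux/slab.h>
    #include <linux/topology.h>

    static void *alloc_on_local_node(size_t size)
    {
            int nid = numa_node_id();       /* node of the CPU we are running on */
            void *p = kmalloc_node(size, GFP_KERNEL | __GFP_THISNODE, nid);

            if (!p)                         /* target node exhausted */
                    p = kmalloc(size, GFP_KERNEL);  /* fall back to any node */
            return p;
    }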
/linux-6.12.1/Documentation/filesystems/ext4/
Dbigalloc.rst15 use clustered allocation, so that each bit in the ext4 block allocation
19 This means that each bit in the block allocation bitmap now addresses
20 256 4k blocks. This shrinks the total size of the block allocation
29 128MiB); however, the minimum allocation unit becomes a cluster, not a
/linux-6.12.1/tools/testing/memblock/
DREADME17 allocation functionalities of memblock. The main data structure of the boot time
58 allocation functions. Tests for each group are defined in dedicated files, as it
82 Some allocation functions clear the memory in the process, so it is required for
85 points to a block of memory allocated via malloc. For each group of allocation
87 at the end of the test run. The structure of a test runner checking allocation
95 (...allocation checks...)
/linux-6.12.1/Documentation/admin-guide/mm/
Dnuma_memory_policy.rst38 use "local allocation" described below. However, during boot
82 A VMA policy will govern the allocation of pages that back
138 support allocation at fault time--a.k.a lazy allocation--so hugetlbfs
140 Although hugetlbfs segments now support lazy allocation, their support
197 closest to the node where the allocation takes place.
200 This mode specifies that the allocation should be attempted
202 allocation fails, the kernel will search other nodes, in order
209 and the policy is interpreted as local allocation. "Local"
210 allocation policy can be viewed as a Preferred policy that
211 starts at the node containing the cpu where the allocation
[all …]
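
A minimal userspace sketch of installing a Preferred policy with set_mempolicy(2) (node 1 is an arbitrary example; link with -lnuma for the numaif.h wrapper):

    #include <numaif.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long nodemask = 1UL << 1;      /* prefer node 1 */

            if (set_mempolicy(MPOL_PREFERRED, &nodemask, 8 * sizeof(nodemask)))
                    perror("set_mempolicy");
            /* pages faulted in from now on are preferentially placed on node 1 */
            return 0;
    }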
/linux-6.12.1/Documentation/admin-guide/
Dnumastat.rst12 the preferred node and numa_miss on the node where allocation succeeded.
17 incremented on allocation from a node by CPU on the same node. other_node is
18 similar to numa_miss and is incremented on the node where allocation succeeds
53 preferred node. As a result, such allocation will not increase the numa_foreign
/linux-6.12.1/drivers/staging/gdm724x/
DTODO10 - Review use of atomic allocation for tx structs
12 - fix up static tty port allocation to be dynamic
/linux-6.12.1/drivers/soundwire/
DMakefile11 soundwire-generic-allocation-objs := generic_bandwidth_allocation.o
12 obj-$(CONFIG_SOUNDWIRE_GENERIC_ALLOCATION) += soundwire-generic-allocation.o
