// SPDX-License-Identifier: GPL-2.0-or-later

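/*
 * The checks below run against an eight-node layout built by
 * setup_numa_memblock(). A sketch of the fraction table it is fed, in basis
 * points (one hundredth of 1%, i.e. 1/10000) of MEM_SIZE; the exact values
 * are an assumption carried over from the sibling alloc_nid tests:
 */
static const unsigned int node_fractions[] = {
	2500, /* 1/4  */
	 625, /* 1/16 */
	1250, /* 1/8  */
	1250, /* 1/8  */
	 625, /* 1/16 */
	 625, /* 1/16 */
	2500, /* 1/4  */
	 625, /* 1/16 */
};
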
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the end of the requested node.
 */
/* from alloc_exact_nid_top_down_numa_simple_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

ASSERT_LE(SZ_4, req_node->size);
size = req_node->size / SZ_4;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
ASSERT_LE(req_node->base, new_rgn->base);

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);

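/*
 * The allocation step itself is elided above. Each check issues a call like
 * the following before the assertions; a sketch, assuming the harness's
 * defaults (SMP_CACHE_BYTES alignment, whole-DRAM range):
 */
min_addr = memblock_start_of_DRAM();
max_addr = memblock_end_of_DRAM();

allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr, nid_req);

/* the _raw variant leaves memory uninitialized; only placement is checked */
ASSERT_NE(allocated_ptr, NULL);
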
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+          |
 *  |           |               requested               |          |
 *  +-----------+---------------------------------------+----------+
 *
 *  |           +------------------+              +-----+          |
 *  |           |     reserved     |              | new |          |
 *  +-----------+------------------+--------------+-----+----------+
 *
 * Expect to allocate an aligned region at the end of the requested node. The
 * region count and total size get updated.
 */
/* from alloc_exact_nid_top_down_numa_part_reserved_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[1];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

ASSERT_LE(SZ_8, req_node->size);
r1.base = req_node->base;
r1.size = req_node->size / SZ_2;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
ASSERT_LE(req_node->base, new_rgn->base);

ASSERT_EQ(memblock.reserved.cnt, 2);
ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

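/*
 * Between the r1 setup and the assertions the check reserves r1, so the
 * allocator has to place the new region beside it in the same node. A sketch
 * of the elided middle; the size fraction is an assumption:
 */
size = r1.size / SZ_2;		/* assumed: anything that fits beside r1 */
memblock_reserve(r1.base, r1.size);

allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr, nid_req);
ASSERT_NE(allocated_ptr, NULL);
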
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * value at the end of the requested node and the beginning of the next node:
 *
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *
 *  |                       +-----------+                          |
 *  |                       |    rgn    |                          |
 *  +-----------------------+-----------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
/* from alloc_exact_nid_top_down_numa_split_range_low_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

min_addr = req_node_end - SZ_256;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, req_node_end - size);
ASSERT_LE(req_node->base, new_rgn->base);

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);

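/*
 * req_node_end and region_end() come from the test harness; region_end() is
 * just base plus size. A sketch matching how the assertions above use it:
 */
static inline phys_addr_t region_end(struct memblock_region *rgn)
{
	return rgn->base + rgn->size;
}

/* as used above */
phys_addr_t req_node_end = region_end(req_node);
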
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * value, where min_addr falls inside a node that does not overlap with the
 * requested node:
 *
 *  |    +---------------+        +-------------+---------+          |
 *  |    |   requested   |        |    node1    |  node2  |          |
 *  +----+---------------+--------+-------------+---------+----------+
 *
 *  |          +---------+                                           |
 *  |          |   rgn   |                                           |
 *  +----------+---------+-------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
/* from alloc_exact_nid_top_down_numa_no_overlap_split_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];
struct memblock_region *node2 = &memblock.memory.regions[6];

min_addr = node2->base - SZ_256;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
ASSERT_LE(req_node->base, new_rgn->base);

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);

/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node ends
 * before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *
 *  |     +-----+                                                    |
 *  |     | rgn |                                                    |
 *  +-----+-----+----------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
/* from alloc_exact_nid_top_down_numa_no_overlap_low_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];
struct memblock_region *min_node = &memblock.memory.regions[2];
struct memblock_region *max_node = &memblock.memory.regions[5];

min_addr = min_node->base;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, region_end(req_node) - size);

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);

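/*
 * max_addr completes a window that misses the requested node entirely. The
 * exact-nid allocator then drops the range rather than the node, so the
 * region still lands in nid_req. A sketch of the elided lines; max_addr
 * tracking the max node is an assumption suggested by the variable names:
 */
max_addr = region_end(max_node);

allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr, nid_req);
ASSERT_NE(allocated_ptr, NULL);
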
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the beginning of the requested node.
 */
/* from alloc_exact_nid_bottom_up_numa_simple_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

ASSERT_LE(SZ_4, req_node->size);
size = req_node->size / SZ_4;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, req_node->base);
ASSERT_LE(region_end(new_rgn), region_end(req_node));

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);

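/*
 * Top-down and bottom-up variants share a wrapper that flips the allocation
 * direction with memblock_set_bottom_up(). A sketch of the pattern, assuming
 * the harness's run_top_down()/run_bottom_up() helpers:
 */
static int alloc_exact_nid_numa_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_top_down_numa_simple_check);
	run_bottom_up(alloc_exact_nid_bottom_up_numa_simple_check);

	return 0;
}
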
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+         |
 *  |           |               requested               |         |
 *  +-----------+---------------------------------------+---------+
 *
 *  |           +------------------+-----+                        |
 *  |           |     reserved     | new |                        |
 *  +-----------+------------------+-----+------------------------+
 *
 * Expect to allocate an aligned region in the requested node that merges with
 * the existing reserved region. The total size gets updated.
 */
/* from alloc_exact_nid_bottom_up_numa_part_reserved_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

ASSERT_LE(SZ_8, req_node->size);
r1.base = req_node->base;
r1.size = req_node->size / SZ_2;

ASSERT_EQ(new_rgn->size, total_size);
ASSERT_EQ(new_rgn->base, req_node->base);
ASSERT_LE(region_end(new_rgn), region_end(req_node));

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, total_size);

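/*
 * total_size is the merged region: r1 plus the new allocation. The elided
 * middle reserves r1 first, so the bottom-up allocation lands right after it
 * and merges with it. A sketch; the size fraction is an assumption:
 */
size = r1.size / SZ_2;			/* assumed: anything that fits */
total_size = size + r1.size;		/* r1 and the new region merge */

memblock_reserve(r1.base, r1.size);

allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr, nid_req);
ASSERT_NE(allocated_ptr, NULL);
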
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * value at the end of the requested node and the beginning of the next node:
 *
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *
 *  |           +-----------+                                      |
 *  |           |    rgn    |                                      |
 *  +-----------+-----------+--------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the requested node.
 */
/* from alloc_exact_nid_bottom_up_numa_split_range_low_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

min_addr = req_node_end - SZ_256;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, req_node->base);
ASSERT_LE(region_end(new_rgn), region_end(req_node));

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * value, where min_addr falls inside a node that does not overlap with the
 * requested node:
 *
 *  |    +---------------+        +-------------+---------+         |
 *  |    |   requested   |        |    node1    |  node2  |         |
 *  +----+---------------+--------+-------------+---------+---------+
 *
 *  |    +---------+                                                 |
 *  |    |   rgn   |                                                 |
 *  +----+---------+------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
/* from alloc_exact_nid_bottom_up_numa_no_overlap_split_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];
struct memblock_region *node2 = &memblock.memory.regions[6];

min_addr = node2->base - SZ_256;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, req_node->base);
ASSERT_LE(region_end(new_rgn), region_end(req_node));

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);

/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node ends
 * before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *
 *  |-----+                                                          |
 *  | rgn |                                                          |
 *  +-----+----------------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
/* from alloc_exact_nid_bottom_up_numa_no_overlap_low_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];
struct memblock_region *min_node = &memblock.memory.regions[2];
struct memblock_region *max_node = &memblock.memory.regions[5];

min_addr = min_node->base;

ASSERT_EQ(new_rgn->size, size);
ASSERT_EQ(new_rgn->base, req_node->base);
ASSERT_LE(region_end(new_rgn), region_end(req_node));

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 *  |   +-----+                            |
 *  |   | req |                            |
 *  +---+-----+----------------------------+
 *
 *      +---------+
 *      |   rgn   |
 *      +---------+
 *
 * Expect no allocation to happen.
 */
/* from alloc_exact_nid_numa_small_node_generic_check() */
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

size = SZ_2 * req_node->size;

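/*
 * The generic checks run in both allocation directions, and the failure
 * cases all end the same way. A sketch of the shared tail:
 */
allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr, nid_req);

/* exact nid: a region twice the node's size can never fit in that node */
ASSERT_EQ(allocated_ptr, NULL);
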
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 *  |              +---------+             |
 *  |              |requested|             |
 *  +--------------+---------+-------------+
 *
 *  |              +---------+             |
 *  |              | reserved|             |
 *  +--------------+---------+-------------+
 *
 * Expect no allocation to happen.
 */
/* from alloc_exact_nid_numa_node_reserved_generic_check() */
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

size = req_node->size;
memblock_reserve(req_node->base, req_node->size);

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 *  |           +-----------------------+    |
 *  |           |       requested       |    |
 *  +-----------+-----------------------+----+
 *
 *  |                 +----------+           |
 *  |                 | reserved |           |
 *  +-----------------+----------+-----------+
 *
 * Expect no allocation to happen.
 */
/* from alloc_exact_nid_numa_part_reserved_fail_generic_check() */
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

ASSERT_LE(SZ_4, req_node->size);
size = req_node->size / SZ_2;
r1.base = req_node->base + (size / SZ_2);

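/*
 * The elided lines give r1 the same size as the request and reserve it,
 * leaving two quarter-node gaps so no half-node region fits. A sketch;
 * r1.size = size is an assumption consistent with the diagram:
 */
r1.size = size;
memblock_reserve(r1.base, r1.size);

allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr, nid_req);
ASSERT_EQ(allocated_ptr, NULL);
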
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * value at the end of the previous node and the beginning of the requested
 * node:
 *
 *  |      +--------------------------+---------+                |
 *  |      |         previous         |requested|                |
 *  +------+--------------------------+---------+----------------+
 *
 * Expect no allocation to happen.
 */
/* from alloc_exact_nid_numa_split_range_high_generic_check() */
struct memblock_region *req_node = &memblock.memory.regions[nid_req];

min_addr = req_node->base - SZ_256;

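/*
 * With min_addr just below the node's base, only part of the window lies in
 * the requested node, so an exact-nid request sized to the whole window must
 * fail. A sketch; the SZ_512 size and the max_addr expression are
 * assumptions:
 */
size = SZ_512;
max_addr = min_addr + size;	/* only SZ_256 of this window is in the node */

allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr, nid_req);
ASSERT_EQ(allocated_ptr, NULL);
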
/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node
 * starts after max_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *  |     +----------+----...----+----------+        +-----------+   |
 *  |     | min node |    ...    | max node |        | requested |   |
 *  +-----+----------+----...----+----------+--------+-----------+---+
 *
 * Expect no allocation to happen.
 */
/* from alloc_exact_nid_numa_no_overlap_high_generic_check() */
struct memblock_region *min_node = &memblock.memory.regions[2];
struct memblock_region *max_node = &memblock.memory.regions[5];

min_addr = min_node->base;

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size.
 * Additionally, none of the nodes have enough memory to allocate the region:
 *
 *  +-----------------------------------+
 *  |                new                |
 *  +-----------------------------------+
 *  |-------+-------+-------+-------+-------+-------+-------+-------|
 *  | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
 *  +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Expect no allocation to happen.
 */
/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * there are two reserved regions at the borders. The requested node starts at
 * min_addr and ends at max_addr and is the same size as the region to be
 * allocated:
 *
 *  |      +-----------+-----------------------+-----------------------|
 *  |      |   node5   |       requested       |         node7         |
 *  +------+-----------+-----------------------+-----------------------+
 *
 *  |             +----+-----------------------+----+                  |
 *  |             | r2 |          new          | r1 |                  |
 *  +-------------+----+-----------------------+----+------------------+
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
/* from alloc_exact_nid_numa_reserved_full_merge_generic_check() */
struct memblock_region *new_rgn = &memblock.reserved.regions[0];
struct memblock_region *req_node = &memblock.memory.regions[nid_req];
struct memblock_region *next_node = &memblock.memory.regions[nid_next];
phys_addr_t size = req_node->size;

r1.base = next_node->base;
r2.base = r1.base - (size + r2.size);

ASSERT_EQ(new_rgn->size, total_size);
ASSERT_EQ(new_rgn->base, r2.base);

ASSERT_LE(new_rgn->base, req_node->base);
ASSERT_LE(region_end(req_node), region_end(new_rgn));

ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, total_size);

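/*
 * The elided setup pins r1 and r2 so that exactly size bytes stay free
 * between them, right over the requested node. A sketch; the SZ_128 border
 * sizes and the min/max expressions are assumptions:
 */
r1.size = SZ_128;
r2.size = SZ_128;
total_size = r1.size + r2.size + size;

min_addr = r2.base + r2.size;
max_addr = r1.base;

memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);

allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr, nid_req);
ASSERT_NE(allocated_ptr, NULL);
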
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * where the total range can fit the region, but it is split between two nodes
 * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
 * instead of requesting a specific node:
 *
 *                         +-----------+
 *                         |    new    |
 *                         +-----------+
 *  |      +---------------------+-----------|
 *  |      |      prev node      | next node |
 *  +------+---------------------+-----------+
 *
 *  |----------------------+           +-----|
 *  |          r1          |           |  r2 |
 *  +----------------------+-----------+-----+
 *
 * Expect no allocation to happen.
 */
/* from alloc_exact_nid_numa_split_all_reserved_generic_check() */
struct memblock_region *next_node = &memblock.memory.regions[7];

r2.base = next_node->base + SZ_128;
r2.size = memblock_end_of_DRAM() - r2.base;

r1.size = MEM_SIZE - (r2.size + size);

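/*
 * The rest of the body reserves both regions and requests the gap between
 * them with NUMA_NO_NODE. The free gap straddles the node boundary, and
 * memblock iterates free ranges per node, so neither piece can fit the
 * request. A sketch; r1.base and the min/max expressions are assumptions:
 */
r1.base = memblock_start_of_DRAM();

min_addr = r1.base + r1.size;	/* start of the split free gap */
max_addr = r2.base;

memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);

allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
					     min_addr, max_addr,
					     NUMA_NO_NODE);
ASSERT_EQ(allocated_ptr, NULL);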