Lines Matching +full:memory +full:- +full:region

1 // SPDX-License-Identifier: GPL-2.0-or-later
23 * A test that tries to allocate a memory region in a specific NUMA node that
24 * has enough memory to allocate a region of the requested size.
25 * Expect to allocate an aligned region at the end of the requested node.
31 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_simple_check()
40 ASSERT_LE(SZ_4, req_node->size); in alloc_exact_nid_top_down_numa_simple_check()
41 size = req_node->size / SZ_4; in alloc_exact_nid_top_down_numa_simple_check()
52 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_top_down_numa_simple_check()
53 ASSERT_EQ(new_rgn->base, region_end(req_node) - size); in alloc_exact_nid_top_down_numa_simple_check()
54 ASSERT_LE(req_node->base, new_rgn->base); in alloc_exact_nid_top_down_numa_simple_check()
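A match-only listing hides the body of the test, so for orientation here is a minimal sketch of how this check is assembled in the memblock test suite, using setup_numa_memblock(), memblock_alloc_exact_nid_raw(), and the ASSERT_* helpers from tests/common.h. The node id, locals, and ordering are assumptions for illustration, not the verbatim kernel code:

static int alloc_exact_nid_top_down_numa_simple_check(void)
{
	int nid_req = 3;			/* assumed node id */
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);	/* split MEM_SIZE into NUMA nodes */

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;

	/* an exact-nid request must land in nid_req or fail outright */
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     memblock_start_of_DRAM(),
						     memblock_end_of_DRAM(),
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	/* top-down: the new region sits flush against the node's end */
	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	test_pass_pop();
	return 0;
}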
65 * A test that tries to allocate a memory region in a specific NUMA node that
66 * is partially reserved but has enough memory for the allocated region:
68 * | +---------------------------------------+ |
70 * +-----------+---------------------------------------+----------+
72 * | +------------------+ +-----+ |
74 * +-----------+------------------+--------------+-----+----------+
76 * Expect to allocate an aligned region at the end of the requested node. The
77 * region count and total size get updated.
83 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_part_reserved_check()
85 struct region r1; in alloc_exact_nid_top_down_numa_part_reserved_check()
93 ASSERT_LE(SZ_8, req_node->size); in alloc_exact_nid_top_down_numa_part_reserved_check()
94 r1.base = req_node->base; in alloc_exact_nid_top_down_numa_part_reserved_check()
95 r1.size = req_node->size / SZ_2; in alloc_exact_nid_top_down_numa_part_reserved_check()
108 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_top_down_numa_part_reserved_check()
109 ASSERT_EQ(new_rgn->base, region_end(req_node) - size); in alloc_exact_nid_top_down_numa_part_reserved_check()
110 ASSERT_LE(req_node->base, new_rgn->base); in alloc_exact_nid_top_down_numa_part_reserved_check()
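The matching lines omit the reservation step. A sketch of the setup, following the same skeleton as the sketch above; the fraction used to derive the allocation size is an assumption:

	/* reserve the first half of the requested node up front */
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;			/* assumed fraction */
	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     memblock_start_of_DRAM(),
						     memblock_end_of_DRAM(),
						     nid_req);

	/* r1 and the new top-down region do not touch, so both stay separate */
	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);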
121 * A test that tries to allocate a memory region that spans over the min_addr
129 * | +-----------------------+-----------+ |
131 * +-----------+-----------------------+-----------+--------------+
133 * | +-----------+ |
135 * +-----------------------+-----------+--------------------------+
137 * Expect to drop the lower limit and allocate a memory region that ends at
144 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_split_range_low_check()
155 min_addr = req_node_end - SZ_256; in alloc_exact_nid_top_down_numa_split_range_low_check()
165 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_top_down_numa_split_range_low_check()
166 ASSERT_EQ(new_rgn->base, req_node_end - size); in alloc_exact_nid_top_down_numa_split_range_low_check()
167 ASSERT_LE(req_node->base, new_rgn->base); in alloc_exact_nid_top_down_numa_split_range_low_check()
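Only the min_addr line of the range setup matches. A sketch of how the straddling range is likely constructed (the SZ_256 offset follows the matched line; max_addr is an assumption):

	phys_addr_t req_node_end = region_end(req_node);
	phys_addr_t min_addr = req_node_end - SZ_256;	/* inside the node */
	phys_addr_t max_addr = min_addr + size;		/* spills past its end */

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	/* the lower limit is dropped; the region still ends at the node's end */
	ASSERT_EQ(new_rgn->base, req_node_end - size);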
178 * A test that tries to allocate a memory region that spans over the min_addr
186 * | +---------------+ +-------------+---------+ |
188 * +----+---------------+--------+-------------+---------+----------+
190 * | +---------+ |
192 * +----------+---------+-------------------------------------------+
194 * Expect to drop the lower limit and allocate a memory region that ends at
201 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
202 struct memblock_region *node2 = &memblock.memory.regions[6]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
212 min_addr = node2->base - SZ_256; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
222 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_top_down_numa_no_overlap_split_check()
223 ASSERT_EQ(new_rgn->base, region_end(req_node) - size); in alloc_exact_nid_top_down_numa_no_overlap_split_check()
224 ASSERT_LE(req_node->base, new_rgn->base); in alloc_exact_nid_top_down_numa_no_overlap_split_check()
235 * A test that tries to allocate memory within min_addr and max_addr range when in alloc_exact_nid_top_down_numa_no_overlap_low_check()
244 * |-----------+ +----------+----...----+----------+ |
246 * +-----------+-----------+----------+----...----+----------+------+
248 * | +-----+ |
250 * +-----+-----+----------------------------------------------------+
252 * Expect to drop the lower limit and allocate a memory region that ends at
259 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_no_overlap_low_check()
260 struct memblock_region *min_node = &memblock.memory.regions[2]; in alloc_exact_nid_top_down_numa_no_overlap_low_check()
261 struct memblock_region *max_node = &memblock.memory.regions[5]; in alloc_exact_nid_top_down_numa_no_overlap_low_check()
270 min_addr = min_node->base; in alloc_exact_nid_top_down_numa_no_overlap_low_check()
280 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_top_down_numa_no_overlap_low_check()
281 ASSERT_EQ(new_rgn->base, region_end(req_node) - size); in alloc_exact_nid_top_down_numa_no_overlap_low_check()
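Here the [min_addr, max_addr) window is built from two other nodes and does not intersect the requested node at all. A sketch of the range setup, with max_addr assumed to be the end of max_node:

	/* a range that deliberately excludes the requested node */
	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	/* the lower limit is dropped so the allocation can land in the
	 * requested node, which lies entirely below min_addr */
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);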
292 * A test that tries to allocate a memory region in a specific NUMA node that
293 * has enough memory to allocate a region of the requested size.
294 * Expect to allocate an aligned region at the beginning of the requested node.
300 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_bottom_up_numa_simple_check()
309 ASSERT_LE(SZ_4, req_node->size); in alloc_exact_nid_bottom_up_numa_simple_check()
310 size = req_node->size / SZ_4; in alloc_exact_nid_bottom_up_numa_simple_check()
321 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_bottom_up_numa_simple_check()
322 ASSERT_EQ(new_rgn->base, req_node->base); in alloc_exact_nid_bottom_up_numa_simple_check()
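The bottom-up mirror of the first check differs only in allocation direction and expected base. A sketch of the distinguishing part, assuming the direction is flipped with memblock_set_bottom_up() as the test harness does for bottom-up runs:

	memblock_set_bottom_up(true);	/* allocations now grow upward */

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     memblock_start_of_DRAM(),
						     memblock_end_of_DRAM(),
						     nid_req);

	/* bottom-up: the new region starts at the base of the requested node */
	ASSERT_EQ(new_rgn->base, req_node->base);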
334 * A test that tries to allocate a memory region in a specific NUMA node that
335 * is partially reserved but has enough memory for the allocated region:
337 * | +---------------------------------------+ |
339 * +-----------+---------------------------------------+---------+
341 * | +------------------+-----+ |
343 * +-----------+------------------+-----+------------------------+
345 * Expect to allocate an aligned region in the requested node that merges with
346 * the existing reserved region. The total size gets updated.
352 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_bottom_up_numa_part_reserved_check()
354 struct region r1; in alloc_exact_nid_bottom_up_numa_part_reserved_check()
363 ASSERT_LE(SZ_8, req_node->size); in alloc_exact_nid_bottom_up_numa_part_reserved_check()
364 r1.base = req_node->base; in alloc_exact_nid_bottom_up_numa_part_reserved_check()
365 r1.size = req_node->size / SZ_2; in alloc_exact_nid_bottom_up_numa_part_reserved_check()
379 ASSERT_EQ(new_rgn->size, total_size); in alloc_exact_nid_bottom_up_numa_part_reserved_check()
380 ASSERT_EQ(new_rgn->base, req_node->base); in alloc_exact_nid_bottom_up_numa_part_reserved_check()
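Because the new bottom-up region is placed immediately after the reserved half of the node, memblock coalesces the two. A sketch of the merge expectation, consistent with the matched assertions:

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     memblock_start_of_DRAM(),
						     memblock_end_of_DRAM(),
						     nid_req);

	/* the new region abuts r1, so memblock merges them into one entry */
	total_size = size + r1.size;
	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, req_node->base);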
392 * A test that tries to allocate a memory region that spans over the min_addr
400 * | +-----------------------+-----------+ |
402 * +-----------+-----------------------+-----------+--------------+
404 * | +-----------+ |
406 * +-----------+-----------+--------------------------------------+
408 * Expect to drop the lower limit and allocate a memory region at the beginning
415 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_bottom_up_numa_split_range_low_check()
426 min_addr = req_node_end - SZ_256; in alloc_exact_nid_bottom_up_numa_split_range_low_check()
436 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_bottom_up_numa_split_range_low_check()
437 ASSERT_EQ(new_rgn->base, req_node->base); in alloc_exact_nid_bottom_up_numa_split_range_low_check()
449 * A test that tries to allocate a memory region that spans over the min_addr
457 * | +---------------+ +-------------+---------+ |
459 * +----+---------------+--------+-------------+---------+---------+
461 * | +---------+ |
463 * +----+---------+------------------------------------------------+
465 * Expect to drop the lower limit and allocate a memory region that starts at
472 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_bottom_up_numa_no_overlap_split_check()
473 struct memblock_region *node2 = &memblock.memory.regions[6]; in alloc_exact_nid_bottom_up_numa_no_overlap_split_check()
483 min_addr = node2->base - SZ_256; in alloc_exact_nid_bottom_up_numa_no_overlap_split_check()
493 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_bottom_up_numa_no_overlap_split_check()
494 ASSERT_EQ(new_rgn->base, req_node->base); in alloc_exact_nid_bottom_up_numa_no_overlap_split_check()
506 * A test that tries to allocate memory within min_addr and max_addr range when in alloc_exact_nid_bottom_up_numa_no_overlap_low_check()
515 * |-----------+ +----------+----...----+----------+ |
517 * +-----------+-----------+----------+----...----+----------+------+
519 * |-----+ |
521 * +-----+----------------------------------------------------------+
523 * Expect to drop the lower limit and allocate a memory region that starts at
530 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_bottom_up_numa_no_overlap_low_check()
531 struct memblock_region *min_node = &memblock.memory.regions[2]; in alloc_exact_nid_bottom_up_numa_no_overlap_low_check()
532 struct memblock_region *max_node = &memblock.memory.regions[5]; in alloc_exact_nid_bottom_up_numa_no_overlap_low_check()
541 min_addr = min_node->base; in alloc_exact_nid_bottom_up_numa_no_overlap_low_check()
551 ASSERT_EQ(new_rgn->size, size); in alloc_exact_nid_bottom_up_numa_no_overlap_low_check()
552 ASSERT_EQ(new_rgn->base, req_node->base); in alloc_exact_nid_bottom_up_numa_no_overlap_low_check()
564 * A test that tries to allocate a memory region in a specific NUMA node that
565 * does not have enough memory to allocate a region of the requested size:
567 * | +-----+ |
569 * +---+-----+----------------------------+
571 * +---------+
573 * +---------+
580 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_numa_small_node_generic_check()
589 size = SZ_2 * req_node->size; in alloc_exact_nid_numa_small_node_generic_check()
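This is the first of the failure-path checks: the request is twice the node's size, and since an exact-nid allocation may not fall back to another node, it must return NULL. The fully reserved and not-enough-contiguous-memory variants below follow the same shape. A sketch:

	size = SZ_2 * req_node->size;	/* larger than the whole node */

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     memblock_start_of_DRAM(),
						     memblock_end_of_DRAM(),
						     nid_req);

	/* exact-nid never falls back to a different node */
	ASSERT_EQ(allocated_ptr, NULL);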
605 * A test that tries to allocate a memory region in a specific NUMA node that
608 * | +---------+ |
610 * +--------------+---------+-------------+
612 * | +---------+ |
614 * +--------------+---------+-------------+
621 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_numa_node_reserved_generic_check()
630 size = req_node->size; in alloc_exact_nid_numa_node_reserved_generic_check()
634 memblock_reserve(req_node->base, req_node->size); in alloc_exact_nid_numa_node_reserved_generic_check()
647 * A test that tries to allocate a memory region in a specific NUMA node that
648 * is partially reserved and does not have enough contiguous memory for the
649 * allocated region:
651 * | +-----------------------+ |
653 * +-----------+-----------------------+----+
655 * | +----------+ |
657 * +-----------------+----------+-----------+
664 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_numa_part_reserved_fail_generic_check()
666 struct region r1; in alloc_exact_nid_numa_part_reserved_fail_generic_check()
674 ASSERT_LE(SZ_4, req_node->size); in alloc_exact_nid_numa_part_reserved_fail_generic_check()
675 size = req_node->size / SZ_2; in alloc_exact_nid_numa_part_reserved_fail_generic_check()
676 r1.base = req_node->base + (size / SZ_2); in alloc_exact_nid_numa_part_reserved_fail_generic_check()
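Here the reserved block sits in the middle of the node, so although half of the node is free in total, no contiguous half-node-sized range remains. A sketch of the setup (r1.size is an assumption; the listing only shows its base):

	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);	/* mid-node reservation */
	r1.size = size;					/* assumed */
	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     memblock_start_of_DRAM(),
						     memblock_end_of_DRAM(),
						     nid_req);

	/* the free space is split around r1; nothing contiguous fits */
	ASSERT_EQ(allocated_ptr, NULL);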
695 * A test that tries to allocate a memory region that spans over the min_addr
703 * | +--------------------------+---------+ |
705 * +------+--------------------------+---------+----------------+
712 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_numa_split_range_high_generic_check()
721 min_addr = req_node->base - SZ_256; in alloc_exact_nid_numa_split_range_high_generic_check()
736 * A test that tries to allocate memory within min_addr and max_addr range when in alloc_exact_nid_numa_no_overlap_high_generic_check()
745 * | +----------+----...----+----------+ +-----------+ |
747 * +-----+----------+----...----+----------+--------+-----------+---+
754 struct memblock_region *min_node = &memblock.memory.regions[2]; in alloc_exact_nid_numa_no_overlap_high_generic_check()
755 struct memblock_region *max_node = &memblock.memory.regions[5]; in alloc_exact_nid_numa_no_overlap_high_generic_check()
764 min_addr = min_node->base; in alloc_exact_nid_numa_no_overlap_high_generic_check()
779 * A test that tries to allocate a memory region in a specific NUMA node that
780 * does not have enough memory to allocate a region of the requested size.
781 * Additionally, none of the nodes have enough memory to allocate the region:
783 * +-----------------------------------+
785 * +-----------------------------------+
786 * |-------+-------+-------+-------+-------+-------+-------+-------|
788 * +-------+-------+-------+-------+-------+-------+-------+-------+
817 * A test that tries to allocate memory within min_addr and max_addr range when
819 * min_addr and ends at max_addr and is the same size as the region to be
826 * | +-----------+-----------------------+-----------------------|
828 * +------+-----------+-----------------------+-----------------------+
830 * | +----+-----------------------+----+ |
832 * +-------------+----+-----------------------+----+------------------+
834 * Expect to merge all of the regions into one. The region counter and total
842 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_numa_reserved_full_merge_generic_check()
843 struct memblock_region *next_node = &memblock.memory.regions[nid_next]; in alloc_exact_nid_numa_reserved_full_merge_generic_check()
845 struct region r1, r2; in alloc_exact_nid_numa_reserved_full_merge_generic_check()
846 phys_addr_t size = req_node->size; in alloc_exact_nid_numa_reserved_full_merge_generic_check()
854 r1.base = next_node->base; in alloc_exact_nid_numa_reserved_full_merge_generic_check()
858 r2.base = r1.base - (size + r2.size); in alloc_exact_nid_numa_reserved_full_merge_generic_check()
874 ASSERT_EQ(new_rgn->size, total_size); in alloc_exact_nid_numa_reserved_full_merge_generic_check()
875 ASSERT_EQ(new_rgn->base, r2.base); in alloc_exact_nid_numa_reserved_full_merge_generic_check()
877 ASSERT_LE(new_rgn->base, req_node->base); in alloc_exact_nid_numa_reserved_full_merge_generic_check()
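The two reserved blocks are laid out so that the hole between them is exactly the requested node. A sketch of the geometry; the SZ_128 sizes and the min/max derivation are assumptions consistent with the matched lines:

	r1.base = next_node->base;		/* flush against the node's end */
	r1.size = SZ_128;			/* assumed */
	r2.size = SZ_128;			/* assumed */
	r2.base = r1.base - (size + r2.size);	/* flush against the node's base */

	total_size = r1.size + r2.size + size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	/* the allocation fills the gap, so all three regions merge into one */
	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, r2.base);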
889 * A test that tries to allocate memory within min_addr and max_addr range, in alloc_exact_nid_numa_split_all_reserved_generic_check()
890 * where the total range can fit the region, but it is split between two nodes
894 * +-----------+
896 * +-----------+
897 * | +---------------------+-----------|
899 * +------+---------------------+-----------+
901 * |----------------------+ +-----|
903 * +----------------------+-----------+-----+
915 struct memblock_region *next_node = &memblock.memory.regions[7]; in alloc_exact_nid_numa_split_all_reserved_generic_check()
916 struct region r1, r2; in alloc_exact_nid_numa_split_all_reserved_generic_check()
924 r2.base = next_node->base + SZ_128; in alloc_exact_nid_numa_split_all_reserved_generic_check()
925 r2.size = memblock_end_of_DRAM() - r2.base; in alloc_exact_nid_numa_split_all_reserved_generic_check()
927 r1.size = MEM_SIZE - (r2.size + size); in alloc_exact_nid_numa_split_all_reserved_generic_check()
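The final check reserves everything except a hole that straddles the boundary between the requested node and the next one, so no single node can satisfy the request. A sketch (r1.base and the failing expectation are assumptions consistent with the comment):

	r2.base = next_node->base + SZ_128;
	r2.size = memblock_end_of_DRAM() - r2.base;
	r1.size = MEM_SIZE - (r2.size + size);
	r1.base = memblock_start_of_DRAM();	/* assumed */

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     memblock_start_of_DRAM(),
						     memblock_end_of_DRAM(),
						     nid_req);

	/* the only free hole spans two nodes, so an exact-nid request fails */
	ASSERT_EQ(allocated_ptr, NULL);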