Lines matching "fine-tune"
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * an RB-tree is used instead. At least if we expect heavy fragmentation.
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * Note that this range allocator is not thread-safe; drivers need to protect
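The two notes above (insertion modes and the lack of internal locking) shape every consumer of this allocator. Below is a minimal sketch of how a driver might wrap it; struct demo_vm and demo_vm_alloc() are hypothetical, only the drm_mm_* and mutex calls are real API.

/* --- illustrative sketch, not part of drm_mm.c --- */
#include <linux/mutex.h>
#include <drm/drm_mm.h>

struct demo_vm {			/* hypothetical driver structure */
	struct drm_mm mm;
	struct mutex lock;		/* drm_mm itself is not thread-safe */
};

static int demo_vm_alloc(struct demo_vm *vm, struct drm_mm_node *node,
			 u64 size, bool top_down)
{
	int ret;

	mutex_lock(&vm->lock);
	/* DRM_MM_INSERT_HIGH allocates top-down, _LOW bottom-up. */
	ret = drm_mm_insert_node_generic(&vm->mm, node, size, 0, 0,
					 top_down ? DRM_MM_INSERT_HIGH
						  : DRM_MM_INSERT_LOW);
	mutex_unlock(&vm->lock);
	return ret;
}
/* --- end sketch --- */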
/* in save_stack() */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);

/* in show_leaks() */
	if (!node->stack) {
			  node->start, node->size);
	stack_depot_snprint(node->stack, buf, BUFSZ, 0);
		  node->start, node->size, buf);
#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

/* in INTERVAL_TREE_DEFINE() */
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
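The ?: fallback above returns the &mm->head_node sentinel when nothing overlaps, which is what lets the public range iterator terminate without a NULL check. A sketch of the consumer side; the function name is hypothetical, but drm_mm_for_each_node_in_range() is the real iterator from <drm/drm_mm.h>.

/* --- illustrative sketch, not part of drm_mm.c --- */
#include <linux/printk.h>
#include <drm/drm_mm.h>

static void demo_log_range(struct drm_mm *mm, u64 start, u64 end)
{
	struct drm_mm_node *node;

	/* Visits every allocated node overlapping [start, end). */
	drm_mm_for_each_node_in_range(node, mm, start, end)
		pr_info("node [%#llx + %#llx]\n", node->start, node->size);
}
/* --- end sketch --- */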
/* in drm_mm_interval_tree_add_node() */
	struct drm_mm *mm = hole_node->mm;

	node->__subtree_last = LAST(node);
		rb = &hole_node->rb;
			if (parent->__subtree_last >= node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		link = &mm->interval_tree.rb_root.rb_node;
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
			link = &parent->rb.rb_right;
	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,

#define HOLE_SIZE(NODE) ((NODE)->hole_size)

/* in rb_to_hole_size() */
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;

/* in insert_hole_size() */
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
			link = &rb->rb_left;
			link = &rb->rb_right;
	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);

/* in RB_DECLARE_CALLBACKS_MAX() */
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
			link = &parent->rb_hole_addr.rb_left;
			link = &parent->rb_hole_addr.rb_right;
	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
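RB_DECLARE_CALLBACKS_MAX() generates the propagate/copy/rotate callbacks that keep subtree_max_hole correct as the tree is rebalanced. The invariant it maintains is easy to state; the helper below is purely illustrative (the real callbacks are macro-generated, and nothing named demo_* exists in the kernel).

/* --- illustrative sketch, not part of drm_mm.c --- */
/*
 * Invariant maintained for every node of the address-ordered hole tree:
 * subtree_max_hole = max(own hole_size, children's subtree_max_hole).
 * usable_hole_addr() relies on it to prune entire subtrees that cannot
 * contain a hole of the requested size.
 */
static u64 demo_subtree_max_hole(struct drm_mm_node *node)
{
	u64 max = node->hole_size;
	struct rb_node *rb;

	rb = node->rb_hole_addr.rb_left;
	if (rb)
		max = max_t(u64, max,
			    rb_entry(rb, struct drm_mm_node,
				     rb_hole_addr)->subtree_max_hole);

	rb = node->rb_hole_addr.rb_right;
	if (rb)
		max = max_t(u64, max,
			    rb_entry(rb, struct drm_mm_node,
				     rb_hole_addr)->subtree_max_hole);

	return max;
}
/* --- end sketch --- */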
/* in add_hole() */
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);
	list_add(&node->hole_stack, &mm->hole_stack);

/* in rm_hole() */
	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
	node->hole_size = 0;
	node->subtree_max_hole = 0;

/* in best_hole() */
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
		if (size <= node->hole_size) {
			rb = rb->rb_right;
			rb = rb->rb_left;

/* in usable_hole_addr() */
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;

/* in find_hole_addr() */
	struct rb_node *rb = mm->holes_addr.rb_node;
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;

/* in first_hole() */
	return list_first_entry_or_null(&mm->hole_stack,

/*
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 */
	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
	while ((parent = rb_parent(node)) && node == parent->first)	\
/* in DECLARE_NEXT_HOLE_ADDR() */
	return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
	return &node->hole_stack == &mm->hole_stack ? NULL : node;
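The function pair generated by this macro walks the address-ordered hole tree in either direction while skipping unusable subtrees via usable_hole_addr(). In the kernel source the macro is instantiated once per walk direction, essentially as follows (quoted from memory of drm_mm.c, so treat as indicative rather than verbatim):

/* As instantiated in the kernel source, one walker per direction: */
DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)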
 * drm_mm_reserve_node - insert a pre-initialized node
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * 0 on success, -ENOSPC if there's no hole where @node is.

/* in drm_mm_reserve_node() */
	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	hole = find_hole_addr(mm, node->start, 0);
		return -ENOSPC;

	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);

	node->hole_size = 0;

	if (node->start > hole_start)
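Per the kernel-doc above, a typical caller reserves fixed ranges (e.g. a firmware-initialized scanout buffer) before handing the allocator over to general use. A minimal sketch with hypothetical addresses and a hypothetical function name:

/* --- illustrative sketch, not part of drm_mm.c --- */
static int demo_reserve_fw_fb(struct drm_mm *mm)
{
	static struct drm_mm_node fw_fb;	/* static, so zero-initialized */
	int ret;

	fw_fb.start = 0x00100000;		/* hypothetical firmware address */
	fw_fb.size = 8 * 1024 * 1024;		/* hypothetical 8 MiB scanout */

	ret = drm_mm_reserve_node(mm, &fw_fb);
	if (ret)	/* -ENOSPC: no hole covers the requested range */
		return ret;

	return 0;
}
/* --- end sketch --- */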
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mode: fine-tune the allocation search and placement
 *
 * 0 on success, -ENOSPC if there's no suitable hole.

/* in drm_mm_insert_node_in_range() */
	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;

		u64 hole_end = hole_start + hole->hole_size;

		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)

			adj_start = adj_end - size;

				adj_start -= rem;
			    min(col_end, range_end) - adj_start < size)
		    adj_end - adj_start < size)

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);

	return -ENOSPC;
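Note the remainder_mask fast path above: for power-of-two alignments the modulo in the hot loop becomes a single mask instead of a div64_u64_rem() call. From the caller's side, a ranged, aligned, top-down insertion might look like the sketch below (function name, sizes and range are hypothetical):

/* --- illustrative sketch, not part of drm_mm.c --- */
#include <linux/sizes.h>
#include <drm/drm_mm.h>

static int demo_insert_high(struct drm_mm *mm, struct drm_mm_node *node)
{
	return drm_mm_insert_node_in_range(mm, node,
					   SZ_64K,	/* size */
					   SZ_4K,	/* pow2 alignment: mask fast path */
					   0,		/* color */
					   0, SZ_256M,	/* search range [start, end) */
					   DRM_MM_INSERT_HIGH);
}
/* --- end sketch --- */

On success the node must eventually be returned with drm_mm_remove_node() before the drm_mm is torn down.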
/* in drm_mm_node_scanned_block() */
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

 * drm_mm_remove_node - Remove a memory node from the allocator.
 * be cleared again before it can be re-inserted into this or any other drm_mm

/* in drm_mm_remove_node() */
	struct drm_mm *mm = node->mm;

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);

 * The DRM range allocator supports this use-case through the scanning

 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @mode: fine-tune the allocation search and placement
 *
 * As long as the scan list is non-empty, no other operations than
/* in drm_mm_scan_init_with_range() */
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
 * drm_mm_scan_add_block - add a node to the scan list

/* in drm_mm_scan_add_block() */
	struct drm_mm *mm = scan->mm;

	DRM_MM_BUG_ON(node->mm != mm);

	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	__list_del_entry(&node->node_list);

	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
			div64_u64_rem(adj_start, scan->alignment, &rem);

			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)

		    adj_end - adj_start < scan->size)

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);
 * drm_mm_scan_remove_block - remove a node from the scan list

/* in drm_mm_scan_remove_block() */
	DRM_MM_BUG_ON(node->mm != scan->mm);

	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	 * backwards correctly we check that prev_node->next == node->next,

	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
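Taken together, drm_mm_scan_add_block() and drm_mm_scan_remove_block() implement the two-phase eviction dance: speculatively pull LRU blocks out until a hole materializes, then put every block back, evicting only those the scan flags as overlapping the hole. A condensed sketch follows; struct demo_obj, its lists and demo_evict_something() are hypothetical, the drm_mm_* calls are real. Note that list_add() prepends, so walking evict_list forward removes blocks in reverse scan order, as the removal pass requires.

/* --- illustrative sketch, not part of drm_mm.c --- */
#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <drm/drm_mm.h>

struct demo_obj {			/* hypothetical driver object */
	struct drm_mm_node node;
	struct list_head lru;		/* link in the driver's LRU list */
	struct list_head evict;		/* temporary link while scanning */
};

static int demo_evict_something(struct drm_mm *mm, struct list_head *lru,
				u64 size)
{
	struct drm_mm_scan scan;
	struct demo_obj *obj, *next;
	LIST_HEAD(evict_list);

	drm_mm_scan_init_with_range(&scan, mm, size, 0, 0,
				    0, U64_MAX, DRM_MM_INSERT_BEST);

	/* Phase 1: speculatively add LRU blocks until a hole appears. */
	list_for_each_entry(obj, lru, lru) {
		list_add(&obj->evict, &evict_list);
		if (drm_mm_scan_add_block(&scan, &obj->node))
			goto found;
	}

	/* No hole even with everything scanned: roll back and fail. */
	list_for_each_entry_safe(obj, next, &evict_list, evict)
		drm_mm_scan_remove_block(&scan, &obj->node);
	return -ENOSPC;

found:
	/* Phase 2: every scanned block must be removed again; really
	 * evict only those that overlap the hole that was found. */
	list_for_each_entry_safe(obj, next, &evict_list, evict) {
		if (drm_mm_scan_remove_block(&scan, &obj->node))
			drm_mm_remove_node(&obj->node);	/* stand-in for the
							 * driver's eviction */
	}
	return 0;
}
/* --- end sketch --- */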
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole

/* in drm_mm_scan_color_evict() */
	struct drm_mm *mm = scan->mm;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)

	 * in the hole_stack list, but due to side-effects in the driver it

	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)

	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
	if (hole_end < scan->hit_end)
 * drm_mm_init - initialize a drm-mm allocator

/* in drm_mm_init() */
	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
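The head_node setup above is the allocator's sentinel trick: it sits at start + size with size = -size (unsigned wraparound), so the circular node list closes on itself and the initial hole spans exactly [start, start + size). From a driver's perspective the lifecycle is then just init and takedown; the sketch below uses a hypothetical 1 GiB aperture.

/* --- illustrative sketch, not part of drm_mm.c --- */
#include <linux/sizes.h>
#include <drm/drm_mm.h>

static struct drm_mm demo_mm;

static void demo_setup(void)
{
	drm_mm_init(&demo_mm, 0, SZ_1G);	/* one hole: [0, 1 GiB) */
}

static void demo_teardown(void)
{
	/* Legal only once every node has been removed again. */
	drm_mm_takedown(&demo_mm);
}
/* --- end sketch --- */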
 * drm_mm_takedown - clean up a drm_mm allocator

/* in drm_mm_dump_hole() */
	size = entry->hole_size;
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",

 * drm_mm_print - print allocator state

/* in drm_mm_print() */
	total_free += drm_mm_dump_hole(p, &mm->head_node);

		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
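drm_mm_print() walks the node list once, printing used segments and free holes through a struct drm_printer, so the same code serves debugfs files and the kernel log alike. A sketch of a caller (function name and device pointer hypothetical):

/* --- illustrative sketch, not part of drm_mm.c --- */
#include <drm/drm_mm.h>
#include <drm/drm_print.h>

static void demo_dump(struct drm_mm *mm, struct device *dev)
{
	struct drm_printer p = drm_info_printer(dev);	/* logs via dev_info() */

	drm_mm_print(mm, &p);
}
/* --- end sketch --- */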