Lines Matching full:class (mm/zsmalloc.c)
38 * class->lock
139 * determined). NOTE: all those class sizes must be set as multiple of
189 * Size of objects stored in this class. Must be multiple
250 unsigned int class:CLASS_BITS + 1; member
351 /* class->lock(which owns the handle) synchronizes races */
446 /* Protected by class->lock */
497 return pool->size_class[zspage->class]; in zspage_class()
502 * class maintains a list of zspages where each zspage is divided
505 * size class which has chunk size big enough to hold the given size.
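The comment above describes the size-to-class mapping, but the lookup itself never matches "class" and so does not appear in this listing. Below is a minimal sketch of what get_size_class_index() is assumed to do; the constant names (ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA, ZS_SIZE_CLASSES) come from mm/zsmalloc.c, but treat the exact rounding as an assumption rather than a verbatim copy.

static int get_size_class_index_sketch(int size)
{
	int idx = 0;

	/* class sizes grow in fixed ZS_SIZE_CLASS_DELTA steps */
	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				   ZS_SIZE_CLASS_DELTA);

	/* oversized requests are clamped into the last (huge) class */
	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
}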
518 static inline void class_stat_add(struct size_class *class, int type, in class_stat_add() argument
521 class->stats.objs[type] += cnt; in class_stat_add()
524 static inline void class_stat_sub(struct size_class *class, int type, in class_stat_sub() argument
527 class->stats.objs[type] -= cnt; in class_stat_sub()
530 static inline unsigned long class_stat_read(struct size_class *class, int type) in class_stat_read() argument
532 return class->stats.objs[type]; in class_stat_read()
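class_stat_add(), class_stat_sub() and class_stat_read() above all index one flat counter array. A minimal sketch of that storage follows; note that the fullness buckets and the two object counters share a single index space, which is why insert_zspage() further down can pass a fullness group straight in as the stat type. The enumerator names are modelled on recent mm/zsmalloc.c and should be read as assumptions.

enum fullness_group {
	ZS_INUSE_RATIO_0,
	ZS_INUSE_RATIO_10,
	/* ... one bucket per further 10% step ... */
	ZS_INUSE_RATIO_100,
	NR_FULLNESS_GROUPS,
};

enum class_stat_type {
	/* object counters live after the fullness buckets */
	ZS_OBJS_ALLOCATED = NR_FULLNESS_GROUPS,
	ZS_OBJS_INUSE,
	NR_CLASS_STAT_TYPES,
};

struct zs_size_stat {
	unsigned long objs[NR_CLASS_STAT_TYPES];
};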
552 static unsigned long zs_can_compact(struct size_class *class);
558 struct size_class *class; in zs_stats_size_show() local
566 "class", "size", "10%", "20%", "30%", "40%", in zs_stats_size_show()
573 class = pool->size_class[i]; in zs_stats_size_show()
575 if (class->index != i) in zs_stats_size_show()
578 spin_lock(&class->lock); in zs_stats_size_show()
580 seq_printf(s, " %5u %5u ", i, class->size); in zs_stats_size_show()
582 inuse_totals[fg] += class_stat_read(class, fg); in zs_stats_size_show()
583 seq_printf(s, "%9lu ", class_stat_read(class, fg)); in zs_stats_size_show()
586 obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); in zs_stats_size_show()
587 obj_used = class_stat_read(class, ZS_OBJS_INUSE); in zs_stats_size_show()
588 freeable = zs_can_compact(class); in zs_stats_size_show()
589 spin_unlock(&class->lock); in zs_stats_size_show()
591 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
593 class->pages_per_zspage; in zs_stats_size_show()
597 class->pages_per_zspage, freeable); in zs_stats_size_show()
657 * For each size class, zspages are divided into different groups
661 static int get_fullness_group(struct size_class *class, struct zspage *zspage) in get_fullness_group() argument
666 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
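get_fullness_group() is only partially visible above, so the following sketch reconstructs the bucketing it is assumed to perform: the empty and completely full cases are pinned to the first and last ZS_INUSE_RATIO_* groups, and everything in between is split into roughly 10% steps. It is an illustration, not a verbatim copy.

static int get_fullness_group_sketch(struct size_class *class,
				     struct zspage *zspage)
{
	int inuse = get_zspage_inuse(zspage);
	int objs_per_zspage = class->objs_per_zspage;
	int ratio;

	if (inuse == 0)
		return ZS_INUSE_RATIO_0;
	if (inuse == objs_per_zspage)
		return ZS_INUSE_RATIO_100;

	/* everything else falls into one of the intermediate buckets */
	ratio = 100 * inuse / objs_per_zspage;
	return ratio / 10 + 1;
}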
683 * Each size class maintains various freelists and zspages are assigned
686 * identified by <class, fullness_group>.
688 static void insert_zspage(struct size_class *class, in insert_zspage() argument
692 class_stat_add(class, fullness, 1); in insert_zspage()
693 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
699 * by <class, fullness_group>.
701 static void remove_zspage(struct size_class *class, struct zspage *zspage) in remove_zspage() argument
705 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
708 class_stat_sub(class, fullness, 1); in remove_zspage()
712 * Each size class maintains zspages in different fullness groups depending
720 static int fix_fullness_group(struct size_class *class, struct zspage *zspage) in fix_fullness_group() argument
724 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
728 remove_zspage(class, zspage); in fix_fullness_group()
729 insert_zspage(class, zspage, newfg); in fix_fullness_group()
840 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
845 assert_spin_locked(&class->lock); in __free_zspage()
863 class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in __free_zspage()
864 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); in __free_zspage()
867 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
883 remove_zspage(class, zspage); in free_zspage()
884 __free_zspage(pool, class, zspage); in free_zspage()
888 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
904 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
906 link += class->size / sizeof(*link); in init_zspage()
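The two init_zspage() lines above are the heart of the free-list setup: every slot of a fresh zspage starts out free, and the first word of each slot is reused as a link holding the encoded index of the next free slot. Below is a sketch of that in-page walk, assuming the struct link_free and OBJ_TAG_BITS encoding from mm/zsmalloc.c; the real function additionally links the last slot of each physical page to the first slot of the next page.

static void thread_free_list_in_page_sketch(struct size_class *class,
					    void *vaddr, unsigned long off,
					    unsigned int *freeobj)
{
	/* the free slot itself stores the link to its successor */
	struct link_free *link = vaddr + off;

	while ((off += class->size) < PAGE_SIZE) {
		link->next = (unsigned long)(*freeobj)++ << OBJ_TAG_BITS;
		link += class->size / sizeof(*link);
	}
}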
932 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
938 int nr_pages = class->pages_per_zspage; in create_page_chain()
955 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
956 class->pages_per_zspage == 1)) in create_page_chain()
966 * Allocate a zspage for the given size class
969 struct size_class *class, in alloc_zspage() argument
982 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1001 create_page_chain(class, zspage, pages); in alloc_zspage()
1002 init_zspage(class, zspage); in alloc_zspage()
1004 zspage->class = class->index; in alloc_zspage()
1009 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1015 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1131 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1133 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1154 struct size_class *class; in zs_lookup_class_index() local
1156 class = pool->size_class[get_size_class_index(size)]; in zs_lookup_class_index()
1158 return class->index; in zs_lookup_class_index()
1191 struct size_class *class; in zs_map_object() local
1210 * migration cannot move any zpages in this zspage. Here, class->lock in zs_map_object()
1212 * zs_unmap_object API so delegate the locking from class to zspage in zs_map_object()
1218 class = zspage_class(pool, zspage); in zs_map_object()
1219 off = offset_in_page(class->size * obj_idx); in zs_map_object()
1224 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1236 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1252 struct size_class *class; in zs_unmap_object() local
1258 class = zspage_class(pool, zspage); in zs_unmap_object()
1259 off = offset_in_page(class->size * obj_idx); in zs_unmap_object()
1262 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1271 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
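zs_map_object() and zs_unmap_object() above both branch on whether the object fits inside a single page. The sketch below illustrates the two outcomes, assuming a per-CPU bounce buffer like the one the real code keeps in its mapping area; page_vaddr[] stands in for the kernel mappings of the one or two pages backing the object.

static void *map_object_sketch(void *page_vaddr[2], unsigned int off,
			       unsigned int size, char *percpu_buf)
{
	unsigned int first_part = PAGE_SIZE - off;

	if (off + size <= PAGE_SIZE)
		return page_vaddr[0] + off;	/* fits: use it in place */

	/* spans two pages: stitch both halves together in the buffer */
	memcpy(percpu_buf, page_vaddr[0] + off, first_part);
	memcpy(percpu_buf + first_part, page_vaddr[1], size - first_part);
	return percpu_buf;
}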
1284 * The function returns the size of the first huge class - any object of equal
1304 struct size_class *class; in obj_malloc() local
1310 class = pool->size_class[zspage->class]; in obj_malloc()
1313 offset = obj * class->size; in obj_malloc()
1354 struct size_class *class; in zs_malloc() local
1370 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1372 /* class->lock effectively protects the zpage migration */ in zs_malloc()
1373 spin_lock(&class->lock); in zs_malloc()
1374 zspage = find_get_zspage(class); in zs_malloc()
1378 fix_fullness_group(class, zspage); in zs_malloc()
1379 class_stat_add(class, ZS_OBJS_INUSE, 1); in zs_malloc()
1384 spin_unlock(&class->lock); in zs_malloc()
1386 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1392 spin_lock(&class->lock); in zs_malloc()
1394 newfg = get_fullness_group(class, zspage); in zs_malloc()
1395 insert_zspage(class, zspage, newfg); in zs_malloc()
1396 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); in zs_malloc()
1397 class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1398 class_stat_add(class, ZS_OBJS_INUSE, 1); in zs_malloc()
1403 spin_unlock(&class->lock); in zs_malloc()
1441 struct size_class *class; in zs_free() local
1455 class = zspage_class(pool, zspage); in zs_free()
1456 spin_lock(&class->lock); in zs_free()
1459 class_stat_sub(class, ZS_OBJS_INUSE, 1); in zs_free()
1460 obj_free(class->size, obj); in zs_free()
1462 fullness = fix_fullness_group(class, zspage); in zs_free()
1464 free_zspage(pool, class, zspage); in zs_free()
1466 spin_unlock(&class->lock); in zs_free()
1471 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1481 s_size = d_size = class->size; in zs_object_copy()
1486 s_off = offset_in_page(class->size * s_objidx); in zs_object_copy()
1487 d_off = offset_in_page(class->size * d_objidx); in zs_object_copy()
1489 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1492 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1503 if (written == class->size) in zs_object_copy()
1524 s_size = class->size - written; in zs_object_copy()
1532 d_size = class->size - written; in zs_object_copy()
1545 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1554 offset += class->size * index; in find_alloced_obj()
1560 offset += class->size; in find_alloced_obj()
1578 struct size_class *class = pool->size_class[src_zspage->class]; in migrate_zspage() local
1581 handle = find_alloced_obj(class, s_page, &obj_idx); in migrate_zspage()
1592 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1594 obj_free(class->size, used_obj); in migrate_zspage()
1597 if (zspage_full(class, dst_zspage)) in migrate_zspage()
1606 static struct zspage *isolate_src_zspage(struct size_class *class) in isolate_src_zspage() argument
1612 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_src_zspage()
1615 remove_zspage(class, zspage); in isolate_src_zspage()
1623 static struct zspage *isolate_dst_zspage(struct size_class *class) in isolate_dst_zspage() argument
1629 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_dst_zspage()
1632 remove_zspage(class, zspage); in isolate_dst_zspage()
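isolate_src_zspage() and isolate_dst_zspage() above differ mainly in which end of the fullness lists they scan first: compaction drains the emptiest candidate zspages (source) into the fullest ones that still have room (destination). A combined sketch of that policy follows, with the exact loop bounds taken as assumptions.

static struct zspage *isolate_zspage_sketch(struct size_class *class,
					    bool source)
{
	int step = source ? 1 : -1;
	int fg = source ? ZS_INUSE_RATIO_10 : ZS_INUSE_RATIO_100 - 1;
	struct zspage *zspage;

	/* skip empty (freed elsewhere) and completely full zspages */
	for (; fg >= ZS_INUSE_RATIO_10 && fg < ZS_INUSE_RATIO_100; fg += step) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg],
						  struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage);
			return zspage;
		}
	}
	return NULL;
}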
1641 * putback_zspage - add @zspage into right class's fullness list
1642 * @class: destination class
1647 static int putback_zspage(struct size_class *class, struct zspage *zspage) in putback_zspage() argument
1651 fullness = get_fullness_group(class, zspage); in putback_zspage()
1652 insert_zspage(class, zspage, fullness); in putback_zspage()
1730 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1746 create_page_chain(class, zspage, pages); in replace_sub_page()
1768 struct size_class *class; in zs_page_migrate() local
1791 class = zspage_class(pool, zspage); in zs_page_migrate()
1794 * the class lock protects zpage alloc/free in the zspage. in zs_page_migrate()
1796 spin_lock(&class->lock); in zs_page_migrate()
1811 addr += class->size) { in zs_page_migrate()
1823 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
1829 spin_unlock(&class->lock); in zs_page_migrate()
1862 struct size_class *class; in async_free_zspage() local
1869 class = pool->size_class[i]; in async_free_zspage()
1870 if (class->index != i) in async_free_zspage()
1873 spin_lock(&class->lock); in async_free_zspage()
1874 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0], in async_free_zspage()
1876 spin_unlock(&class->lock); in async_free_zspage()
1883 class = zspage_class(pool, zspage); in async_free_zspage()
1884 spin_lock(&class->lock); in async_free_zspage()
1885 class_stat_sub(class, ZS_INUSE_RATIO_0, 1); in async_free_zspage()
1886 __free_zspage(pool, class, zspage); in async_free_zspage()
1887 spin_unlock(&class->lock); in async_free_zspage()
1925 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
1928 unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); in zs_can_compact()
1929 unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE); in zs_can_compact()
1935 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
1937 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
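zs_can_compact() above estimates how many pages compaction could release: the number of allocated-but-unused object slots, expressed in whole zspages, times the pages per zspage. A worked example with made-up numbers:

static unsigned long zs_can_compact_example(void)
{
	unsigned long obj_allocated = 320, obj_used = 170;	/* hypothetical */
	unsigned long objs_per_zspage = 32, pages_per_zspage = 2;
	unsigned long obj_wasted = obj_allocated - obj_used;	/* 150 idle slots */

	obj_wasted /= objs_per_zspage;		/* 150 / 32 = 4 whole zspages */
	return obj_wasted * pages_per_zspage;	/* 4 * 2 = 8 freeable pages */
}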
1941 struct size_class *class) in __zs_compact() argument
1952 spin_lock(&class->lock); in __zs_compact()
1953 while (zs_can_compact(class)) { in __zs_compact()
1957 dst_zspage = isolate_dst_zspage(class); in __zs_compact()
1962 src_zspage = isolate_src_zspage(class); in __zs_compact()
1970 fg = putback_zspage(class, src_zspage); in __zs_compact()
1972 free_zspage(pool, class, src_zspage); in __zs_compact()
1973 pages_freed += class->pages_per_zspage; in __zs_compact()
1977 if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100 in __zs_compact()
1979 putback_zspage(class, dst_zspage); in __zs_compact()
1982 spin_unlock(&class->lock); in __zs_compact()
1986 spin_lock(&class->lock); in __zs_compact()
1991 putback_zspage(class, src_zspage); in __zs_compact()
1994 putback_zspage(class, dst_zspage); in __zs_compact()
1996 spin_unlock(&class->lock); in __zs_compact()
2005 struct size_class *class; in zs_compact() local
2018 class = pool->size_class[i]; in zs_compact()
2019 if (class->index != i) in zs_compact()
2021 pages_freed += __zs_compact(pool, class); in zs_compact()
2056 struct size_class *class; in zs_shrinker_count() local
2061 class = pool->size_class[i]; in zs_shrinker_count()
2062 if (class->index != i) in zs_shrinker_count()
2065 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2152 struct size_class *class; in zs_create_pool() local
2164 * class. Any object bigger than or equal to that will in zs_create_pool()
2165 * endup in the huge class. in zs_create_pool()
2174 * size class search - so object may be smaller than in zs_create_pool()
2175 * huge class size, yet it still can end up in the huge in zs_create_pool()
2176 * class because it grows by ZS_HANDLE_SIZE extra bytes in zs_create_pool()
2177 * right before class lookup. in zs_create_pool()
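The comment above is easier to follow with concrete numbers. The sketch below uses hypothetical values for ZS_HANDLE_SIZE and for the first single-object ("huge") class size; only the arithmetic is the point.

static size_t huge_class_threshold_example(void)
{
	size_t handle_size = 8;		/* hypothetical ZS_HANDLE_SIZE */
	size_t first_huge_chunk = 3264;	/* hypothetical first huge class size */

	/*
	 * A 3257-byte request is looked up as 3257 + 8 = 3265 bytes, which no
	 * 3264-byte (or smaller) chunk can hold, so it lands in a huge class
	 * even though the caller asked for less than 3264 bytes. Hence the
	 * threshold reported to callers is lowered by (handle size - 1):
	 */
	return first_huge_chunk - (handle_size - 1);	/* 3257 */
}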
2198 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2199 if (!class) in zs_create_pool()
2202 class->size = size; in zs_create_pool()
2203 class->index = i; in zs_create_pool()
2204 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2205 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2206 spin_lock_init(&class->lock); in zs_create_pool()
2207 pool->size_class[i] = class; in zs_create_pool()
2211 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2215 prev_class = class; in zs_create_pool()
2247 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2249 if (!class) in zs_destroy_pool()
2252 if (class->index != i) in zs_destroy_pool()
2256 if (list_empty(&class->fullness_list[fg])) in zs_destroy_pool()
2259 pr_err("Class-%d fullness group %d is not empty\n", in zs_destroy_pool()
2260 class->size, fg); in zs_destroy_pool()
2262 kfree(class); in zs_destroy_pool()
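For context, a minimal caller-side sketch of the pool lifecycle that the functions in this listing implement, roughly in the style of zram/zswap. Error handling is simplified, and the failure-encoding check on the handle follows recent kernels; treat those details as assumptions.

#include <linux/zsmalloc.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int zsmalloc_usage_sketch(void)
{
	struct zs_pool *pool;
	unsigned long handle;
	char payload[64] = "example";
	void *dst;

	pool = zs_create_pool("sketch");
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, sizeof(payload), GFP_KERNEL);
	if (IS_ERR_VALUE(handle)) {	/* assumption: errno-encoded on failure */
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* handles are not pointers: map the object before touching it */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, payload, sizeof(payload));
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}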