Lines matching references to `b` — the `struct vmballoon *` balloon instance threaded through the VMware balloon driver. Each entry lists the source line number, the matching code, the enclosing function, and whether `b` is an argument or a local variable there.
406 static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op, in vmballoon_stats_op_inc() argument
410 atomic64_inc(&b->stats->ops[op][type]); in vmballoon_stats_op_inc()
413 static inline void vmballoon_stats_gen_inc(struct vmballoon *b, in vmballoon_stats_gen_inc() argument
417 atomic64_inc(&b->stats->general_stat[stat]); in vmballoon_stats_gen_inc()
420 static inline void vmballoon_stats_gen_add(struct vmballoon *b, in vmballoon_stats_gen_add() argument
425 atomic64_add(val, &b->stats->general_stat[stat]); in vmballoon_stats_gen_add()
428 static inline void vmballoon_stats_page_inc(struct vmballoon *b, in vmballoon_stats_page_inc() argument
433 atomic64_inc(&b->stats->page_stat[stat][size]); in vmballoon_stats_page_inc()
436 static inline void vmballoon_stats_page_add(struct vmballoon *b, in vmballoon_stats_page_add() argument
442 atomic64_add(val, &b->stats->page_stat[stat][size]); in vmballoon_stats_page_add()
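All five stats helpers above follow one pattern: take the balloon instance b and bump a single atomic64 counter inside b->stats. A minimal sketch of that pattern, assuming the struct layout implied by the indexing above; the array-dimension constants, the enum type name, and the "if (b->stats)" guard are assumptions for the sketch, not the driver's exact code.

struct vmballoon_stats {
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
	atomic64_t page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};

static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					  enum vmballoon_op_stat_type type)
{
	/* Assumed guard: only count once the stats buffer has been enabled. */
	if (b->stats)
		atomic64_inc(&b->stats->ops[op][type]);
}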
446 __vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1, in __vmballoon_cmd() argument
451 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT); in __vmballoon_cmd()
474 WRITE_ONCE(b->target, local_result); in __vmballoon_cmd()
478 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT); in __vmballoon_cmd()
486 b->reset_required = true; in __vmballoon_cmd()
492 vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1, in vmballoon_cmd() argument
497 return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy); in vmballoon_cmd()
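vmballoon_cmd() at line 492 is only a convenience wrapper for callers that do not need the hypervisor's result value: it hands __vmballoon_cmd() a throwaway output variable, while __vmballoon_cmd() itself does the stat accounting, updates b->target, and sets b->reset_required on a fatal status. A sketch of the wrapper as implied by line 497 (the exact signature is inferred):

static unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}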
504 static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps) in vmballoon_send_start() argument
508 status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0, in vmballoon_send_start()
513 b->capabilities = capabilities; in vmballoon_send_start()
516 b->capabilities = VMW_BALLOON_BASIC_CMDS; in vmballoon_send_start()
527 b->max_page_size = VMW_BALLOON_4K_PAGE; in vmballoon_send_start()
528 if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) && in vmballoon_send_start()
529 (b->capabilities & VMW_BALLOON_BATCHED_CMDS)) in vmballoon_send_start()
530 b->max_page_size = VMW_BALLOON_2M_PAGE; in vmballoon_send_start()
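The references in vmballoon_send_start() show the capability handshake: the negotiated capabilities are cached on b, with a fallback to VMW_BALLOON_BASIC_CMDS, and 2 MB pages are enabled only when both batched-command bits are present. A condensed sketch of that flow; the status constants, the error value, and the omitted 64-bit-target handling are assumptions:

static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/* 2 MB pages need both the batched and batched-2M capabilities. */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}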
548 static int vmballoon_send_guest_id(struct vmballoon *b) in vmballoon_send_guest_id() argument
552 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID, in vmballoon_send_guest_id()
621 static int vmballoon_send_get_target(struct vmballoon *b) in vmballoon_send_get_target() argument
629 if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) && in vmballoon_send_get_target()
633 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0); in vmballoon_send_get_target()
650 static int vmballoon_alloc_page_list(struct vmballoon *b, in vmballoon_alloc_page_list() argument
674 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC, in vmballoon_alloc_page_list()
685 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL, in vmballoon_alloc_page_list()
703 static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page, in vmballoon_handle_one_result() argument
716 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC, in vmballoon_handle_one_result()
735 static unsigned long vmballoon_status_page(struct vmballoon *b, int idx, in vmballoon_status_page() argument
740 *p = pfn_to_page(b->batch_page[idx].pfn); in vmballoon_status_page()
741 return b->batch_page[idx].status; in vmballoon_status_page()
745 *p = b->page; in vmballoon_status_page()
771 static unsigned long vmballoon_lock_op(struct vmballoon *b, in vmballoon_lock_op() argument
778 lockdep_assert_held(&b->comm_lock); in vmballoon_lock_op()
790 pfn = PHYS_PFN(virt_to_phys(b->batch_page)); in vmballoon_lock_op()
794 pfn = page_to_pfn(b->page); in vmballoon_lock_op()
801 return vmballoon_cmd(b, cmd, pfn, num_pages); in vmballoon_lock_op()
813 static void vmballoon_add_page(struct vmballoon *b, unsigned int idx, in vmballoon_add_page() argument
816 lockdep_assert_held(&b->comm_lock); in vmballoon_add_page()
819 b->batch_page[idx] = (struct vmballoon_batch_entry) in vmballoon_add_page()
822 b->page = p; in vmballoon_add_page()
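vmballoon_add_page() and vmballoon_status_page() are the two sides of the batching abstraction that vmballoon_lock_op() communicates to the host: with batching, pages are staged as {pfn, status} entries in b->batch_page[] and the host writes per-page status back; without it, the single page sits in b->page and only the overall command status matters. A sketch of the pair, assuming the mode test is simply "b->batch_page is set" (the driver may gate this differently) and using VMW_BALLOON_SUCCESS as the assumed per-page success value:

static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
			       struct page *p)
{
	lockdep_assert_held(&b->comm_lock);

	if (b->batch_page)
		b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
	else
		b->page = p;
}

static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
					   struct page **p)
{
	if (b->batch_page) {
		/* The host wrote the per-page result into the batch entry. */
		*p = pfn_to_page(b->batch_page[idx].pfn);
		return b->batch_page[idx].status;
	}

	/* Non-batched: one page, the command status covers it. */
	*p = b->page;
	return VMW_BALLOON_SUCCESS;
}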
844 static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl) in vmballoon_lock() argument
855 spin_lock(&b->comm_lock); in vmballoon_lock()
859 vmballoon_add_page(b, i++, page); in vmballoon_lock()
861 batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size, in vmballoon_lock()
872 status = vmballoon_status_page(b, i, &page); in vmballoon_lock()
882 if (!vmballoon_handle_one_result(b, page, ctl->page_size, in vmballoon_lock()
895 spin_unlock(&b->comm_lock); in vmballoon_lock()
929 static void vmballoon_release_refused_pages(struct vmballoon *b, in vmballoon_release_refused_pages() argument
932 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE, in vmballoon_release_refused_pages()
947 static int64_t vmballoon_change(struct vmballoon *b) in vmballoon_change() argument
951 size = atomic64_read(&b->size); in vmballoon_change()
952 target = READ_ONCE(b->target); in vmballoon_change()
959 if (b->reset_required) in vmballoon_change()
968 if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout))) in vmballoon_change()
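vmballoon_change() condenses the policy into one signed delta: current size versus the host-written target, forced to zero while a reset is pending or while the shrinker has recently deflated the balloon (b->shrink_timeout). A sketch under those assumptions; the real function also applies a small deflate slack that is not visible in the references above.

static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/* A pending reset wins over any resize request. */
	if (b->reset_required)
		return 0;

	/* Recently shrunk under memory pressure: hold off re-inflating. */
	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
		return 0;

	return target - size;	/* > 0: inflate, < 0: deflate */
}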
985 static void vmballoon_enqueue_page_list(struct vmballoon *b, in vmballoon_enqueue_page_list() argument
994 balloon_page_list_enqueue(&b->b_dev_info, pages); in vmballoon_enqueue_page_list()
1000 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); in vmballoon_enqueue_page_list()
1006 list_splice_init(pages, &b->huge_pages); in vmballoon_enqueue_page_list()
1009 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); in vmballoon_enqueue_page_list()
1028 static void vmballoon_dequeue_page_list(struct vmballoon *b, in vmballoon_dequeue_page_list() argument
1040 *n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages, in vmballoon_dequeue_page_list()
1046 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); in vmballoon_dequeue_page_list()
1047 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { in vmballoon_dequeue_page_list()
1057 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); in vmballoon_dequeue_page_list()
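The enqueue/dequeue pair shows the split by page size: 4 KB pages are handed to the common balloon-compaction list on b->b_dev_info, while 2 MB pages are kept on the driver's private b->huge_pages list, manipulated under the same pages_lock. A condensed sketch of the enqueue side; the page-size parameter type is assumed, and the VM-event accounting the real code also performs is omitted:

static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	unsigned long flags;

	if (page_size == VMW_BALLOON_4K_PAGE) {
		/* The core balloon infrastructure tracks 4 KB pages. */
		balloon_page_list_enqueue(&b->b_dev_info, pages);
	} else {
		/* Huge pages live on a private list, under the same lock. */
		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
		list_splice_init(pages, &b->huge_pages);
		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	}

	*n_pages = 0;
}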
1092 static void vmballoon_inflate(struct vmballoon *b) in vmballoon_inflate() argument
1099 .page_size = b->max_page_size, in vmballoon_inflate()
1103 while ((to_inflate_frames = vmballoon_change(b)) > 0) { in vmballoon_inflate()
1112 to_inflate_pages = min_t(unsigned long, b->batch_max_pages, in vmballoon_inflate()
1117 alloc_error = vmballoon_alloc_page_list(b, &ctl, in vmballoon_inflate()
1121 lock_error = vmballoon_lock(b, &ctl); in vmballoon_inflate()
1131 atomic64_add(ctl.n_pages * page_in_frames, &b->size); in vmballoon_inflate()
1133 vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages, in vmballoon_inflate()
1162 vmballoon_release_refused_pages(b, &ctl); in vmballoon_inflate()
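Taken together, the references in vmballoon_inflate() describe a loop driven by vmballoon_change(): allocate up to one batch of pages, lock them with the host, account the locked frames into b->size, and queue the pages for possible migration. A compressed sketch of that loop; struct vmballoon_ctl's field names, the VMW_BALLOON_INFLATE op value, the vmballoon_page_in_frames() helper, and the error handling are inferred or simplified here:

static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE,
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int page_in_frames =
				vmballoon_page_in_frames(ctl.page_size);
		unsigned int to_inflate_pages =
				min_t(unsigned long, b->batch_max_pages,
				      DIV_ROUND_UP(to_inflate_frames,
						   page_in_frames));

		if (vmballoon_alloc_page_list(b, &ctl, to_inflate_pages))
			break;
		if (vmballoon_lock(b, &ctl))
			break;

		atomic64_add(ctl.n_pages * page_in_frames, &b->size);
		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);
		cond_resched();
	}

	vmballoon_release_refused_pages(b, &ctl);
}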
1179 static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames, in vmballoon_deflate() argument
1211 -vmballoon_change(b); in vmballoon_deflate()
1221 to_deflate_pages = min_t(unsigned long, b->batch_max_pages, in vmballoon_deflate()
1226 vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages, in vmballoon_deflate()
1243 vmballoon_lock(b, &ctl); in vmballoon_deflate()
1255 atomic64_sub(n_unlocked_frames, &b->size); in vmballoon_deflate()
1258 vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE, in vmballoon_deflate()
1266 vmballoon_enqueue_page_list(b, &ctl.refused_pages, in vmballoon_deflate()
1272 if (ctl.page_size == b->max_page_size) in vmballoon_deflate()
1291 static void vmballoon_deinit_batching(struct vmballoon *b) in vmballoon_deinit_batching() argument
1293 free_page((unsigned long)b->batch_page); in vmballoon_deinit_batching()
1294 b->batch_page = NULL; in vmballoon_deinit_batching()
1296 b->batch_max_pages = 1; in vmballoon_deinit_batching()
1309 static int vmballoon_init_batching(struct vmballoon *b) in vmballoon_init_batching() argument
1317 b->batch_page = page_address(page); in vmballoon_init_batching()
1318 b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry); in vmballoon_init_batching()
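Batching setup is just one shared communication page: vmballoon_init_batching() allocates a page, publishes its kernel address in b->batch_page, and derives the batch capacity from how many struct vmballoon_batch_entry records fit in it; vmballoon_deinit_batching() frees it and falls back to one page per command. A sketch of the init side (the GFP flags and the precise error path are assumptions):

static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);

	return 0;
}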
1330 struct vmballoon *b = client_data; in vmballoon_doorbell() local
1332 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL); in vmballoon_doorbell()
1334 mod_delayed_work(system_freezable_wq, &b->dwork, 0); in vmballoon_doorbell()
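The doorbell callback is the host's way of requesting immediate attention: it bumps the doorbell statistic and pulls the delayed work forward to run now. Reassembled from the three references above (the callback's parameter name is assumed):

static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);

	/* The host signalled a target change: run the worker right away. */
	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}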
1340 static void vmballoon_vmci_cleanup(struct vmballoon *b) in vmballoon_vmci_cleanup() argument
1342 vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET, in vmballoon_vmci_cleanup()
1345 if (!vmci_handle_is_invalid(b->vmci_doorbell)) { in vmballoon_vmci_cleanup()
1346 vmci_doorbell_destroy(b->vmci_doorbell); in vmballoon_vmci_cleanup()
1347 b->vmci_doorbell = VMCI_INVALID_HANDLE; in vmballoon_vmci_cleanup()
1361 static int vmballoon_vmci_init(struct vmballoon *b) in vmballoon_vmci_init() argument
1365 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0) in vmballoon_vmci_init()
1368 error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB, in vmballoon_vmci_init()
1370 vmballoon_doorbell, b); in vmballoon_vmci_init()
1375 error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET, in vmballoon_vmci_init()
1376 b->vmci_doorbell.context, in vmballoon_vmci_init()
1377 b->vmci_doorbell.resource, NULL); in vmballoon_vmci_init()
1384 vmballoon_vmci_cleanup(b); in vmballoon_vmci_init()
1397 static void vmballoon_pop(struct vmballoon *b) in vmballoon_pop() argument
1401 while ((size = atomic64_read(&b->size))) in vmballoon_pop()
1402 vmballoon_deflate(b, size, false); in vmballoon_pop()
1410 static void vmballoon_reset(struct vmballoon *b) in vmballoon_reset() argument
1414 down_write(&b->conf_sem); in vmballoon_reset()
1416 vmballoon_vmci_cleanup(b); in vmballoon_reset()
1419 vmballoon_pop(b); in vmballoon_reset()
1421 if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES)) in vmballoon_reset()
1424 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) { in vmballoon_reset()
1425 if (vmballoon_init_batching(b)) { in vmballoon_reset()
1432 vmballoon_send_start(b, 0); in vmballoon_reset()
1435 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) { in vmballoon_reset()
1436 vmballoon_deinit_batching(b); in vmballoon_reset()
1439 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET); in vmballoon_reset()
1440 b->reset_required = false; in vmballoon_reset()
1442 error = vmballoon_vmci_init(b); in vmballoon_reset()
1446 if (vmballoon_send_guest_id(b)) in vmballoon_reset()
1450 up_write(&b->conf_sem); in vmballoon_reset()
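The reset path re-negotiates everything under the write side of conf_sem: tear down the VMCI doorbell, empty the balloon, redo the START handshake, switch batching on or off to match the capabilities, and only then clear reset_required and re-register the doorbell and guest ID. A condensed sketch of that ordering; error reporting is reduced to comments, and the capability-withdrawal fallback via vmballoon_send_start(b, 0) follows line 1432:

static void vmballoon_reset(struct vmballoon *b)
{
	down_write(&b->conf_sem);

	vmballoon_vmci_cleanup(b);
	vmballoon_pop(b);		/* give every page back first */

	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		goto unlock;

	if (b->capabilities & VMW_BALLOON_BATCHED_CMDS) {
		if (vmballoon_init_batching(b)) {
			/* Cannot batch: withdraw capabilities, retry later. */
			vmballoon_send_start(b, 0);
			goto unlock;
		}
	} else if (b->capabilities & VMW_BALLOON_BASIC_CMDS) {
		vmballoon_deinit_batching(b);
	}

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
	b->reset_required = false;

	/* The doorbell is optional; the periodic timer still drives us. */
	vmballoon_vmci_init(b);

	/* A guest-ID failure is only logged by the real driver. */
	vmballoon_send_guest_id(b);

unlock:
	up_write(&b->conf_sem);
}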
1464 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); in vmballoon_work() local
1467 if (b->reset_required) in vmballoon_work()
1468 vmballoon_reset(b); in vmballoon_work()
1470 down_read(&b->conf_sem); in vmballoon_work()
1477 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER); in vmballoon_work()
1479 if (!vmballoon_send_get_target(b)) in vmballoon_work()
1480 change = vmballoon_change(b); in vmballoon_work()
1484 atomic64_read(&b->size), READ_ONCE(b->target)); in vmballoon_work()
1487 vmballoon_inflate(b); in vmballoon_work()
1489 vmballoon_deflate(b, 0, true); in vmballoon_work()
1492 up_read(&b->conf_sem); in vmballoon_work()
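The worker ties the pieces together: handle a pending reset, take conf_sem for reading, refresh the target from the host, and inflate or deflate according to the sign of vmballoon_change() before re-arming itself. A sketch of the body (the re-arm interval and the dropped debug print are assumptions):

static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	int64_t change = 0;

	if (b->reset_required)
		vmballoon_reset(b);

	down_read(&b->conf_sem);

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);

	if (!vmballoon_send_get_target(b))
		change = vmballoon_change(b);

	if (change > 0)
		vmballoon_inflate(b);
	else if (change < 0)
		vmballoon_deflate(b, 0, true);

	up_read(&b->conf_sem);

	/* Assumed re-arm: roughly once per second on the freezable workqueue. */
	queue_delayed_work(system_freezable_wq, dwork,
			   round_jiffies_relative(HZ));
}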
1513 struct vmballoon *b = &balloon; in vmballoon_shrinker_scan() local
1516 pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size)); in vmballoon_shrinker_scan()
1518 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK); in vmballoon_shrinker_scan()
1524 if (!down_read_trylock(&b->conf_sem)) in vmballoon_shrinker_scan()
1527 deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true); in vmballoon_shrinker_scan()
1529 vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE, in vmballoon_shrinker_scan()
1537 WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY); in vmballoon_shrinker_scan()
1539 up_read(&b->conf_sem); in vmballoon_shrinker_scan()
1555 struct vmballoon *b = &balloon; in vmballoon_shrinker_count() local
1557 return atomic64_read(&b->size); in vmballoon_shrinker_count()
1560 static void vmballoon_unregister_shrinker(struct vmballoon *b) in vmballoon_unregister_shrinker() argument
1562 shrinker_free(b->shrinker); in vmballoon_unregister_shrinker()
1563 b->shrinker = NULL; in vmballoon_unregister_shrinker()
1566 static int vmballoon_register_shrinker(struct vmballoon *b) in vmballoon_register_shrinker() argument
1572 b->shrinker = shrinker_alloc(0, "vmw-balloon"); in vmballoon_register_shrinker()
1573 if (!b->shrinker) in vmballoon_register_shrinker()
1576 b->shrinker->scan_objects = vmballoon_shrinker_scan; in vmballoon_register_shrinker()
1577 b->shrinker->count_objects = vmballoon_shrinker_count; in vmballoon_register_shrinker()
1578 b->shrinker->private_data = b; in vmballoon_register_shrinker()
1580 shrinker_register(b->shrinker); in vmballoon_register_shrinker()
1606 static int vmballoon_enable_stats(struct vmballoon *b) in vmballoon_enable_stats() argument
1610 down_write(&b->conf_sem); in vmballoon_enable_stats()
1613 if (b->stats) in vmballoon_enable_stats()
1616 b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL); in vmballoon_enable_stats()
1618 if (!b->stats) { in vmballoon_enable_stats()
1625 up_write(&b->conf_sem); in vmballoon_enable_stats()
1642 struct vmballoon *b = f->private; in vmballoon_debug_show() local
1646 if (!b->stats) { in vmballoon_debug_show()
1647 int r = vmballoon_enable_stats(b); in vmballoon_debug_show()
1656 seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities); in vmballoon_debug_show()
1658 b->reset_required ? "y" : "n"); in vmballoon_debug_show()
1661 seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target)); in vmballoon_debug_show()
1662 seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size)); in vmballoon_debug_show()
1670 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]), in vmballoon_debug_show()
1671 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT])); in vmballoon_debug_show()
1677 atomic64_read(&b->stats->general_stat[i])); in vmballoon_debug_show()
1684 atomic64_read(&b->stats->page_stat[i][j])); in vmballoon_debug_show()
1692 static void __init vmballoon_debugfs_init(struct vmballoon *b) in vmballoon_debugfs_init() argument
1694 debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, in vmballoon_debugfs_init()
1698 static void __exit vmballoon_debugfs_exit(struct vmballoon *b) in vmballoon_debugfs_exit() argument
1702 kfree(b->stats); in vmballoon_debugfs_exit()
1703 b->stats = NULL; in vmballoon_debugfs_exit()
1708 static inline void vmballoon_debugfs_init(struct vmballoon *b) in vmballoon_debugfs_init() argument
1712 static inline void vmballoon_debugfs_exit(struct vmballoon *b) in vmballoon_debugfs_exit() argument
1739 struct vmballoon *b; in vmballoon_migratepage() local
1742 b = container_of(b_dev_info, struct vmballoon, b_dev_info); in vmballoon_migratepage()
1748 if (!down_read_trylock(&b->conf_sem)) in vmballoon_migratepage()
1751 spin_lock(&b->comm_lock); in vmballoon_migratepage()
1759 vmballoon_add_page(b, 0, page); in vmballoon_migratepage()
1760 status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, in vmballoon_migratepage()
1764 status = vmballoon_status_page(b, 0, &page); in vmballoon_migratepage()
1771 spin_unlock(&b->comm_lock); in vmballoon_migratepage()
1786 vmballoon_add_page(b, 0, newpage); in vmballoon_migratepage()
1787 status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, in vmballoon_migratepage()
1791 status = vmballoon_status_page(b, 0, &newpage); in vmballoon_migratepage()
1793 spin_unlock(&b->comm_lock); in vmballoon_migratepage()
1802 atomic64_dec(&b->size); in vmballoon_migratepage()
1814 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); in vmballoon_migratepage()
1822 balloon_page_insert(&b->b_dev_info, newpage); in vmballoon_migratepage()
1830 b->b_dev_info.isolated_pages--; in vmballoon_migratepage()
1831 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); in vmballoon_migratepage()
1834 up_read(&b->conf_sem); in vmballoon_migratepage()
1849 static __init void vmballoon_compaction_init(struct vmballoon *b) in vmballoon_compaction_init() argument
1851 b->b_dev_info.migratepage = vmballoon_migratepage; in vmballoon_compaction_init()
1855 static inline void vmballoon_compaction_init(struct vmballoon *b) in vmballoon_compaction_init() argument