// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");

/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY		(5)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC		0x0ba11007

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
};

#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)

/*
 * 64-bit targets are only supported on 64-bit kernels.
 */
#ifdef CONFIG_64BIT
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
					| VMW_BALLOON_64_BIT_TARGET)
#else
#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
#endif

enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};

#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)

static const char * const vmballoon_page_size_names[] = {
	[VMW_BALLOON_4K_PAGE]			= "4k",
	[VMW_BALLOON_2M_PAGE]			= "2M"
};

enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};

enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};

#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)

/**
 * enum vmballoon_cmd_type - backdoor commands.
 *
 * Availability of the commands is as follows:
 *
 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 *
 * If the host reports %VMW_BALLOON_BASIC_CMDS is supported then the
 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_CMDS is supported then the
 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 * are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS is supported then
 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 * are supported.
 *
 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then the
 * %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
 *
 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 *			    to be deflated from the balloon.
 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 *			      runs in the VM.
 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
 *				  ballooned pages (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
 *				  pages that are about to be deflated from the
 *				  balloon (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 *				     for 2MB pages.
 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 *				       @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 *				       pages.
 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
 *				       that would be invoked when the balloon
 *				       size changes.
 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	/* No command 5 */
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};

#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)

enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))

static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};

#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)

enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_SHRINK,
	VMW_BALLOON_STAT_SHRINK_FREE,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};

#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)

static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);

struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	struct list_head prealloc_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
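
/*
 * Illustrative sketch (not part of the driver): with 4 KiB pages, each batch
 * entry packs into a single 64-bit word (5 status bits, PAGE_SHIFT - 5
 * reserved bits and a 52-bit PFN), so one batch page holds
 * PAGE_SIZE / sizeof(struct vmballoon_batch_entry) == 512 entries. Filling
 * an entry and reading back its status looks roughly like:
 *
 *	b->batch_page[idx] = (struct vmballoon_batch_entry)
 *				{ .pfn = page_to_pfn(page) };
 *	...				// hypervisor processes the batch
 *	status = b->batch_page[idx].status;
 */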

struct vmballoon {
	/**
	 * @max_page_size: maximum supported page size for ballooning.
	 *
	 * Protected by @conf_sem
	 */
	enum vmballoon_page_size_type max_page_size;

	/**
	 * @size: balloon actual size in basic page size (frames).
	 *
	 * While we currently do not support sizes bigger than 32 bits, we
	 * use 64 bits in preparation for future support.
	 */
	atomic64_t size;

	/**
	 * @target: balloon target size in basic page size (frames).
	 *
	 * We do not protect the target under the assumption that setting the
	 * value is always done through a single write. If this assumption ever
	 * breaks, we would have to use X_ONCE for accesses, and suffer the less
	 * optimized code. Although we may read stale target value if multiple
	 * accesses happen at once, the performance impact should be minor.
	 */
	unsigned long target;

	/**
	 * @reset_required: reset flag
	 *
	 * Setting this flag may introduce races, but the code is expected to
	 * handle them gracefully. In the worst case, another operation will
	 * fail as reset did not take place. Clearing the flag is done while
	 * holding @conf_sem for write.
	 */
	bool reset_required;

	/**
	 * @capabilities: hypervisor balloon capabilities.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up to
	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	/**
	 * @batch_max_pages: maximum pages that can be locked/unlocked.
	 *
	 * Indicates the number of pages that the hypervisor can lock or unlock
	 * at once, according to whether batching is enabled. If batching is
	 * disabled, only a single page can be locked/unlocked on each operation.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned int batch_max_pages;

	/**
	 * @page: page to be locked/unlocked by the hypervisor
	 *
	 * @page is only used when batching is disabled and a single page is
	 * reclaimed on each iteration.
	 *
	 * Protected by @comm_lock.
	 */
	struct page *page;

	/**
	 * @shrink_timeout: timeout until the next inflation.
	 *
	 * After a shrink event, indicates the time in jiffies after which
	 * inflation is allowed again. Can be written concurrently with reads,
	 * so must use READ_ONCE/WRITE_ONCE when accessing.
	 */
	unsigned long shrink_timeout;

	/* statistics */
	struct vmballoon_stats *stats;

	/**
	 * @b_dev_info: balloon device information descriptor.
	 */
	struct balloon_dev_info b_dev_info;

	struct delayed_work dwork;

	/**
	 * @huge_pages: list of the inflated 2MB pages.
	 *
	 * Protected by @b_dev_info.pages_lock.
	 */
	struct list_head huge_pages;

	/**
	 * @vmci_doorbell: VMCI doorbell handle, used to get notified about
	 * balloon size changes.
	 *
	 * Protected by @conf_sem.
	 */
	struct vmci_handle vmci_doorbell;

	/**
	 * @conf_sem: semaphore to protect the configuration and the statistics.
	 */
	struct rw_semaphore conf_sem;

	/**
	 * @comm_lock: lock to protect the communication with the host.
	 *
	 * Lock ordering: @conf_sem -> @comm_lock.
	 */
	spinlock_t comm_lock;

	/**
	 * @shrinker: shrinker interface that is used to avoid over-inflation.
	 */
	struct shrinker *shrinker;
};

static struct vmballoon balloon;

struct vmballoon_stats {
	/* timer / doorbell operations */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* allocation statistics for huge and small pages */
	atomic64_t
	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations: total operations, and failures */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};

static inline bool is_vmballoon_stats_on(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) &&
		static_branch_unlikely(&balloon_stat_enabled);
}

static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					  enum vmballoon_op_stat_type type)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->ops[op][type]);
}

static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_gen_add(struct vmballoon *b,
					   enum vmballoon_stat_general stat,
					   unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_page_inc(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->page_stat[stat][size]);
}

static inline void vmballoon_stats_page_add(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size,
					    unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->page_stat[stat][size]);
}

static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}
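
/*
 * Illustrative sketch (not part of the driver): a backdoor command is a
 * single call, e.g. querying the balloon target (this is roughly what
 * vmballoon_send_get_target() below does):
 *
 *	unsigned long status;
 *
 *	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET,
 *			       totalram_pages(), 0);
 *
 * Since GET_TARGET is in VMW_BALLOON_CMD_WITH_TARGET_MASK, on success
 * __vmballoon_cmd() also updates b->target as a side effect.
 */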

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}

/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 *
 * Return: zero on success or appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}

/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}

/**
 * vmballoon_mark_page_offline() - mark a page as offline
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_offline(struct page *page,
			    enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__SetPageOffline(page + i);
}

/**
 * vmballoon_mark_page_online() - mark a page as online
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_online(struct page *page,
			   enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__ClearPageOffline(page + i);
}

/**
 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success, EINVAL if limit does not fit in 32-bit, as required
 * by the host-guest protocol and EIO if an error occurred in communicating with
 * the host.
 */
static int vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;

	limit = totalram_pages();

	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
	    limit != (u32)limit)
		return -EINVAL;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_alloc_page_list - allocates a list of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 * @req_n_pages: the number of requested pages.
 *
 * Tries to allocate @req_n_pages, adds them to the list of balloon pages in
 * @ctl.pages, and updates @ctl.n_pages to reflect the number of pages.
 *
 * Return: zero on success or error code otherwise.
 */
static int vmballoon_alloc_page_list(struct vmballoon *b,
				     struct vmballoon_ctl *ctl,
				     unsigned int req_n_pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < req_n_pages; i++) {
		/*
		 * First check if we happen to have pages that were allocated
		 * before. This happens when a 2MB page is rejected during
		 * inflation by the hypervisor, and then split into 4KB pages.
		 */
		if (!list_empty(&ctl->prealloc_pages)) {
			page = list_first_entry(&ctl->prealloc_pages,
						struct page, lru);
			list_del(&page->lru);
		} else {
			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
			else
				page = balloon_page_alloc();

			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
						 ctl->page_size);
		}

		if (page) {
			/* Success. Add the page to the list and continue. */
			list_add(&page->lru, &ctl->pages);
			continue;
		}

		/* Allocation failed. Update statistics and stop. */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
					 ctl->page_size);
		break;
	}

	ctl->n_pages = i;

	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}

/**
 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
 *
 * @b: pointer for %struct vmballoon.
 * @page: pointer for the page whose result should be handled.
 * @page_size: size of the page.
 * @status: status of the operation as provided by the hypervisor.
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}

/**
 * vmballoon_status_page - returns the status of (un)lock operation
 *
 * @b: pointer to the balloon.
 * @idx: index for the page for which the operation is performed.
 * @p: pointer to where the page struct is returned.
 *
 * Following a lock or unlock operation, returns the status of the operation
 * for an individual page. Provides the page that the operation was performed
 * on in the @p argument.
 *
 * Returns: The status of a lock or unlock operation for an individual page.
 */
static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
					   struct page **p)
{
	if (static_branch_likely(&vmw_balloon_batching)) {
		/* batching mode */
		*p = pfn_to_page(b->batch_page[idx].pfn);
		return b->batch_page[idx].status;
	}

	/* non-batching mode */
	*p = b->page;

	/*
	 * If a failure occurs, the indication will be provided in the status
	 * of the entire operation, which is considered before the individual
	 * page status. So for non-batching mode, the indication is always of
	 * success.
	 */
	return VMW_BALLOON_SUCCESS;
}

/**
 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 * @b: pointer to the balloon.
 * @num_pages: number of inflated/deflated pages.
 * @page_size: size of the page.
 * @op: the type of operation (lock or unlock).
 *
 * Notify the host about page(s) that were ballooned (or removed from the
 * balloon) so that the host can use them without fear that the guest will
 * need them (or stop using them, since the VM needs them again). The host may
 * reject some pages; we need to check the return value and maybe submit a
 * different page. The pages that are inflated/deflated are pointed to by
 * @b->page.
 *
 * Return: result as provided by the hypervisor.
 */
static unsigned long vmballoon_lock_op(struct vmballoon *b,
				       unsigned int num_pages,
				       enum vmballoon_page_size_type page_size,
				       enum vmballoon_op op)
{
	unsigned long cmd, pfn;

	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching)) {
		if (op == VMW_BALLOON_INFLATE)
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
				VMW_BALLOON_CMD_BATCHED_LOCK;
		else
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
				VMW_BALLOON_CMD_BATCHED_UNLOCK;

		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
	} else {
		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
						  VMW_BALLOON_CMD_UNLOCK;
		pfn = page_to_pfn(b->page);

		/* In non-batching mode, PFNs must fit in 32-bit */
		if (unlikely(pfn != (u32)pfn))
			return VMW_BALLOON_ERROR_PPN_INVALID;
	}

	return vmballoon_cmd(b, cmd, pfn, num_pages);
}
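
/*
 * Illustrative sketch (not part of the driver): in batching mode the
 * hypervisor is handed the PFN of the batch page itself plus the number of
 * entries, e.g. inflating n 4KB pages boils down to:
 *
 *	vmballoon_cmd(b, VMW_BALLOON_CMD_BATCHED_LOCK,
 *		      PHYS_PFN(virt_to_phys(b->batch_page)), n);
 */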

/**
 * vmballoon_add_page - adds a page towards lock/unlock operation.
 *
 * @b: pointer to the balloon.
 * @idx: index of the page to be ballooned in this batch.
 * @p: pointer to the page that is about to be ballooned.
 *
 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 */
static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
			       struct page *p)
{
	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching))
		b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
	else
		b->page = p;
}

/**
 * vmballoon_lock - lock or unlock a batch of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * Notifies the host about ballooned pages (after inflation or deflation,
 * according to @ctl). If the host rejects a page, it is put on the
 * @ctl refuse list. These refused pages are then released when moving to the
 * next size of pages.
 *
 * Note that we neither free any @page here nor put them back on the ballooned
 * pages list. Instead we queue them for later processing. We do that for
 * several reasons. First, we do not want to free the pages under the lock.
 * Second, it allows us to unify the handling of lock and unlock. In the
 * inflate case, the caller will check if there are too many refused pages and
 * release them. Although it is not identical to the past behavior, it should
 * not affect performance.
 */
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
	unsigned long batch_status;
	struct page *page;
	unsigned int i, num_pages;

	num_pages = ctl->n_pages;
	if (num_pages == 0)
		return 0;

	/* communication with the host is done under the communication lock */
	spin_lock(&b->comm_lock);

	i = 0;
	list_for_each_entry(page, &ctl->pages, lru)
		vmballoon_add_page(b, i++, page);

	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
					 ctl->op);

	/*
	 * Iterate over the pages in the provided list. Since we are changing
	 * @ctl->n_pages, we save the original value in @num_pages and use it
	 * to bound the loop.
	 */
	for (i = 0; i < num_pages; i++) {
		unsigned long status;

		status = vmballoon_status_page(b, i, &page);

		/*
		 * Failure of the whole batch overrides the individual
		 * operation results.
		 */
		if (batch_status != VMW_BALLOON_SUCCESS)
			status = batch_status;

		/* Continue if no error happened */
		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
						 status))
			continue;

		/*
		 * An error happened. Move the page to the refused list and
		 * update the page counts.
		 */
		list_move(&page->lru, &ctl->refused_pages);
		ctl->n_pages--;
		ctl->n_refused_pages++;
	}

	spin_unlock(&b->comm_lock);

	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_release_page_list() - Releases a page list
 *
 * @page_list: list of pages to release.
 * @n_pages: pointer to the number of pages.
 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 *
 * Releases the list of pages and zeros the number of pages.
 */
static void vmballoon_release_page_list(struct list_head *page_list,
				       int *n_pages,
				       enum vmballoon_page_size_type page_size)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, page_list, lru) {
		list_del(&page->lru);
		__free_pages(page, vmballoon_page_order(page_size));
	}

	if (n_pages)
		*n_pages = 0;
}


/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    struct vmballoon_ctl *ctl)
{
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
				 ctl->page_size);

	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
				    ctl->page_size);
}

/**
 * vmballoon_change - retrieve the required balloon change
 *
 * @b: pointer for the balloon.
 *
 * Return: the required change for the balloon size. A positive number
 * indicates inflation, a negative number indicates deflation.
 */
static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/*
	 * We must cast first because of int sizes.
	 * Otherwise we might get huge positives instead of negatives.
	 */

	if (b->reset_required)
		return 0;

	/* consider a 2MB slack on deflate, unless the balloon is emptied */
	if (target < size && target != 0 &&
	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
		return 0;

	/* If an out-of-memory event recently occurred, inflation is disallowed. */
	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
		return 0;

	return target - size;
}
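
/*
 * Worked example (illustrative): with 4 KiB base pages, the deflate slack is
 * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE) == 512 frames. If the size is
 * 10000 frames and the target is 9700, the difference of 300 frames is within
 * the slack and vmballoon_change() returns 0; with a target of 9000 it
 * returns -1000 and deflation proceeds. A target of 0 bypasses the slack and
 * empties the balloon.
 */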

/**
 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 *
 * @b: pointer to balloon.
 * @pages: list of pages to enqueue.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 *
 * Enqueues the provided list of pages on the ballooned page list, clears the
 * list, and zeroes the number of pages that was provided.
 */
static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	unsigned long flags;
	struct page *page;

	if (page_size == VMW_BALLOON_4K_PAGE) {
		balloon_page_list_enqueue(&b->b_dev_info, pages);
	} else {
		/*
		 * Keep the huge pages in a local list which is not available
		 * for the balloon compaction mechanism.
		 */
		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

		list_for_each_entry(page, pages, lru) {
			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
		}

		list_splice_init(pages, &b->huge_pages);
		__count_vm_events(BALLOON_INFLATE, *n_pages *
				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	}

	*n_pages = 0;
}

/**
 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
 *
 * @b: pointer to balloon.
 * @pages: list onto which the dequeued pages are added.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 * @n_req_pages: the number of requested pages.
 *
 * Dequeues the number of requested pages from the balloon for deflation. The
 * number of dequeued pages may be lower, if not enough pages in the requested
 * size are available.
 */
static void vmballoon_dequeue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size,
					unsigned int n_req_pages)
{
	struct page *page, *tmp;
	unsigned int i = 0;
	unsigned long flags;

	/* In the case of 4k pages, use the compaction infrastructure */
	if (page_size == VMW_BALLOON_4K_PAGE) {
		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
						     n_req_pages);
		return;
	}

	/* 2MB pages */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);

		list_move(&page->lru, pages);
		if (++i == n_req_pages)
			break;
	}

	__count_vm_events(BALLOON_DEFLATE,
			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	*n_pages = i;
}

/**
 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
 *
 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
 * due to one or a few 4KB pages. These 2MB pages may keep being allocated and
 * then being refused. To prevent this case, this function splits the refused
 * pages into 4KB pages and adds them to the @prealloc_pages list.
 *
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 */
static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
{
	struct page *page, *tmp;
	unsigned int i, order;

	order = vmballoon_page_order(ctl->page_size);

	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
		list_del(&page->lru);
		split_page(page, order);
		for (i = 0; i < (1 << order); i++)
			list_add(&page[i].lru, &ctl->prealloc_pages);
	}
	ctl->n_refused_pages = 0;
}
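
/*
 * Illustrative sketch (not part of the driver): on x86-64,
 * VMW_BALLOON_2M_ORDER is PMD_SHIFT - PAGE_SHIFT == 9, so one refused 2MB
 * page becomes 1 << 9 == 512 independent order-0 pages:
 *
 *	split_page(page, 9);	// page[0] .. page[511] are now order-0
 *
 * vmballoon_alloc_page_list() then consumes @prealloc_pages before falling
 * back to the page allocator.
 */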

/**
 * vmballoon_inflate() - Inflate the balloon towards its target size.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int to_inflate_pages, page_in_frames;
		int alloc_error, lock_error = 0;

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages != 0);

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_inflate_frames,
							  page_in_frames));

		/* Start by allocating */
		alloc_error = vmballoon_alloc_page_list(b, &ctl,
							to_inflate_pages);

		/* Actually lock the pages by telling the hypervisor */
		lock_error = vmballoon_lock(b, &ctl);

		/*
		 * If an error indicates that something serious went wrong,
		 * stop the inflation.
		 */
		if (lock_error)
			break;

		/* Update the balloon size */
		atomic64_add(ctl.n_pages * page_in_frames, &b->size);

		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/*
		 * If allocation failed or the number of refused pages exceeds
		 * the maximum allowed, move to the next page size.
		 */
		if (alloc_error ||
		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
				break;

			/*
			 * Split the refused pages to 4k. This will also empty
			 * the refused pages list.
			 */
			vmballoon_split_refused_pages(&ctl);
			ctl.page_size--;
		}

		cond_resched();
	}

	/*
	 * Release pages that were allocated while attempting to inflate the
	 * balloon but were refused by the host for one reason or another,
	 * and update the statistics.
	 */
	if (ctl.n_refused_pages != 0)
		vmballoon_release_refused_pages(b, &ctl);

	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}

/**
 * vmballoon_deflate() - Decrease the size of the balloon.
 *
 * @b: pointer to the balloon
 * @n_frames: the number of frames to deflate. If zero, automatically
 * calculated according to the target size.
 * @coordinated: whether to coordinate with the host
 *
 * Decrease the size of the balloon allowing guest to use more memory.
 *
 * Return: The number of deflated frames (i.e., basic page size units)
 */
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
				       bool coordinated)
{
	unsigned long deflated_frames = 0;
	unsigned long tried_frames = 0;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = VMW_BALLOON_4K_PAGE,
		.op = VMW_BALLOON_DEFLATE
	};

	/* free pages to reach target */
	while (true) {
		unsigned int to_deflate_pages, n_unlocked_frames;
		unsigned int page_in_frames;
		int64_t to_deflate_frames;
		bool deflated_all;

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages);
		VM_BUG_ON(!list_empty(&ctl.refused_pages));
		VM_BUG_ON(ctl.n_refused_pages);

		/*
		 * If a specific number of frames was requested, we try to
		 * deflate that number of frames. Otherwise, deflation is
		 * performed according to the target and balloon size.
		 */
		to_deflate_frames = n_frames ? n_frames - tried_frames :
					       -vmballoon_change(b);

		/* break if no work to do */
		if (to_deflate_frames <= 0)
			break;

		/*
		 * Calculate the number of frames based on current page size,
		 * but limit the deflated frames to a single chunk
		 */
		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_deflate_frames,
							  page_in_frames));

		/* First take the pages from the balloon pages. */
		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size, to_deflate_pages);

		/*
		 * Before pages are moved to the refused list, count their
		 * frames as frames that we tried to deflate.
		 */
		tried_frames += ctl.n_pages * page_in_frames;

		/*
		 * Unlock the pages by communicating with the hypervisor if the
		 * communication is coordinated (i.e., not pop). We ignore the
		 * return code; instead we check whether we managed to unlock
		 * all the pages. If we failed, we will move to the next page
		 * size and eventually try again later.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Check if we deflated enough. We will move to the next page
		 * size if we did not manage to do so. This calculation takes
		 * place now, as once the pages are released, the number of
		 * pages is zeroed.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* Update local and global counters */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* free the ballooned pages */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* Return the refused pages to the ballooned list. */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* If we failed to unlock all the pages, move to next size. */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}

/**
 * vmballoon_deinit_batching - disables batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Disables batching by deallocating the page used for communication with the
 * hypervisor and disabling the static key to indicate that batching is off.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}

/**
 * vmballoon_init_batching - enable batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Enables batching by allocating a page for communication with the hypervisor
 * and enabling the static key to use batching.
 *
 * Return: zero on success or an appropriate error-code.
 */
static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);

	static_branch_enable(&vmw_balloon_batching);

	return 0;
}

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/**
 * vmballoon_vmci_init - Initialize vmci doorbell.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success or when the wakeup command is not supported;
 * error code otherwise.
 *
 * Initialize vmci doorbell, to get notified as soon as balloon changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
				b->vmci_doorbell.context,
				b->vmci_doorbell.resource, NULL);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}
 * vmballoon_pop - Quickly release all pages allocated for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * This function is called when the host decides to "reset" the balloon for one
 * reason or another. Unlike a normal "deflate" we do not (shall not) notify
 * the host of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	unsigned long size;

	while ((size = atomic64_read(&b->size)))
		vmballoon_deflate(b, size, false);
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	down_write(&b->conf_sem);

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		goto unlock;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, so inform the
			 * monitor about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			goto unlock;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		vmballoon_deinit_batching(b);
	}

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err_once("failed to initialize vmci doorbell\n");

	if (vmballoon_send_guest_id(b))
		pr_err_once("failed to send guest ID to the host\n");

unlock:
	up_write(&b->conf_sem);
}

/**
 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
 *
 * @work: pointer to the &work_struct which is provided by the workqueue.
 *
 * Resets the protocol if needed, gets the new size and adjusts the balloon as
 * needed. Repeats every second.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	int64_t change = 0;

	if (b->reset_required)
		vmballoon_reset(b);

	down_read(&b->conf_sem);

	/*
	 * Update the stats while holding the semaphore to ensure that
	 * @stats_enabled is consistent with whether the stats are actually
	 * enabled
	 */
	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);

	if (!vmballoon_send_get_target(b))
		change = vmballoon_change(b);

	if (change != 0) {
		pr_debug("%s - size: %llu, target %lu\n", __func__,
			 atomic64_read(&b->size), READ_ONCE(b->target));

		if (change > 0)
			vmballoon_inflate(b);
		else  /* (change < 0) */
			vmballoon_deflate(b, 0, true);
	}

	up_read(&b->conf_sem);

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/**
 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Returns: number of pages that were freed during deflation.
 */
static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
					     struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;
	unsigned long deflated_frames;

	pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);

	/*
	 * If the lock is also contended for read, we cannot easily reclaim and
	 * we bail out.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return 0;

	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);

	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
				deflated_frames);

	/*
	 * Delay future inflation for some time to mitigate the situations in
	 * which balloon continuously grows and shrinks. Use WRITE_ONCE() since
	 * the access is asynchronous.
	 */
	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);

	up_read(&b->conf_sem);

	return deflated_frames;
}

/**
 * vmballoon_shrinker_count() - return the number of ballooned pages.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Returns: number of 4k pages that are allocated for the balloon and can
 *	    therefore be reclaimed under pressure.
 */
static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
					      struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;

	return atomic64_read(&b->size);
}

static void vmballoon_unregister_shrinker(struct vmballoon *b)
{
	shrinker_free(b->shrinker);
	b->shrinker = NULL;
}

static int vmballoon_register_shrinker(struct vmballoon *b)
{
	/* Do nothing if the shrinker is not enabled */
	if (!vmwballoon_shrinker_enable)
		return 0;

	b->shrinker = shrinker_alloc(0, "vmw-balloon");
	if (!b->shrinker)
		return -ENOMEM;

	b->shrinker->scan_objects = vmballoon_shrinker_scan;
	b->shrinker->count_objects = vmballoon_shrinker_count;
	b->shrinker->private_data = b;

	shrinker_register(b->shrinker);

	return 0;
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static const char * const vmballoon_stat_page_names[] = {
	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
};

static const char * const vmballoon_stat_names[] = {
	[VMW_BALLOON_STAT_TIMER]		= "timer",
	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
	[VMW_BALLOON_STAT_RESET]		= "reset",
	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
};

static int vmballoon_enable_stats(struct vmballoon *b)
{
	int r = 0;

	down_write(&b->conf_sem);

	/* did we somehow race with another reader which enabled stats? */
	if (b->stats)
		goto out;

	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);

	if (!b->stats) {
		/* allocation failed */
		r = -ENOMEM;
		goto out;
	}
	static_key_enable(&balloon_stat_enabled.key);
out:
	up_write(&b->conf_sem);
	return r;
}

/**
 * vmballoon_debug_show - shows statistics of balloon operations.
 * @f: pointer to the &struct seq_file.
 * @offset: ignored.
 *
 * Provides the statistics that can be accessed via the vmmemctl file in
 * debugfs. To avoid the overhead (mainly memory) of collecting the
 * statistics, we only collect them after the first time the counters are
 * read.
 *
 * Return: zero on success or an error code.
 */
vmballoon_debug_show(struct seq_file * f,void * offset)1640 static int vmballoon_debug_show(struct seq_file *f, void *offset)
1641 {
1642 	struct vmballoon *b = f->private;
1643 	int i, j;
1644 
1645 	/* enables stats if they are disabled */
1646 	if (!b->stats) {
1647 		int r = vmballoon_enable_stats(b);
1648 
1649 		if (r)
1650 			return r;
1651 	}
1652 
1653 	/* format capabilities info */
1654 	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
1655 		   VMW_BALLOON_CAPABILITIES);
1656 	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
1657 	seq_printf(f, "%-22s: %16s\n", "is resetting",
1658 		   b->reset_required ? "y" : "n");
1659 
1660 	/* format size info */
1661 	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
1662 	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
1663 
1664 	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
1665 		if (vmballoon_cmd_names[i] == NULL)
1666 			continue;
1667 
1668 		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
1669 			   vmballoon_cmd_names[i],
1670 			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
1671 			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
1672 	}
1673 
1674 	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
1675 		seq_printf(f, "%-22s: %16llu\n",
1676 			   vmballoon_stat_names[i],
1677 			   atomic64_read(&b->stats->general_stat[i]));
1678 
1679 	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
1680 		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
1681 			seq_printf(f, "%-18s(%s): %16llu\n",
1682 				   vmballoon_stat_page_names[i],
1683 				   vmballoon_page_size_names[j],
1684 				   atomic64_read(&b->stats->page_stat[i][j]));
1685 	}
1686 
1687 	return 0;
1688 }
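
/*
 * With the format strings above, the debugfs file comes out as aligned
 * "name: value" lines; for example (values are illustrative only):
 *
 *	timer                 :             1234
 *	alloc             (4k):             5678
 */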

DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
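
/*
 * DEFINE_SHOW_ATTRIBUTE() generates vmballoon_debug_fops (wired to
 * vmballoon_debug_show() above), so the stats can be read with, e.g.:
 *
 *	# cat /sys/kernel/debug/vmmemctl
 *
 * assuming debugfs is mounted at the usual /sys/kernel/debug location.
 */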

static void __init vmballoon_debugfs_init(struct vmballoon *b)
{
	debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
			    &vmballoon_debug_fops);
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	static_key_disable(&balloon_stat_enabled.key);
	debugfs_lookup_and_remove("vmmemctl", NULL);
	kfree(b->stats);
	b->stats = NULL;
}

#else

static inline void vmballoon_debugfs_init(struct vmballoon *b)
{
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */


#ifdef CONFIG_BALLOON_COMPACTION
/**
 * vmballoon_migratepage() - migrates a balloon page.
 * @b_dev_info: balloon device information descriptor.
 * @newpage: the page to which @page should be migrated.
 * @page: a ballooned page that should be migrated.
 * @mode: migration mode, ignored.
 *
 * This function is open-coded, but that is dictated by the interface that
 * balloon_compaction provides.
 *
 * Return: zero on success, -EAGAIN when migration cannot be performed
 *	   momentarily, and -EBUSY if migration failed and should be retried
 *	   with that specific page.
 */
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	unsigned long status, flags;
	struct vmballoon *b;
	int ret;

	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

	/*
	 * If the semaphore is taken, there is an ongoing configuration change
	 * (i.e., balloon reset), so try again.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return -EAGAIN;

	spin_lock(&b->comm_lock);
	/*
	 * We must start by deflating and not inflating, as otherwise the
	 * hypervisor may tell us that it has enough memory and the new page is
	 * not needed. Since the old page is isolated, we cannot use the list
	 * interface to unlock it, as the LRU field is used for isolation.
	 * Instead, we use the native interface directly.
	 */
	vmballoon_add_page(b, 0, page);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_DEFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &page);

	/*
	 * If a failure happened, let the migration mechanism know that it
	 * should not retry.
	 */
	if (status != VMW_BALLOON_SUCCESS) {
		spin_unlock(&b->comm_lock);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * The page is isolated, so it is safe to delete it without holding
	 * @pages_lock. We keep holding @comm_lock since we will need it in a
	 * second.
	 */
	balloon_page_delete(page);

	put_page(page);

	/* Inflate */
	vmballoon_add_page(b, 0, newpage);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_INFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &newpage);

	spin_unlock(&b->comm_lock);

	if (status != VMW_BALLOON_SUCCESS) {
		/*
		 * A failure happened. While we can deflate the page we just
		 * inflated, this deflation can also encounter an error.
		 * Instead we decrease the size of the balloon to reflect the
		 * change and report failure.
		 */
		atomic64_dec(&b->size);
		ret = -EBUSY;
	} else {
		/*
		 * Success. Take a reference for the page, and we will add it
		 * to the list after acquiring the lock.
		 */
		get_page(newpage);
		ret = MIGRATEPAGE_SUCCESS;
	}

	/* Update the balloon list under the @pages_lock */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

	/*
	 * On inflation success, we already took a reference for the @newpage.
	 * If we succeeded, just insert it into the list and update the
	 * statistics under the lock.
	 */
	if (ret == MIGRATEPAGE_SUCCESS) {
		balloon_page_insert(&b->b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
	}

	/*
	 * We deflated successfully, so regardless of the inflation success,
	 * we need to reduce the number of isolated_pages.
	 */
	b->b_dev_info.isolated_pages--;
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

out_unlock:
	up_read(&b->conf_sem);
	return ret;
}
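
/*
 * Lock ordering in the migration path above: @conf_sem (read) excludes
 * concurrent resets, @comm_lock serializes the hypervisor communication,
 * and @pages_lock protects the balloon page list; they are always taken
 * in that order.
 */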

/**
 * vmballoon_compaction_init() - initializes compaction for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * This only sets the migration callback, and therefore cannot fail; it must
 * run after balloon_devinfo_init() has initialized @b->b_dev_info.
 */
static __init void vmballoon_compaction_init(struct vmballoon *b)
{
	b->b_dev_info.migratepage = vmballoon_migratepage;
}
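
/*
 * Note: isolation and putback of ballooned pages are handled by the generic
 * balloon_compaction code; only the migration step itself is driver
 * specific, which is why a single callback suffices here (our reading of
 * the interface in include/linux/balloon_compaction.h).
 */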

#else /* CONFIG_BALLOON_COMPACTION */
static inline void vmballoon_compaction_init(struct vmballoon *b)
{
}
#endif /* CONFIG_BALLOON_COMPACTION */

static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_register_shrinker(&balloon);
	if (error)
		return error;

	/*
	 * Initialization of compaction must be done after the call to
	 * balloon_devinfo_init(), since balloon_devinfo_init() initializes
	 * (and would thus overwrite) the migration callback that
	 * vmballoon_compaction_init() sets.
	 */
	balloon_devinfo_init(&balloon.b_dev_info);
	vmballoon_compaction_init(&balloon);

	INIT_LIST_HEAD(&balloon.huge_pages);
	spin_lock_init(&balloon.comm_lock);
	init_rwsem(&balloon.conf_sem);
	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	vmballoon_debugfs_init(&balloon);

	return 0;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset the connection with the
	 * monitor. Reset the connection before deallocating memory to avoid
	 * the potential for additional spurious resets from the guest
	 * touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);