// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests.  The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
22
#include <linux/vmalloc.h>
Dmitry Torokhov's avatar
Dmitry Torokhov committed
23 24 25 26 27
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
28 29
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
30
#include <asm/hypervisor.h>
Dmitry Torokhov's avatar
Dmitry Torokhov committed
31 32 33

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
34
MODULE_VERSION("1.5.0.0-k");
Dmitry Torokhov's avatar
Dmitry Torokhov committed
35 36 37 38 39 40
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
41
 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
Dmitry Torokhov's avatar
Dmitry Torokhov committed
42 43 44 45 46 47 48 49 50 51 52 53 54
 * __GFP_NOWARN, to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep.  This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

55 56
/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16
Dmitry Torokhov's avatar
Dmitry Torokhov committed
57 58 59 60 61 62 63 64

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

65 66 67 68
enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
69 70 71 72
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
73 74
};

75
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
76
					| VMW_BALLOON_BATCHED_CMDS \
77 78
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
79 80 81

#define VMW_BALLOON_2M_SHIFT		(9)
#define VMW_BALLOON_NUM_PAGE_SIZES	(2)
82

83 84 85 86 87 88 89 90 91
/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available,
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands.
92
 * VMW BALLOON_BATCHED_2M_CMDS:
93 94 95
 *	BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
 * VMW VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *	VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
96
 */
97 98 99 100 101 102 103 104 105
#define VMW_BALLOON_CMD_START			0
#define VMW_BALLOON_CMD_GET_TARGET		1
#define VMW_BALLOON_CMD_LOCK			2
#define VMW_BALLOON_CMD_UNLOCK			3
#define VMW_BALLOON_CMD_GUEST_ID		4
#define VMW_BALLOON_CMD_BATCHED_LOCK		6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK		7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK		8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK	9
106
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET	10
107

108
#define VMW_BALLOON_CMD_NUM			11
Dmitry Torokhov's avatar
Dmitry Torokhov committed
109 110

/* error codes */
111 112 113 114 115 116 117 118 119 120 121 122 123
#define VMW_BALLOON_SUCCESS		        0
#define VMW_BALLOON_FAILURE		        -1
#define VMW_BALLOON_ERROR_CMD_INVALID	        1
#define VMW_BALLOON_ERROR_PPN_INVALID	        2
#define VMW_BALLOON_ERROR_PPN_LOCKED	        3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED	        4
#define VMW_BALLOON_ERROR_PPN_PINNED	        5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	        6
#define VMW_BALLOON_ERROR_RESET		        7
#define VMW_BALLOON_ERROR_BUSY		        8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141
/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64  PAGE_SHIFT          6         0
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

142 143 144 145 146 147 148 149 150
#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))

151 152 153 154 155 156 157 158 159 160 161 162 163
static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184
struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};

static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				u64 pa)
{
	batch->pages[idx] = pa;
}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
185 186 187
#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;
188
	unsigned int doorbell;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
189

190
	/* allocation statistics */
191 192
	unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
Dmitry Torokhov's avatar
Dmitry Torokhov committed
193 194
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
195 196 197
	unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];
Dmitry Torokhov's avatar
Dmitry Torokhov committed
198

199 200 201
	/* Monitor operations.  */
	unsigned long ops[VMW_BALLOON_CMD_NUM];
	unsigned long ops_fail[VMW_BALLOON_CMD_NUM];
Dmitry Torokhov's avatar
Dmitry Torokhov committed
202 203 204 205 206 207 208
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

209 210 211 212
struct vmballoon;

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
213
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
214
		    bool is_2m_pages);
215
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
216
		      bool is_2m_pages);
217 218
};

219
struct vmballoon_page_size {
Dmitry Torokhov's avatar
Dmitry Torokhov committed
220 221 222 223 224
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
225
	unsigned int n_refused_pages;
226 227 228 229 230 231 232
};

struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
	unsigned supported_page_sizes;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
233 234 235 236 237 238 239 240

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

241 242 243 244 245 246 247 248
	unsigned long capabilities;

	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

Dmitry Torokhov's avatar
Dmitry Torokhov committed
249 250 251 252 253 254 255 256 257 258 259
#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;
260 261

	struct vmci_handle vmci_doorbell;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
262 263 264 265
};

static struct vmballoon balloon;

266 267 268 269 270 271
static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

272 273
	STATS_INC(b->stats.ops[cmd]);

274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296
	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		b->target = local_result;

297 298 299 300 301 302 303 304
	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		STATS_INC(b->stats.ops_fail[cmd]);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
321 322 323 324
/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
325
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
326
{
327
	unsigned long status, capabilities;
328
	bool success;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
329

330 331
	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);
332 333 334 335

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
336 337
		success = true;
		break;
338 339
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
340 341 342 343
		success = true;
		break;
	default:
		success = false;
344
	}
Dmitry Torokhov's avatar
Dmitry Torokhov committed
345

346 347 348 349 350 351 352
	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
353 354 355 356 357
		b->supported_page_sizes = 2;
	else
		b->supported_page_sizes = 1;

	return success;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
358 359 360 361 362 363 364 365 366 367
}

/*
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
368
	unsigned long status;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
369

370 371
	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
372

373
	if (status == VMW_BALLOON_SUCCESS)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
374 375 376 377 378
		return true;

	return false;
}

379 380 381 382 383 384 385 386
static u16 vmballoon_page_size(bool is_2m_page)
{
	if (is_2m_page)
		return 1 << VMW_BALLOON_2M_SHIFT;

	return 1;
}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
387 388 389
/*
 * Retrieve desired balloon size from the host.
 */
390
static bool vmballoon_send_get_target(struct vmballoon *b)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408
{
	unsigned long status;
	unsigned long limit;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

409 410 411
	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	if (status == VMW_BALLOON_SUCCESS)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
412 413 414 415 416 417 418 419 420 421
		return true;

	return false;
}

/*
 * Notify the host about allocated page so that host can use it without
 * fear that guest will need it. Host may reject some pages, we need to
 * check the return value and maybe submit a different page.
 */
422
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
423
				    unsigned int *hv_status)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
424
{
425
	unsigned long status;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
426 427 428 429
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
430
		return -EINVAL;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
431

432 433 434
	*hv_status = status = vmballoon_cmd(b, VMW_BALLOON_CMD_LOCK, pfn, 0);

	if (status == VMW_BALLOON_SUCCESS)
435
		return 0;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
436

437
	return -EIO;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
438 439
}

440
static int vmballoon_send_batched_lock(struct vmballoon *b,
441
				       unsigned int num_pages, bool is_2m_pages)
442
{
443
	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
444
	unsigned long status, cmd;
445

446 447
	cmd = is_2m_pages ? VMW_BALLOON_CMD_BATCHED_2M_LOCK :
			    VMW_BALLOON_CMD_BATCHED_LOCK;
448

449 450 451
	status = vmballoon_cmd(b, cmd, pfn, num_pages);

	if (status == VMW_BALLOON_SUCCESS)
452 453 454 455 456
		return 0;

	return 1;
}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
457 458 459 460
/*
 * Notify the host that guest intends to release given page back into
 * the pool of available (to the guest) pages.
 */
461
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
462
{
463
	unsigned long status;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
464 465 466 467 468 469
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

470
	status = vmballoon_cmd(b, VMW_BALLOON_CMD_UNLOCK, pfn, 0);
471
	return status == VMW_BALLOON_SUCCESS;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
472 473
}

474
static bool vmballoon_send_batched_unlock(struct vmballoon *b,
475
		unsigned int num_pages, bool is_2m_pages)
476
{
477
	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
478
	unsigned long status, cmd;
479

480 481 482 483
	cmd = is_2m_pages ? VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
			    VMW_BALLOON_CMD_BATCHED_UNLOCK;

	status = vmballoon_cmd(b, cmd, pfn, num_pages);
484

485
	return status == VMW_BALLOON_SUCCESS;
486 487
}

488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503
static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
{
	if (is_2m_page)
		return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);

	return alloc_page(flags);
}

static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
	if (is_2m_page)
		__free_pages(page, VMW_BALLOON_2M_SHIFT);
	else
		__free_page(page);
}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
504 505 506 507 508 509 510 511 512
/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when host decides to "reset" balloon for one reason or another.
 * Unlike normal "deflate" we do not (shall not) notify host of the pages
 * being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
513 514 515 516 517 518 519 520 521 522 523 524 525 526 527
	unsigned is_2m_pages;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
		u16 size_per_page = vmballoon_page_size(is_2m_pages);

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			list_del(&page->lru);
			vmballoon_free_page(page, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
			b->size -= size_per_page;
			cond_resched();
		}
Dmitry Torokhov's avatar
Dmitry Torokhov committed
528 529
	}

530 531 532
	/* Clearing the batch_page unconditionally has no adverse effect */
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
533 534 535
}

/*
536 537 538
 * Notify the host of a ballooned page. If host rejects the page put it on the
 * refuse list, those refused page are then released at the end of the
 * inflation cycle.
Dmitry Torokhov's avatar
Dmitry Torokhov committed
539
 */
540
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
541
				bool is_2m_pages)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
542
{
543
	int locked, hv_status;
544
	struct page *page = b->page;
545 546 547
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */
Dmitry Torokhov's avatar
Dmitry Torokhov committed
548

549 550
	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);

551
	if (locked) {
552
		STATS_INC(b->stats.refused_alloc[false]);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
553

554 555 556
		if (locked == -EIO &&
		    (hv_status == VMW_BALLOON_ERROR_RESET ||
		     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
557
			vmballoon_free_page(page, false);
558 559
			return -EIO;
		}
Dmitry Torokhov's avatar
Dmitry Torokhov committed
560

561 562 563 564 565
		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
566 567 568
		if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			page_size->n_refused_pages++;
			list_add(&page->lru, &page_size->refused_pages);
569
		} else {
570
			vmballoon_free_page(page, false);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
571
		}
572
		return locked;
573
	}
Dmitry Torokhov's avatar
Dmitry Torokhov committed
574 575

	/* track allocated page */
576
	list_add(&page->lru, &page_size->pages);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
577 578 579 580 581 582 583

	/* update balloon size */
	b->size++;

	return 0;
}

584
static int vmballoon_lock_batched_page(struct vmballoon *b,
585
				       unsigned int num_pages, bool is_2m_pages)
586 587
{
	int locked, i;
588
	u16 size_per_page = vmballoon_page_size(is_2m_pages);
589

590 591
	locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages);

592 593 594 595 596
	if (locked > 0) {
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

597
			vmballoon_free_page(p, is_2m_pages);
598 599 600 601 602 603 604 605
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
606 607
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
608 609 610 611 612

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
613 614
			list_add(&p->lru, &page_size->pages);
			b->size += size_per_page;
615 616 617
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
618 619 620 621
			if (page_size->n_refused_pages
					< VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &page_size->refused_pages);
				page_size->n_refused_pages++;
622 623 624 625 626
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
627
			vmballoon_free_page(p, is_2m_pages);
628 629 630 631 632 633 634 635 636 637
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
638 639 640 641 642
/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
643
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
644
				 bool is_2m_pages)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
645
{
646
	struct page *page = b->page;
647 648 649
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */
Dmitry Torokhov's avatar
Dmitry Torokhov committed
650

651
	if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) {
652
		list_add(&page->lru, &page_size->pages);
653 654
		return -EIO;
	}
Dmitry Torokhov's avatar
Dmitry Torokhov committed
655 656

	/* deallocate page */
657 658
	vmballoon_free_page(page, false);
	STATS_INC(b->stats.free[false]);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
659 660 661 662 663 664 665

	/* update balloon size */
	b->size--;

	return 0;
}

666
static int vmballoon_unlock_batched_page(struct vmballoon *b,
667
				unsigned int num_pages, bool is_2m_pages)
668 669 670
{
	int locked, i, ret = 0;
	bool hv_success;
671
	u16 size_per_page = vmballoon_page_size(is_2m_pages);
672

673 674
	hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages);

675 676 677 678 679 680
	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
681 682
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
683 684 685 686 687 688 689 690

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
691
			list_add(&p->lru, &page_size->pages);
692 693
		} else {
			/* deallocate page */
694 695
			vmballoon_free_page(p, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
696 697

			/* update balloon size */
698
			b->size -= size_per_page;
699 700 701 702 703 704
		}
	}

	return ret;
}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
705 706 707 708
/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
709 710
static void vmballoon_release_refused_pages(struct vmballoon *b,
		bool is_2m_pages)
Dmitry Torokhov's avatar
Dmitry Torokhov committed
711 712
{
	struct page *page, *next;
713 714
	struct vmballoon_page_size *page_size =
			&b->page_sizes[is_2m_pages];
Dmitry Torokhov's avatar
Dmitry Torokhov committed
715

716
	list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
Dmitry Torokhov's avatar
Dmitry Torokhov committed
717
		list_del(&page->lru);
718 719
		vmballoon_free_page(page, is_2m_pages);
		STATS_INC(b->stats.refused_free[is_2m_pages]);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
720
	}
721

722
	page_size->n_refused_pages = 0;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
723 724
}

725 726 727 728 729 730 731 732 733 734 735 736
static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
			(u64)page_to_pfn(p) << PAGE_SHIFT);
}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
737 738 739 740 741 742 743
/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
744
	unsigned int num_pages = 0;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
745
	int error = 0;
746
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
747
	bool is_2m_pages;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, balloon driver can consume
	 * all available CPU cycles if too many pages are allocated in a
	 * second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slowdown page allocations considerably.
	 */

	/*
	 * Start with no sleep allocation rate which may be higher
	 * than sleeping allocation rate.
	 */
770
	is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
771

772
	pr_debug("%s - goal: %d",  __func__, b->target - b->size);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
773

774
	while (!b->reset_required &&
775 776
		b->size + num_pages * vmballoon_page_size(is_2m_pages)
		< b->target) {
777
		struct page *page;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
778

779
		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
780
			STATS_INC(b->stats.alloc[is_2m_pages]);
781 782
		else
			STATS_INC(b->stats.sleep_alloc);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
783

784
		page = vmballoon_alloc_page(flags, is_2m_pages);
785
		if (!page) {
786 787 788
			STATS_INC(b->stats.alloc_fail[is_2m_pages]);

			if (is_2m_pages) {
789
				b->ops->lock(b, num_pages, true);
790 791 792 793 794 795 796 797 798 799 800 801

				/*
				 * ignore errors from locking as we now switch
				 * to 4k pages and we might get different
				 * errors.
				 */

				num_pages = 0;
				is_2m_pages = false;
				continue;
			}

802
			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
Dmitry Torokhov's avatar
Dmitry Torokhov committed
803 804
				/*
				 * CANSLEEP page allocation failed, so guest
805 806 807
				 * is under severe memory pressure. We just log
				 * the event, but do not stop the inflation
				 * due to its negative impact on performance.
Dmitry Torokhov's avatar
Dmitry Torokhov committed
808
				 */
809
				STATS_INC(b->stats.sleep_alloc_fail);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
810 811 812 813 814
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
815 816 817 818 819 820
			 * under memory pressure. Slowing down page alloctions
			 * seems to be reasonable, but doing so might actually
			 * cause the hypervisor to throttle us down, resulting
			 * in degraded performance. We will count on the
			 * scheduler and standard memory management mechanisms
			 * for now.
Dmitry Torokhov's avatar
Dmitry Torokhov committed
821
			 */
822 823
			flags = VMW_PAGE_ALLOC_CANSLEEP;
			continue;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
824 825
		}

826 827
		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
828 829
			error = b->ops->lock(b, num_pages, is_2m_pages);

830 831 832 833
			num_pages = 0;
			if (error)
				break;
		}
834

835
		cond_resched();
Dmitry Torokhov's avatar
Dmitry Torokhov committed
836 837
	}

838
	if (num_pages > 0)
839
		b->ops->lock(b, num_pages, is_2m_pages);
840

841 842
	vmballoon_release_refused_pages(b, true);
	vmballoon_release_refused_pages(b, false);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
843 844 845 846 847 848 849
}

/*
 * Decrease the size of the balloon allowing guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
850
	unsigned is_2m_pages;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
851

852
	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
853 854

	/* free pages to reach target */
855 856 857 858 859 860 861 862 863 864 865 866 867 868
	for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
			is_2m_pages++) {
		struct page *page, *next;
		unsigned int num_pages = 0;
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			if (b->reset_required ||
				(b->target > 0 &&
					b->size - num_pages
					* vmballoon_page_size(is_2m_pages)
				< b->target + vmballoon_page_size(true)))
				break;
869

870 871
			list_del(&page->lru);
			b->ops->add_page(b, num_pages++, page);
872

873 874
			if (num_pages == b->batch_max_pages) {
				int error;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
875

876
				error = b->ops->unlock(b, num_pages,
877
						       is_2m_pages);
878 879 880 881
				num_pages = 0;
				if (error)
					return;
			}
882

883 884
			cond_resched();
		}
Dmitry Torokhov's avatar
Dmitry Torokhov committed
885

886
		if (num_pages > 0)
887
			b->ops->unlock(b, num_pages, is_2m_pages);
888
	}
889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904
}

static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};

static bool vmballoon_init_batching(struct vmballoon *b)
{
905
	struct page *page;
906

907 908
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
909 910
		return false;

911
	b->batch_page = page_address(page);
912 913 914
	return true;
}

915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931
/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	STATS_INC(b->stats.doorbell);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
932 933
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);
934 935 936 937 938 939 940 941 942 943 944 945

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/*
 * Initialize vmci doorbell, to get notified as soon as balloon changes
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
946
	unsigned long error;
947

948 949
	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;
950

951 952 953
	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);
954

955 956 957
	if (error != VMCI_SUCCESS)
		goto fail;

958 959 960
	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
				b->vmci_doorbell.context,
				b->vmci_doorbell.resource, NULL);
961 962 963

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;
964 965

	return 0;
966 967 968
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
969 970
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not  empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int ret;

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	/* Renegotiate capabilities; bail out and retry later on failure. */
	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if (b->capabilities & VMW_BALLOON_BATCHED_CMDS) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if (b->capabilities & VMW_BALLOON_BASIC_CMDS) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;

	ret = vmballoon_vmci_init(b);
	if (ret)
		pr_err("failed to initialize vmci doorbell\n");

	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

1030 1031
	if (!b->reset_required && vmballoon_send_get_target(b)) {
		unsigned long target = b->target;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1032

1033
		/* update target, adjust size */
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1034 1035
		if (b->size < target)
			vmballoon_inflate(b);
1036 1037
		else if (target == 0 ||
				b->size > target + vmballoon_page_size(true))
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1038 1039 1040
			vmballoon_deflate(b);
	}

1041 1042 1043 1044 1045 1046
	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;
1058
	int i;
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1059

1060 1061 1062
	/* format capabilities info */
	seq_printf(f,
		   "balloon capabilities:   %#4x\n"
1063 1064 1065 1066
		   "used capabilities:      %#4lx\n"
		   "is resetting:           %c\n",
		   VMW_BALLOON_CAPABILITIES, b->capabilities,
		   b->reset_required ? 'y' : 'n');
1067

Dmitry Torokhov's avatar
Dmitry Torokhov committed
1068 1069 1070 1071 1072 1073
	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

1074 1075 1076 1077 1078 1079 1080 1081 1082
	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
		if (vmballoon_cmd_names[i] == NULL)
			continue;

		seq_printf(f, "%-22s: %16lu (%lu failed)\n",
			   vmballoon_cmd_names[i], stats->ops[i],
			   stats->ops_fail[i]);
	}

Dmitry Torokhov's avatar
Dmitry Torokhov committed
1083 1084 1085
	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
1086
		   "doorbell:           %8u\n"
1087
		   "prim2mAlloc:        %8u (%4u failed)\n"
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1088 1089
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
1090
		   "prim2mFree:         %8u\n"
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1091
		   "primFree:           %8u\n"
1092
		   "err2mAlloc:         %8u\n"
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1093
		   "errAlloc:           %8u\n"
1094
		   "err2mFree:          %8u\n"
1095
		   "errFree:            %8u\n",
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1096
		   stats->timer,
1097
		   stats->doorbell,
1098 1099
		   stats->alloc[true], stats->alloc_fail[true],
		   stats->alloc[false], stats->alloc_fail[false],
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1100
		   stats->sleep_alloc, stats->sleep_alloc_fail,
1101 1102 1103
		   stats->free[true],
		   stats->free[false],
		   stats->refused_alloc[true], stats->refused_alloc[false],
1104
		   stats->refused_free[true], stats->refused_free[false]);
Dmitry Torokhov's avatar
Dmitry Torokhov committed
1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157

	return 0;
}

/* Bind the seq_file show routine to the balloon stored in i_private. */
static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

/* File operations for the read-only "vmmemctl" debugfs entry. */
static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Create the read-only "vmmemctl" debugfs entry exposing balloon state.
 *
 * Returns 0 on success or the debugfs error code on failure.
 */
static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (!IS_ERR(b->dbg_entry))
		return 0;

	error = PTR_ERR(b->dbg_entry);
	pr_err("failed to create debugfs entry, error: %d\n", error);
	return error;
}

/* Remove the debugfs entry created by vmballoon_debugfs_init(). */
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

/* CONFIG_DEBUG_FS disabled: debugfs setup is a no-op that succeeds. */
static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

/* CONFIG_DEBUG_FS disabled: nothing to tear down. */
static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */

/*
 * Module init: verify we run under VMware, set up the balloon's page
 * lists, debugfs entry and delayed work, then kick off the first pass
 * with reset_required so the worker negotiates with the host.
 *
 * Returns 0 on success, -ENODEV when not on VMware, or the debugfs
 * error code.
 */
static int __init vmballoon_init(void)
{
	int error;
	unsigned sz_idx;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	for (sz_idx = 0; sz_idx < VMW_BALLOON_NUM_PAGE_SIZES; sz_idx++) {
		INIT_LIST_HEAD(&balloon.page_sizes[sz_idx].pages);
		INIT_LIST_HEAD(&balloon.page_sizes[sz_idx].refused_pages);
	}

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	/* Force the first worker pass to negotiate with the host. */
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

/*
 * Module exit: stop doorbell notifications and the worker, remove the
 * debugfs entry, then reset the protocol and release all balloon pages.
 */
static void __exit vmballoon_exit(void)
{
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);