/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/nospec-branch.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;
};

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID			0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

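/*
 * Recompute the intercept masks that are active while a nested guest runs:
 * an intercept requested by either the host state (hsave) or the L1 guest
 * must stay armed, so the two masks are simply OR'ed together.
 */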
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

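/*
 * Map an MSR number to the u32 offset of its two permission bits (one for
 * reads, one for writes) within the MSR permission bitmap; each 2K range
 * covers 8192 MSRs at two bits per MSR.
 */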
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

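/* EFER_SVME must remain set in the VMCB's EFER while the guest runs. */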
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

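/*
 * Advance RIP past the just-intercepted instruction, using the hardware's
 * next_rip hint when the CPU supports NRIPS and falling back to the
 * instruction emulator otherwise.
 */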
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

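/*
 * Per-CPU SVM bring-up: set EFER.SVME, publish this CPU's host save area
 * via MSR_VM_HSAVE_PA, reset the TSC ratio when scaling is supported, and
 * sample the OSVW erratum status bits.
 */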
static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

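/*
 * Each MSR owns an adjacent read/write bit pair in the permission bitmap;
 * a clear bit lets the guest access the MSR directly, a set bit forces an
 * intercept.
 */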
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->vmcb->control.tsc_offset;
}

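/*
 * While a nested guest runs, the active VMCB carries the combined L0+L1
 * TSC offset, so L1's contribution must be preserved when L0 rewrites its
 * own offset.
 */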
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
	else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				     svm->vmcb->control.tsc_offset - adjustment,
				     svm->vmcb->control.tsc_offset);

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

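/*
 * Bring the VMCB to its power-on state: arm the CR/DR/exception and
 * instruction intercepts, load the architectural reset values for the
 * segment and control registers, and switch from shadow paging to nested
 * paging when NPT is enabled.
 */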
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_MONITOR);
	set_intercept(svm, INTERCEPT_MWAIT);
	set_intercept(svm, INTERCEPT_XSETBV);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	if (!init_event) {
		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
					   MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
	}
	init_vmcb(svm);

	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto free_page3;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	svm_init_osvw(&svm->vcpu);

	return &svm->vcpu;

free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

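/*
 * Migrating a vCPU to a different physical CPU forces a new ASID and a
 * full VMCB state reload; the host's segment and MSR state is saved here
 * and restored in svm_vcpu_put().
 */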
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		svm->asid_generation = 0;
		mark_all_dirty(svm->vmcb);
	}

#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
	savesegment(fs, svm->host.fs);
	savesegment(gs, svm->host.gs);
	svm->host.ldt = kvm_read_ldt();

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
			__this_cpu_write(current_tsc_ratio, tsc_ratio);
			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
		}
	}
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
	load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
	 * (caused by either a task switch or an inter-privilege IRET),
	 * so we do not need to update the CPL here.
	 */
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		/* This is symmetric with svm_set_segment() */
		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

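/*
 * When the guest's CR0 matches the shadow value and its FPU is active,
 * plain CR0 reads and writes need no intercept; the selective CR0 write
 * intercept still catches writes changing bits other than TS and MP.
 */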
static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	if (!svm->vcpu.fpu_active)
		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
	else
		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
			| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	mark_dirty(svm->vmcb, VMCB_CR);

	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	if (!vcpu->fpu_active)
		cr0 |= X86_CR0_TS;
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
	mark_dirty(svm->vmcb, VMCB_CR);
	update_cr0_intercept(svm);
}

static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
	return 0;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

	/*
	 * This is always accurate, except if SYSRET returned to a segment
	 * with SS.DPL != 3.  Intel does not have this quirk, and always
	 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
	 * would entail passing the CPL to userspace and back.
	 */
	if (seg == VCPU_SREG_SS)
		/* This is symmetric with svm_get_segment() */
		svm->vmcb->save.cpl = (var->dpl & 3);

	mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_bp_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, BP_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);
	} else
		vcpu->guest_debug = 0;
}

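/*
 * Hand out the next free ASID; once the pool is exhausted, start a new
 * generation and request a full TLB flush so stale entries cannot match.
 */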
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;

	mark_dirty(svm->vmcb, VMCB_ASID);
}

static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.dr6;
}

static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr6 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	vcpu->arch.dr6 = svm_get_dr6(vcpu);
	vcpu->arch.dr7 = svm->vmcb->save.dr7;

	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	set_dr_intercepts(svm);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr7 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

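/*
 * A #PF intercept delivers the faulting address in exit_info_2 and the
 * error code in exit_info_1; apf_reason tells real guest faults apart
 * from async page fault notifications arriving on the same vector.
 */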
static int pf_interception(struct vcpu_svm *svm)
{
	u64 fault_address = svm->vmcb->control.exit_info_2;
	u32 error_code;
	int r = 1;

	switch (svm->apf_reason) {
	default:
		error_code = svm->vmcb->control.exit_info_1;

		trace_kvm_page_fault(fault_address, error_code);
		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
			svm->vmcb->control.insn_bytes,
			svm->vmcb->control.insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wait(fault_address);
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}

static int db_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
		!svm->nmi_singlestep) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}

	if (svm->nmi_singlestep) {
		svm->nmi_singlestep = false;
		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
			svm->vmcb->save.rflags &=
				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
	int er;

	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
	if (er == EMULATE_USER_EXIT)
		return 0;
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int ac_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
	return 1;
}

Avi Kivity's avatar
Avi Kivity committed
1711
static void svm_fpu_activate(struct kvm_vcpu *vcpu)
1712
{
Avi Kivity's avatar
Avi Kivity committed
1713
	struct vcpu_svm *svm = to_svm(vcpu);
1714

1715
	clr_exception_intercept(svm, NM_VECTOR);
1716

Rusty Russell's avatar
Rusty Russell committed
1717
	svm->vcpu.fpu_active = 1;
Avi Kivity's avatar
Avi Kivity committed
1718
	update_cr0_intercept(svm);
Avi Kivity's avatar
Avi Kivity committed
1719
}
1720

Avi Kivity's avatar
Avi Kivity committed
1721 1722 1723
static int nm_interception(struct vcpu_svm *svm)
{
	svm_fpu_activate(&svm->vcpu);
1724
	return 1;
1725 1726
}

static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

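	/*
	 * Compare against the MC0 status value that identifies erratum 383;
	 * the constant below is taken from the erratum's description.
	 */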
	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low    = lower_32_bits(value);
		high   = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}

static void svm_handle_mce(struct vcpu_svm *svm)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("KVM: Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

		return;
	}

	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return;
}

static int mc_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
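	/*
	 * Only an OUT of a register value is handled via the fast path
	 * below; string I/O and IN still go through the full emulator.
	 */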
	if (string || in)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	svm->next_rip = svm->vmcb->control.exit_info_2;
	skip_emulated_instruction(&svm->vcpu);

	return kvm_fast_pio_out(vcpu, size, port);
}

static int nmi_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.irq_exits;
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
				   unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);
	svm_flush_tlb(vcpu);
}

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu.shadow_root_level = get_npt_level();
	reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	int vmexit;

	if (!is_guest_mode(&svm->vcpu))
		return 0;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = error_code;
	svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

	vmexit = nested_svm_intercept(svm);
	if (vmexit == NESTED_EXIT_DONE)
		svm->nested.exit_required = true;

	return vmexit;
}

/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
		return false;

	/*
	 * if vmexit was already requested (by intercepted exception
	 * for instance) do not overwrite it with "external interrupt"
	 * vmexit.
	 */
	if (svm->nested.exit_required)
		return false;

	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	if (svm->nested.intercept & 1ULL) {
		/*
		 * The #vmexit can't be emulated here directly because this
		 * code path runs with irqs and preemption disabled. A
		 * #vmexit emulation might sleep. Only signal request for
		 * the #vmexit here.
		 */
		svm->nested.exit_required = true;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		return false;
	}

	return true;
}

/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
		return true;

	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->nested.exit_required = true;

	return false;
}

static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
	struct page *page;

	might_sleep();

	page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		goto error;

	*_page = page;

	return kmap(page);

error:
	kvm_inject_gp(&svm->vcpu, 0);

	return NULL;
}

static void nested_svm_unmap(struct page *page)
{
	kunmap(page);
	kvm_release_page_dirty(page);
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

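	/*
	 * The IOPM holds one bit per I/O port, so an access of 'size' bytes
	 * starting at 'port' covers 'size' consecutive bits and may straddle
	 * a byte boundary - hence an iopm_len of one or two bytes below.
	 */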
	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

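	/*
	 * Each MSR has two permission bits (read and write) in the MSRPM;
	 * svm_msrpm_offset() locates the 32-bit word covering the MSR and
	 * the mask below selects the matching bit within that word.
	 */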
	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* The offset is in 32-bit units, but we need it in 8-bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_NPF:
		/* For now we are always handling NPFs when using them */
		if (npt_enabled)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
		/* When we're shadowing, trap PFs, but not async PF */
		if (!npt_enabled && svm->apf_reason == 0)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
		nm_interception(svm);
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

/*
 * If this function returns true, this #vmexit was already handled
 */
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (svm->nested.intercept_exceptions & excp_bits)
			vmexit = NESTED_EXIT_DONE;
		/* async page faults always cause a vmexit */
		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
			 svm->apf_reason != 0)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst  = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr         = from->intercept_cr;
	dst->intercept_dr         = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->lbr_ctl              = from->lbr_ctl;
}

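/*
 * Emulate a #VMEXIT from the nested guest (L2) back to the guest (L1):
 * copy the current VMCB state into L1's vmcb and restore the state that
 * was saved in hsave when the VMRUN was emulated.
 */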
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct page *page;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
	if (!nested_vmcb)
		return 1;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es     = vmcb->save.es;
	nested_vmcb->save.cs     = vmcb->save.cs;
	nested_vmcb->save.ss     = vmcb->save.ss;
	nested_vmcb->save.ds     = vmcb->save.ds;
	nested_vmcb->save.gdtr   = vmcb->save.gdtr;
	nested_vmcb->save.idtr   = vmcb->save.idtr;
	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2    = vmcb->save.cr2;
	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip    = vmcb->save.rip;
	nested_vmcb->save.rsp    = vmcb->save.rsp;
	nested_vmcb->save.rax    = vmcb->save.rax;
	nested_vmcb->save.dr7    = vmcb->save.dr7;
	nested_vmcb->save.dr6    = vmcb->save.dr6;
	nested_vmcb->save.cpl    = vmcb->save.cpl;

	nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector        = vmcb->control.int_vector;
	nested_vmcb->control.int_state         = vmcb->control.int_state;
	nested_vmcb->control.exit_code         = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip  = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't be both valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info     = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl           = 0;
	nested_vmcb->control.event_inj         = 0;
	nested_vmcb->control.event_inj_err     = 0;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	nested_svm_unmap(page);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	return 0;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);

	return true;
}

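/*
 * A small subset of the consistency checks that real hardware performs
 * at VMRUN; a failure is reported to the guest as SVM_EXIT_ERR.
 */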
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if (vmcb->control.nested_ctl && !npt_enabled)
		return false;

	return true;
}

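/*
 * Emulate VMRUN: save the current (L1) state to hsave, then switch the
 * vcpu to run with the state and intercepts of the nested vmcb (L2).
 */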
static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct page *page;
	u64 vmcb_gpa;

	vmcb_gpa = svm->vmcb->save.rax;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return false;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1  = 0;
		nested_vmcb->control.exit_info_2  = 0;

		nested_svm_unmap(page);

		return false;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3    = vmcb->save.cr3;
	else
		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	if (nested_vmcb->control.nested_ctl) {
		kvm_mmu_unload(&svm->vcpu);
		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
		nested_svm_init_mmu_context(&svm->vcpu);
	}

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else
		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
	svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
	svm->nested.intercept            = nested_vmcb->control.intercept;

	svm_flush_tlb(&svm->vcpu);
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of the guest */
		clr_cr_intercept(svm, INTERCEPT_CR8_READ);
		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	clr_intercept(svm, INTERCEPT_VMMCALL);

	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	nested_svm_unmap(page);

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	svm->nested.vmcb = vmcb_gpa;

	enable_gif(svm);

	mark_all_dirty(svm->vmcb);

	return true;
}

static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

static int vmload_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
	nested_svm_unmap(page);

	return 1;
}

static int vmsave_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
	nested_svm_unmap(page);

	return 1;
}

static int vmrun_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	/* Save rip after vmrun instruction */
	kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);

	if (!nested_svm_vmrun(svm))
		return 1;

	if (!nested_svm_vmrun_msrpm(svm))
		goto failed;

	return 1;

failed:

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

	return 1;
}

static int stgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	enable_gif(svm);

	return 1;
}

static int clgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	disable_gif(svm);

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	mark_dirty(svm->vmcb, VMCB_INTR);

	return 1;
}

static int invlpga_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
			  kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int skinit_interception(struct vcpu_svm *svm)
{
	trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));

	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int wbinvd_interception(struct vcpu_svm *svm)
{
	kvm_emulate_wbinvd(&svm->vcpu);
	return 1;
}

static int xsetbv_interception(struct vcpu_svm *svm)
{
	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
		skip_emulated_instruction(&svm->vcpu);
	}

	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

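	/*
	 * If the task switch was caused by an instruction (JMP/CALL/IRET,
	 * a software interrupt, or an INT3/INTO exception), that
	 * instruction has to be skipped before the switch is emulated.
	 */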
	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
		int_vec = -1;

	if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
				has_error_code, error_code) == EMULATE_FAIL) {
		svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		svm->vcpu.run->internal.ndata = 0;
		return 0;
	}
	return 1;
}

static int cpuid_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	clr_intercept(svm, INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;

	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}

static int rdpmc_interception(struct vcpu_svm *svm)
{
	int err;

	if (!static_cpu_has(X86_FEATURE_NRIPS))
		return emulate_on_interception(svm);

	err = kvm_rdpmc(&svm->vcpu);
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
					    unsigned long val)
{
	unsigned long cr0 = svm->vcpu.arch.cr0;
	bool ret = false;
	u64 intercept;

	intercept = svm->nested.intercept;

	if (!is_guest_mode(&svm->vcpu) ||
	    (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
		return false;

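	/*
	 * The selective CR0 intercept only fires for writes that change
	 * bits other than CR0.TS and CR0.MP, so mask those two bits out
	 * before comparing old and new values.
	 */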
	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
	}

	return ret;
}

#define CR_VALID (1ULL << 63)

static int cr_interception(struct vcpu_svm *svm)
{
	int reg, cr;
	unsigned long val;
	int err;

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
		cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
	else
		cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(&svm->vcpu, reg);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(svm, val))
				err = kvm_set_cr0(&svm->vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(&svm->vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(&svm->vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(&svm->vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(&svm->vcpu);
			break;
		case 2:
			val = svm->vcpu.arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(&svm->vcpu);
			break;
		case 4:
			val = kvm_read_cr4(&svm->vcpu);
			break;
		case 8:
			val = kvm_get_cr8(&svm->vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(&svm->vcpu, reg, val);
	}
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

static int dr_interception(struct vcpu_svm *svm)
{
	int reg, dr;
	unsigned long val;

	if (svm->vcpu.guest_debug == 0) {
		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		clr_dr_intercepts(svm);
		svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;

	if (dr >= 16) { /* mov to DRn */
		if (!kvm_require_dr(&svm->vcpu, dr - 16))
			return 1;
		val = kvm_register_read(&svm->vcpu, reg);
		kvm_set_dr(&svm->vcpu, dr - 16, val);
	} else {
		if (!kvm_require_dr(&svm->vcpu, dr))
			return 1;
		kvm_get_dr(&svm->vcpu, dr, &val);
		kvm_register_write(&svm->vcpu, reg, val);
	}

	skip_emulated_instruction(&svm->vcpu);

	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	int r;

	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(svm);
	if (lapic_in_kernel(&svm->vcpu))
		return r;
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return r;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));

	return vmcb->control.tsc_offset + host_tsc;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (msr_info->index) {
	case MSR_IA32_TSC: {
		msr_info->data = svm->vmcb->control.tsc_offset +
			kvm_scale_tsc(vcpu, rdtsc());

		break;
	}
	case MSR_STAR:
		msr_info->data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		msr_info->data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		msr_info->data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		msr_info->data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		msr_info->data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = svm->sysenter_esp;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		msr_info->data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		msr_info->data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		msr_info->data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		msr_info->data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		msr_info->data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		msr_info->data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		msr_info->data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_UCODE_REV:
		msr_info->data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, msr_info);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
	struct msr_data msr_info;

	msr_info.index = ecx;
	msr_info.host_initiated = false;
	if (svm_get_msr(&svm->vcpu, &msr_info)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_read(ecx, msr_info.data);

		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
				   msr_info.data & 0xffffffff);
		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
				   msr_info.data >> 32);
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

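/*
 * Emulate writes to MSR_VM_CR: once the guest has set SVM_DIS, the LOCK
 * and SVM_DIS bits become read-only, and setting SVM_DIS while EFER.SVME
 * is still enabled is rejected.
 */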
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	u32 ecx = msr->index;
	u64 data = msr->data;
	switch (ecx) {
	case MSR_IA32_CR_PAT:
		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
			return 1;
		vcpu->arch.pat = data;
		svm->vmcb->save.g_pat = data;
		mark_dirty(svm->vmcb, VMCB_NPT);
		break;
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr);
		break;
	case MSR_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		mark_dirty(svm->vmcb, VMCB_LBR);
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	struct msr_data msr;
	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
	u64 data = kvm_read_edx_eax(&svm->vcpu);

	msr.data = data;
	msr.index = ecx;
	msr.host_initiated = false;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (kvm_set_msr(&svm->vcpu, &msr)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_write(ecx, data);
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

static int interrupt_window_interception(struct vcpu_svm *svm)
{
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	++svm->vcpu.stat.irq_window_exits;
	return 1;
}

static int pause_interception(struct vcpu_svm *svm)
{
	kvm_vcpu_on_spin(&(svm->vcpu));
	return 1;
}

static int nop_interception(struct vcpu_svm *svm)
{
	skip_emulated_instruction(&(svm->vcpu));
	return 1;
}

static int monitor_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int mwait_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= cr_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= rdpmc_interception,
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]                         = iret_interception,
	[SVM_EXIT_INVD]                         = emulate_on_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_WBINVD]                       = wbinvd_interception,
	[SVM_EXIT_MONITOR]			= monitor_interception,
	[SVM_EXIT_MWAIT]			= mwait_interception,
	[SVM_EXIT_XSETBV]			= xsetbv_interception,
	[SVM_EXIT_NPF]				= pf_interception,
	[SVM_EXIT_RSM]                          = emulate_on_interception,
};

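/*
 * Dump the VMCB control and save areas to the kernel log; used when a
 * VMRUN fails with SVM_EXIT_ERR so the broken state can be inspected.
 */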
static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
	pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:",
	       save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:",
	       save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:",
	       save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:",
	       save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:",
	       save->fs.selector, save->fs.attrib,
	       save->fs.limit, save->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:",
	       save->gs.selector, save->gs.attrib,
	       save->gs.limit, save->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:",
	       save->ldtr.selector, save->ldtr.attrib,
	       save->ldtr.limit, save->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:",
	       save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:",
	       save->tr.selector, save->tr.attrib,
	       save->tr.limit, save->tr.base);
	pr_err("cpl:            %d                efer:         %016llx\n",
		save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save->star, "lstar:", save->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save->cstar, "sfmask:", save->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save->kernel_gs_base,
	       "sysenter_cs:", save->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save->sysenter_esp,
	       "sysenter_eip:", save->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
}

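/*
 * Top-level exit handler: complete any pending #VMEXIT emulation for a
 * nested guest, let the guest hypervisor (L1) claim the exit if it has
 * the corresponding intercept set, and otherwise dispatch through
 * svm_exit_handlers[].
 */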
static int handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
	if (npt_enabled)
		vcpu->arch.cr3 = svm->vmcb->save.cr3;

	if (unlikely(svm->nested.exit_required)) {
		nested_svm_vmexit(svm);
		svm->nested.exit_required = false;

		return 1;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err,
					KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	return svm_exit_handlers[exit_code](svm);
}

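/*
 * LTR only accepts an available TSS (type 9), so clear the busy bit in
 * the descriptor before reloading the host TR after a VMRUN.
 */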
static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

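/*
 * Inject an NMI through the VMCB event injection field.  IRET is
 * intercepted so we can track when the guest leaves its NMI-blocked
 * window.
 */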
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

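/*
 * Intercept CR8 writes only while the task priority could mask the
 * pending interrupt (tpr >= irr); otherwise let the guest update CR8
 * without exits.
 */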
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	if (irr == -1)
		return;

	if (tpr >= irr)
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
	return;
}

static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
{
	return 0;
}

static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
	return;
}

static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
	return;
}

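/*
 * An NMI can be injected only when GIF is set, no interrupt shadow is
 * active, NMIs are not masked, and a nested hypervisor does not
 * intercept them.
 */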
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;
	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
	ret = ret && gif_set(svm) && nested_svm_nmi(svm);

	return ret;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;

	if (!gif_set(svm) ||
	     (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
		return 0;

	ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);

	if (is_guest_mode(vcpu))
		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

	return ret;
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept.
	 */
	if (gif_set(svm) && nested_svm_intr(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/*
	 * Something prevents NMI from being injected. Single step over
	 * possible problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

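/*
 * Prefer a flush of just the guest's ASID when the CPU supports it;
 * otherwise force allocation of a fresh ASID on the next VMRUN.
 */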
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

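/*
 * Re-queue whatever event was pending when the VM exit occurred
 * (recorded in EXITINTINFO) so it gets injected again on the next
 * guest entry.
 */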
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	}

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * In case of software exceptions, do not reinject the vector,
		 * but re-execute the instruction instead. Rewind RIP first
		 * if we emulated INT3 before.
		 */
		if (kvm_exception_is_soft(vector)) {
			if (vector == BP_VECTOR && int3_injected &&
			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
				kvm_rip_write(&svm->vcpu,
					      kvm_rip_read(&svm->vcpu) -
					      int3_injected);
			break;
		}
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

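/*
 * Undo an event injection that did not take place because the entry
 * was aborted: copy event_inj back into exit_int_info and let
 * svm_complete_interrupts() re-queue it.
 */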
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(svm);
}

static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/*
	 * A vmexit emulation is required before the vcpu can be executed
	 * again.
	 */
	if (unlikely(svm->nested.exit_required))
		return;

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	svm->vmcb->save.cr2 = vcpu->arch.cr2;

	clgi();

	local_irq_enable();

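	/*
	 * Load guest GPRs, then VMLOAD/VMRUN/VMSAVE with rax pointing at
	 * the VMCB.  On exit, save the guest GPRs and clear host-clobbered
	 * registers to limit speculative reuse of guest values.
	 */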
	asm volatile (
		"push %%" _ASM_BP "; \n\t"
		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
		"mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
		"mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
		"mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
		"mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
		"mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]),  %%r8  \n\t"
		"mov %c[r9](%[svm]),  %%r9  \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%" _ASM_AX " \n\t"
		"mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%" _ASM_AX " \n\t"

		/* Save guest registers, load host registers */
		"mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
		"mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
		"mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
		"mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
		"mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
		"mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%[svm]) \n\t"
		"mov %%r9,  %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		/*
		* Clear host registers marked as clobbered to prevent
		* speculative use.
		*/
		"xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
		"xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
		"xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
		"xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
		"xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
#ifdef CONFIG_X86_64
		"xor %%r8, %%r8 \n\t"
		"xor %%r9, %%r9 \n\t"
		"xor %%r10, %%r10 \n\t"
		"xor %%r11, %%r11 \n\t"
		"xor %%r12, %%r12 \n\t"
		"xor %%r13, %%r13 \n\t"
		"xor %%r14, %%r14 \n\t"
		"xor %%r15, %%r15 \n\t"
#endif
		"pop %%" _ASM_BP
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
#ifdef CONFIG_X86_64
		, "rbx", "rcx", "rdx", "rsi", "rdi"
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#else
		, "ebx", "ecx", "edx", "esi", "edi"
#endif
		);

	/* Eliminate branch target predictions from guest mode */
	vmexit_fill_RSB();

#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
	loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif

	reload_tss(vcpu);

	local_irq_disable();

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_handle_nmi(&svm->vcpu);

	stgi();

	/* Any pending NMI will happen here */

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_handle_nmi(&svm->vcpu);

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* if exit due to PF check for async PF */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		svm->apf_reason = kvm_read_and_reset_pf_reason();

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance to
	 * change the physical cpu
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(svm);

	mark_all_clean(svm->vmcb);
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	mark_dirty(svm->vmcb, VMCB_CR);
	svm_flush_tlb(vcpu);
}

static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);

	/* Also sync guest cr3 here in case we live migrate */
	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
	mark_dirty(svm->vmcb, VMCB_CR);

	svm_flush_tlb(vcpu);
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static bool svm_has_high_real_mode_segbase(void)
{
	return true;
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Update nrips enabled cache */
	svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
}

static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
	switch (func) {
	case 0x80000001:
		if (nested)
			entry->ecx |= (1 << 2); /* Set SVM bit */
		break;
	case 0x8000000A:
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		entry->edx = 0; /* By default do not support any
				   additional features */

		/* Support next_rip if host supports it */
		if (boot_cpu_has(X86_FEATURE_NRIPS))
			entry->edx |= SVM_FEATURE_NRIP;

		/* Support NPT for the guest if enabled */
		if (npt_enabled)
			entry->edx |= SVM_FEATURE_NPT;

		break;
	}
}

static int svm_get_lpage_level(void)
{
	return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
	return false;
}

static bool svm_invpcid_supported(void)
{
	return false;
}

static bool svm_mpx_supported(void)
{
	return false;
}

static bool svm_xsaves_supported(void)
{
	return false;
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	set_exception_intercept(svm, NM_VECTOR);
	update_cr0_intercept(svm);
}

#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

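/*
 * Map x86 emulator intercept ids to SVM exit codes and the emulation
 * stage at which they must be checked, so svm_check_intercept() can
 * decide whether an emulated instruction belongs to the nested (L1)
 * hypervisor.
 */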
static const struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read]		= POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write]	= POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt]		= POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str]		= POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt]		= POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr]		= POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt]		= POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi]		= POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp]		= POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor]		= POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait]		= POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg]		= POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd]		= POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd]		= POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc]		= POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc]		= POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid]		= PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm]		= PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause]		= PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf]		= PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf]		= PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn]		= PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret]		= PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp]		= PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt]		= POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs]		= POST_EX(SVM_EXIT_IOIO),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

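/*
 * Called by the instruction emulator: build the exit state (exit code,
 * exit_info fields) the instruction would have produced and ask the
 * nested-SVM code whether L1 intercepts it.
 */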
static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		intercept = svm->nested.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * We get this for NOP only, but pause
		 * is rep nop, check this here
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip  = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
{
	local_irq_enable();
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}

static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.update_bp_intercept = update_bp_intercept,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr3 = svm_decache_cr3,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr6 = svm_get_dr6,
	.set_dr6 = svm_set_dr6,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,
	.fpu_activate = svm_fpu_activate,
	.fpu_deactivate = svm_fpu_deactivate,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
	.cpu_uses_apicv = svm_cpu_uses_apicv,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.sync_pir_to_irr = svm_sync_pir_to_irr,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.get_lpage_level = svm_get_lpage_level,

	.cpuid_update = svm_cpuid_update,

	.rdtscp_supported = svm_rdtscp_supported,
	.invpcid_supported = svm_invpcid_supported,
	.mpx_supported = svm_mpx_supported,
	.xsaves_supported = svm_xsaves_supported,

	.set_supported_cpuid = svm_set_supported_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.read_tsc_offset = svm_read_tsc_offset,
	.write_tsc_offset = svm_write_tsc_offset,
	.adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
	.read_l1_tsc = svm_read_l1_tsc,

	.set_tdp_cr3 = set_tdp_cr3,

	.check_intercept = svm_check_intercept,
	.handle_external_intr = svm_handle_external_intr,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)