// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include "processor.h"
#include "kvm_util.h"

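/* Region and segment tables are 4 pages long, i.e. 2048 8-byte entries */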
#define PAGES_PER_REGION 4

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	vm_paddr_t paddr;

	TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
		    vm->page_size);

	if (vm->pgd_created)
		return;

	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				   vm->memslots[MEM_REGION_PT]);
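	/* 0xff has the invalid bit set, so all entries start out invalid */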
	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

	vm->pgd = paddr;
	vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
	uint64_t taddr;

	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
	memset(addr_gpa2hva(vm, taddr), 0xff,
	       (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

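	/*
	 * Build the entry from the table origin, the table-type bits
	 * (3 = region-first entry down to 0 = segment entry) and the
	 * table length (3, i.e. 4 pages, for region/segment tables).
	 */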
	return (taddr & REGION_ENTRY_ORIGIN)
		| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(gva >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);

	/*
	 * Walk through the region and segment tables: each of the four
	 * table levels is indexed by 11 bits of the virtual address,
	 * from the region-first index down to the segment index.
	 */
	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		if (entry[idx] & REGION_ENTRY_INVALID)
			entry[idx] = virt_alloc_region(vm, ri);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	/* Fill in page table entry */
	idx = (gva >> PAGE_SHIFT) & 0x0ffu;		/* page index: 256 PTEs per table */
	if (!(entry[idx] & PAGE_INVALID))
		fprintf(stderr,
			"WARNING: PTE for gva=0x%"PRIx64" already set!\n", gva);
	entry[idx] = gpa;
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
		    vm->page_size);

	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
			    "No region mapping for vm virtual address 0x%lx",
			    gva);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	idx = (gva >> PAGE_SHIFT) & 0x0ffu;		/* page index */

	TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
		    "No page mapping for vm virtual address 0x%lx", gva);

	return (entry[idx] & ~0xffful) + (gva & 0xffful);
}

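/* Print all valid entries of the 256-entry page table at ptea_start */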
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			   uint64_t ptea_start)
{
	uint64_t *pte, ptea;

	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
		pte = addr_gpa2hva(vm, ptea);
		if (*pte & PAGE_INVALID)
			continue;
		fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
			indent, "", ptea, *pte);
	}
}

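/*
 * Print all valid entries of the 2048-entry region/segment table at
 * reg_tab_addr, recursing into lower-level tables.
 */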
static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			     uint64_t reg_tab_addr)
{
	uint64_t addr, *entry;

	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
		entry = addr_gpa2hva(vm, addr);
		if (*entry & REGION_ENTRY_INVALID)
			continue;
		fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
			indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
			addr, *entry);
		if (*entry & REGION_ENTRY_TYPE) {
			virt_dump_region(stream, vm, indent + 2,
					 *entry & REGION_ENTRY_ORIGIN);
		} else {
			virt_dump_ptes(stream, vm, indent + 2,
				       *entry & REGION_ENTRY_ORIGIN);
		}
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	if (!vm->pgd_created)
		return;

	virt_dump_region(stream, vm, indent, vm->pgd);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	vcpu->run->psw_addr = (uintptr_t)guest_code;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu *vcpu;

	TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
		    vm->page_size);

	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	vcpu = __vm_vcpu_add(vm, vcpu_id);

	/* Setup guest registers */
	vcpu_regs_get(vcpu, &regs);
	/* r15 is the stack pointer; reserve the 160-byte ABI register save area */
	regs.gprs[15] = stack_vaddr + stack_size - 160;
	vcpu_regs_set(vcpu, &regs);

	vcpu_sregs_get(vcpu, &sregs);
	sregs.crs[0] |= 0x00040000;		/* Enable AFP (additional FP regs) */
	sregs.crs[1] = vm->pgd | 0xf;		/* Primary region-first table */
	vcpu_sregs_set(vcpu, &sregs);

	vcpu->run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */

	return vcpu;
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;
	int i;

	TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
		    "  num: %u",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vcpu, &regs);

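	/* Per the s390x ABI, the first five integer arguments go in r2-r6 */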
	for (i = 0; i < num; i++)
		regs.gprs[i + 2] = va_arg(ap, uint64_t);

	vcpu_regs_set(vcpu, &regs);
	va_end(ap);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
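	/* Nothing to do here: unhandled exceptions are not tracked on s390x */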
}