KVM: selftests: Remove duplicate guest mode handling

Remove the duplicated code in run_test() of dirty_log_test: after some
reordering of functions, we can now directly use the outcome of
vm_create().

Meanwhile, with the new VM_MODE_PXXV48_4K, we can also safely revert
commit b442324b58, which pinned the x86_64 PA width to 39 bits for
dirty_log_test.
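
For illustration, the per-mode switch in run_test() collapses into a
few accessor calls on the created VM (a minimal sketch using only names
from the diff below):

	vm = create_vm(mode, VCPU_ID,
		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);
	/* Geometry now comes from the VM itself, not a per-mode table. */
	guest_page_size = vm_get_page_size(vm);
	guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm_get_page_shift(vm))) + 16;
	guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
			      guest_page_size;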

Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 52200d0d94 (parent 567a9f1e9d)
Author: Peter Xu, 2019-08-30 09:36:19 +08:00
Committed by: Paolo Bonzini
3 changed files with 26 additions and 47 deletions

tools/testing/selftests/kvm/dirty_log_test.c

@@ -267,10 +267,8 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		     unsigned long interval, uint64_t phys_offset)
 {
-	unsigned int guest_pa_bits, guest_page_shift;
 	pthread_t vcpu_thread;
 	struct kvm_vm *vm;
-	uint64_t max_gfn;
 	unsigned long *bmap;
 
 	/*
@@ -285,54 +283,13 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
 		       guest_code);
 
-	switch (mode) {
-	case VM_MODE_P52V48_4K:
-	case VM_MODE_PXXV48_4K:
-		guest_pa_bits = 52;
-		guest_page_shift = 12;
-		break;
-	case VM_MODE_P52V48_64K:
-		guest_pa_bits = 52;
-		guest_page_shift = 16;
-		break;
-	case VM_MODE_P48V48_4K:
-		guest_pa_bits = 48;
-		guest_page_shift = 12;
-		break;
-	case VM_MODE_P48V48_64K:
-		guest_pa_bits = 48;
-		guest_page_shift = 16;
-		break;
-	case VM_MODE_P40V48_4K:
-		guest_pa_bits = 40;
-		guest_page_shift = 12;
-		break;
-	case VM_MODE_P40V48_64K:
-		guest_pa_bits = 40;
-		guest_page_shift = 16;
-		break;
-	default:
-		TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
-	}
-
-	DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
-#ifdef __x86_64__
-	/*
-	 * FIXME
-	 * The x86_64 kvm selftests framework currently only supports a
-	 * single PML4 which restricts the number of physical address
-	 * bits we can change to 39.
-	 */
-	guest_pa_bits = 39;
-#endif
-	max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
-	guest_page_size = (1ul << guest_page_shift);
+	guest_page_size = vm_get_page_size(vm);
 	/*
 	 * A little more than 1G of guest page sized pages.  Cover the
 	 * case where the size is not aligned to 64 pages.
 	 */
-	guest_num_pages = (1ul << (DIRTY_MEM_BITS - guest_page_shift)) + 16;
+	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
+				   vm_get_page_shift(vm))) + 16;
 #ifdef __s390x__
 	/* Round up to multiple of 1M (segment size) */
 	guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
@@ -342,7 +299,8 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		!!((guest_num_pages * guest_page_size) % host_page_size);
 
 	if (!phys_offset) {
-		guest_test_phys_mem = (max_gfn - guest_num_pages) * guest_page_size;
+		guest_test_phys_mem = (vm_get_max_gfn(vm) -
+				       guest_num_pages) * guest_page_size;
		guest_test_phys_mem &= ~(host_page_size - 1);
 	} else {
 		guest_test_phys_mem = phys_offset;
tools/testing/selftests/kvm/include/kvm_util.h

@@ -154,6 +154,10 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
 
 bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
+unsigned int vm_get_page_size(struct kvm_vm *vm);
+unsigned int vm_get_page_shift(struct kvm_vm *vm);
+unsigned int vm_get_max_gfn(struct kvm_vm *vm);
+
 struct kvm_userspace_memory_region *
 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
 				 uint64_t end);

tools/testing/selftests/kvm/lib/kvm_util.c

@@ -136,6 +136,8 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 {
 	struct kvm_vm *vm;
 
+	DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
 	vm = calloc(1, sizeof(*vm));
 	TEST_ASSERT(vm != NULL, "Insufficient Memory");
 
@@ -1650,3 +1652,18 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm)
 
 	return val == 'Y';
 }
+
+unsigned int vm_get_page_size(struct kvm_vm *vm)
+{
+	return vm->page_size;
+}
+
+unsigned int vm_get_page_shift(struct kvm_vm *vm)
+{
+	return vm->page_shift;
+}
+
+unsigned int vm_get_max_gfn(struct kvm_vm *vm)
+{
+	return vm->max_gfn;
+}
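
The new helpers are thin getters over geometry that _vm_create()
already derives from the mode, so callers never re-compute it. A
hypothetical caller (sketch; vm_create_default() is the existing
selftests helper, VCPU_ID a test-local constant) could size and place
a region like this:

	struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_code);
	/* ~1G worth of guest pages, whatever the page size is. */
	uint64_t pages = 1ul << (30 - vm_get_page_shift(vm));
	uint64_t base  = (vm_get_max_gfn(vm) - pages) * vm_get_page_size(vm);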