/*
 * PPC CPU identification
 *
 * This is a very simple "host CPU info" struct to get us going.
 * For the little host information we need, I don't want to grub about
 * parsing stuff in /proc/device-tree so just match host PVR to differentiate
 * PPC970 and POWER7 (which is all that's currently supported).
 *
 * Qemu does something similar but this is MUCH simpler!
 *
 * Copyright 2012 Matt Evans <matt@ozlabs.org>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <kvm/kvm.h>
#include <sys/ioctl.h>

#include "cpu_info.h"
#include "kvm/util.h"

/* POWER7 */

static struct cpu_info cpu_power7_info = {
	.name = "POWER7",
	.tb_freq = 512000000,
	.d_bsize = 128,
	.i_bsize = 128,
	.flags = CPUINFO_FLAG_DFP | CPUINFO_FLAG_VSX | CPUINFO_FLAG_VMX,
	.mmu_info = {
		.flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS,
		.slb_size = 32,
	},
};

/* PPC970/G5 */

static struct cpu_info cpu_970_info = {
	.name = "G5",
	.tb_freq = 33333333,
	.d_bsize = 128,
	.i_bsize = 128,
	.flags = CPUINFO_FLAG_VMX,
};

/* This is a default catchall for 'no match' on PVR: */
static struct cpu_info cpu_dummy_info = { .name = "unknown" };

static struct pvr_info host_pvr_info[] = {
	{ 0xffffffff, 0x0f000003, &cpu_power7_info },
	{ 0xffff0000, 0x003f0000, &cpu_power7_info },
	{ 0xffff0000, 0x004a0000, &cpu_power7_info },
	{ 0xffff0000, 0x00390000, &cpu_970_info },
	{ 0xffff0000, 0x003c0000, &cpu_970_info },
	{ 0xffff0000, 0x00440000, &cpu_970_info },
	{ 0xffff0000, 0x00450000, &cpu_970_info },
};

/* If we can't query the kernel for supported page sizes assume 4K and 16M */
static struct kvm_ppc_one_seg_page_size fallback_sps[] = {
	[0] = {
		.page_shift = 12,
		.slb_enc    = 0,
		.enc = {
			[0] = {
				.page_shift = 12,
				.pte_enc    = 0,
			},
		},
	},
	[1] = {
		.page_shift = 24,
		.slb_enc    = 0x100,
		.enc = {
			[0] = {
				.page_shift = 24,
				.pte_enc    = 0,
			},
		},
	},
};

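/*
 * Fill in cpu_info->mmu_info: use KVM_PPC_GET_SMMU_INFO when the kernel
 * supports it, otherwise fall back to the 4K/16M table above.  If guest
 * page sizes are restricted by the backing page size, drop any segment or
 * page size larger than kvm->ram_pagesize and compact the remaining entries.
 */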
static void setup_mmu_info(struct kvm *kvm, struct cpu_info *cpu_info)
{
	static struct kvm_ppc_smmu_info *mmu_info;
	struct kvm_ppc_one_seg_page_size *sps;
	int i, j, k, valid;

	if (!kvm__supports_extension(kvm, KVM_CAP_PPC_GET_SMMU_INFO)) {
		memcpy(&cpu_info->mmu_info.sps, fallback_sps, sizeof(fallback_sps));
	} else if (ioctl(kvm->vm_fd, KVM_PPC_GET_SMMU_INFO, &cpu_info->mmu_info) < 0) {
		die_perror("KVM_PPC_GET_SMMU_INFO failed");
	}

	mmu_info = &cpu_info->mmu_info;

	if (!(mmu_info->flags & KVM_PPC_PAGE_SIZES_REAL))
		/* Guest pages are not restricted by the backing page size */
		return;

	/* Filter based on backing page size */

	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
		sps = &mmu_info->sps[i];

		if (!sps->page_shift)
			break;

		if (kvm->ram_pagesize < (1ul << sps->page_shift)) {
			/* Mark the whole segment size invalid */
			sps->page_shift = 0;
			continue;
		}

		/* Check each page size for the segment */
		for (j = 0, valid = 0; j < KVM_PPC_PAGE_SIZES_MAX_SZ; j++) {
			if (!sps->enc[j].page_shift)
				break;

			if (kvm->ram_pagesize < (1ul << sps->enc[j].page_shift))
				sps->enc[j].page_shift = 0;
			else
				valid++;
		}

		if (!valid) {
			/* Mark the whole segment size invalid */
			sps->page_shift = 0;
			continue;
		}

		/* Mark any trailing entries invalid if we broke out early */
		for (k = j; k < KVM_PPC_PAGE_SIZES_MAX_SZ; k++)
			sps->enc[k].page_shift = 0;

		/* Collapse holes */
		for (j = 0; j < KVM_PPC_PAGE_SIZES_MAX_SZ; j++) {
			if (sps->enc[j].page_shift)
				continue;

			for (k = j + 1; k < KVM_PPC_PAGE_SIZES_MAX_SZ; k++) {
				if (sps->enc[k].page_shift) {
					sps->enc[j] = sps->enc[k];
					sps->enc[k].page_shift = 0;
					break;
				}
			}
		}
	}

	/* Mark any trailing entries invalid if we broke out early */
	for (j = i; j < KVM_PPC_PAGE_SIZES_MAX_SZ; j++)
		mmu_info->sps[j].page_shift = 0;

	/* Collapse holes */
	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
		if (mmu_info->sps[i].page_shift)
			continue;

		for (j = i + 1; j < KVM_PPC_PAGE_SIZES_MAX_SZ; j++) {
			if (mmu_info->sps[j].page_shift) {
				mmu_info->sps[i] = mmu_info->sps[j];
				mmu_info->sps[j].page_shift = 0;
				break;
			}
		}
	}
}

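/*
 * Match the host PVR against host_pvr_info[] and return the corresponding
 * cpu_info, falling back to cpu_dummy_info (with a warning) for unknown
 * CPUs.  The returned structure has its mmu_info set up for this host.
 */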
struct cpu_info *find_cpu_info(struct kvm *kvm)
{
	struct cpu_info *info;
	unsigned int i;
	u32 pvr = kvm->pvr;

	for (info = NULL, i = 0; i < ARRAY_SIZE(host_pvr_info); i++) {
		if ((pvr & host_pvr_info[i].pvr_mask) == host_pvr_info[i].pvr) {
			info = host_pvr_info[i].cpu_info;
			break;
		}
	}

	/* Didn't find anything? Rut-ro. */
	if (!info) {
		pr_warning("Host CPU unsupported by kvmtool\n");
		info = &cpu_dummy_info;
	}

	setup_mmu_info(kvm, info);

	return info;
}