/*
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Borrowed heavily from QEMU's xics.c,
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Modifications copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "spapr.h"
#include "xics.h"
#include "kvm/util.h"

#include <stdio.h>
#include <malloc.h>

#define XICS_NUM_IRQS		1024


/* #define DEBUG_XICS yes */
#ifdef DEBUG_XICS
#define xics_dprintf(fmt, ...)					\
	do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define xics_dprintf(fmt, ...)			\
	do { } while (0)
#endif

/*
 * ICP: Presentation layer
 */

struct icp_server_state {
	uint32_t xirr;
	uint8_t pending_priority;
	uint8_t mfrr;
	struct kvm_cpu *cpu;
};

#define XICS_IRQ_OFFSET	16
#define XISR_MASK	0x00ffffff
#define CPPR_MASK	0xff000000

#define XISR(ss)	(((ss)->xirr) & XISR_MASK)
#define CPPR(ss)	(((ss)->xirr) >> 24)
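
/*
 * XIRR layout, as the macros above encode it: the top byte is the CPPR
 * (Current Processor Priority Register) and the low 24 bits are the XISR,
 * the source number of the interrupt currently being presented.  Lower
 * numeric priority values are more favoured; 0xff is least favoured and
 * doubles as "masked"/"no request".  The MFRR (Most Favoured Request
 * Register) is used for IPIs and idles at 0xff when no IPI is pending;
 * pending_priority tracks the priority of whatever is sitting in XISR.
 */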

struct ics_state;

struct icp_state {
	unsigned long nr_servers;
	struct icp_server_state *ss;
	struct ics_state *ics;
};

static void ics_reject(struct ics_state *ics, int nr);
static void ics_resend(struct ics_state *ics);
static void ics_eoi(struct ics_state *ics, int nr);

static inline void cpu_irq_raise(struct kvm_cpu *vcpu)
{
	xics_dprintf("INT1[%p]\n", vcpu);
	kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 1);
}

static inline void cpu_irq_lower(struct kvm_cpu *vcpu)
{
	xics_dprintf("INT0[%p]\n", vcpu);
	kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 0);
}

static void icp_check_ipi(struct icp_state *icp, int server)
{
	struct icp_server_state *ss = icp->ss + server;

	if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
		return;
	}

	if (XISR(ss)) {
		ics_reject(icp->ics, XISR(ss));
	}

	ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
	ss->pending_priority = ss->mfrr;
	cpu_irq_raise(ss->cpu);
}

static void icp_resend(struct icp_state *icp, int server)
{
	struct icp_server_state *ss = icp->ss + server;

	if (ss->mfrr < CPPR(ss)) {
		icp_check_ipi(icp, server);
	}
	ics_resend(icp->ics);
}

static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
{
	struct icp_server_state *ss = icp->ss + server;
	uint8_t old_cppr;
	uint32_t old_xisr;

	old_cppr = CPPR(ss);
	ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

	if (cppr < old_cppr) {
		if (XISR(ss) && (cppr <= ss->pending_priority)) {
			old_xisr = XISR(ss);
			ss->xirr &= ~XISR_MASK; /* Clear XISR */
			cpu_irq_lower(ss->cpu);
			ics_reject(icp->ics, old_xisr);
		}
	} else {
		if (!XISR(ss)) {
			icp_resend(icp, server);
		}
	}
}

static void icp_set_mfrr(struct icp_state *icp, int nr, uint8_t mfrr)
{
	struct icp_server_state *ss = icp->ss + nr;

	ss->mfrr = mfrr;
	if (mfrr < CPPR(ss)) {
		icp_check_ipi(icp, nr);
	}
}

static uint32_t icp_accept(struct icp_server_state *ss)
{
	uint32_t xirr;

	cpu_irq_lower(ss->cpu);
	xirr = ss->xirr;
	ss->xirr = ss->pending_priority << 24;
	return xirr;
}

static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
{
	struct icp_server_state *ss = icp->ss + server;

	ics_eoi(icp->ics, xirr & XISR_MASK);
	/* Send EOI -> ICS */
	ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
	if (!XISR(ss)) {
		icp_resend(icp, server);
	}
}
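
/*
 * icp_irq() is the ICS -> ICP presentation path: interrupt 'nr' is delivered
 * to 'server' only if its priority is more favoured (numerically lower) than
 * the server's CPPR and beats whatever is already sitting in XISR; otherwise
 * it is bounced back to the source layer with ics_reject() so it can be
 * resent later.  A successful delivery also rejects any less favoured
 * interrupt that was previously pending.
 */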

static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
{
	struct icp_server_state *ss = icp->ss + server;
	xics_dprintf("icp_irq(nr %d, server %d, prio 0x%x)\n", nr, server, priority);
	if ((priority >= CPPR(ss))
	    || (XISR(ss) && (ss->pending_priority <= priority))) {
		xics_dprintf("reject %d, CPPR 0x%x, XISR 0x%x, pprio 0x%x, prio 0x%x\n",
			     nr, CPPR(ss), XISR(ss), ss->pending_priority, priority);
		ics_reject(icp->ics, nr);
	} else {
		if (XISR(ss)) {
			xics_dprintf("reject %d, CPPR 0x%x, XISR 0x%x, pprio 0x%x, prio 0x%x\n",
				     nr, CPPR(ss), XISR(ss), ss->pending_priority, priority);
			ics_reject(icp->ics, XISR(ss));
		}
		ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
		ss->pending_priority = priority;
		cpu_irq_raise(ss->cpu);
	}
}

/*
 * ICS: Source layer
 */
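
/*
 * Per-source state: a priority of 0xff means the source is masked.
 * 'rejected' is set when the ICP bounces a delivery so ics_resend() can
 * retry it later; 'masked_pending' remembers that a masked source fired so
 * ics_write_xive_msi() can deliver it once it is unmasked.
 * 'saved_priority' is initialised but not otherwise used here.
 */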

struct ics_irq_state {
	int server;
	uint8_t priority;
	uint8_t saved_priority;
	int rejected:1;
	int masked_pending:1;
};

struct ics_state {
	unsigned int nr_irqs;
	unsigned int offset;
	struct ics_irq_state *irqs;
	struct icp_state *icp;
};

static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
{
	return (nr >= ics->offset)
		&& (nr < (ics->offset + ics->nr_irqs));
}

static void ics_set_irq_msi(struct ics_state *ics, int srcno, int val)
{
	struct ics_irq_state *irq = ics->irqs + srcno;

	if (val) {
		if (irq->priority == 0xff) {
			xics_dprintf(" irq pri ff, masked pending\n");
			irq->masked_pending = 1;
		} else {
			icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
		}
	}
}

static void ics_reject_msi(struct ics_state *ics, int nr)
{
	struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

	irq->rejected = 1;
}

static void ics_resend_msi(struct ics_state *ics)
{
	unsigned int i;

	for (i = 0; i < ics->nr_irqs; i++) {
		struct ics_irq_state *irq = ics->irqs + i;

		/* FIXME: filter by server#? */
		if (irq->rejected) {
			irq->rejected = 0;
			if (irq->priority != 0xff) {
				icp_irq(ics->icp, irq->server, i + ics->offset, irq->priority);
			}
		}
	}
}

static void ics_write_xive_msi(struct ics_state *ics, int nr, int server,
			       uint8_t priority)
{
	struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

	irq->server = server;
	irq->priority = priority;
	xics_dprintf("ics_write_xive_msi(nr %d, server %d, pri 0x%x)\n", nr, server, priority);

	if (!irq->masked_pending || (priority == 0xff)) {
		return;
	}

	irq->masked_pending = 0;
	icp_irq(ics->icp, server, nr, priority);
}

static void ics_reject(struct ics_state *ics, int nr)
{
	ics_reject_msi(ics, nr);
}

static void ics_resend(struct ics_state *ics)
{
	ics_resend_msi(ics);
}

static void ics_eoi(struct ics_state *ics, int nr)
{
}

/*
 * Exported functions
 */

static int allocated_irqnum = XICS_IRQ_OFFSET;

/*
 * xics_alloc_irqnum(): This is hacky.  The problem boils down to the PCI device
 * code which just calls kvm__irq_line( .. pcidev->pci_hdr.irq_line ..) at will.
 * Each PCI device's IRQ line is allocated by irq__register_device() (which
 * allocates an IRQ AND allocates a.. PCI device num..).
 *
 * In future I'd like to at least mimic some kind of 'upstream IRQ controller'
 * whereby PCI devices let their PHB know when they want to IRQ, and that
 * percolates up.
 *
 * For now, allocate a REAL xics irq number and (via irq__register_device) push
 * that into the config space.  8 bits only though!
 */
int xics_alloc_irqnum(void)
{
	int irq = allocated_irqnum++;

	if (irq > 255)
		die("Huge numbers of IRQs aren't supported with the daft kvmtool IRQ system.");

	return irq;
}
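
/*
 * The guest drives the ICP with hcalls: H_XIRR accepts the most favoured
 * pending interrupt (CPPR is set to that interrupt's priority), H_CPPR sets
 * the processor priority directly, H_EOI signals end-of-interrupt and
 * restores the CPPR carried in the XIRR value passed back, and H_IPI writes
 * another server's MFRR to send it an IPI.  A guest interrupt cycle is
 * roughly (illustrative only):
 *
 *	xirr = H_XIRR();		-> icp_accept()
 *	... handle source (xirr & XISR_MASK) ...
 *	H_EOI(xirr);			-> icp_eoi(), CPPR restored
 */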

static target_ulong h_cppr(struct kvm_cpu *vcpu,
			   target_ulong opcode, target_ulong *args)
{
	target_ulong cppr = args[0];

	xics_dprintf("h_cppr(%lx)\n", cppr);
	icp_set_cppr(vcpu->kvm->arch.icp, vcpu->cpu_id, cppr);
	return H_SUCCESS;
}

static target_ulong h_ipi(struct kvm_cpu *vcpu,
			  target_ulong opcode, target_ulong *args)
{
	target_ulong server = args[0];
	target_ulong mfrr = args[1];

	xics_dprintf("h_ipi(%lx, %lx)\n", server, mfrr);
	if (server >= vcpu->kvm->arch.icp->nr_servers) {
		return H_PARAMETER;
	}

	icp_set_mfrr(vcpu->kvm->arch.icp, server, mfrr);
	return H_SUCCESS;
}

static target_ulong h_xirr(struct kvm_cpu *vcpu,
			   target_ulong opcode, target_ulong *args)
{
	uint32_t xirr = icp_accept(vcpu->kvm->arch.icp->ss + vcpu->cpu_id);

	xics_dprintf("h_xirr() = %x\n", xirr);
	args[0] = xirr;
	return H_SUCCESS;
}

static target_ulong h_eoi(struct kvm_cpu *vcpu,
			  target_ulong opcode, target_ulong *args)
{
	target_ulong xirr = args[0];

	xics_dprintf("h_eoi(%lx)\n", xirr);
	icp_eoi(vcpu->kvm->arch.icp, vcpu->cpu_id, xirr);
	return H_SUCCESS;
}
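
/*
 * RTAS calls configure the sources: ibm,set-xive / ibm,get-xive set and
 * query an interrupt's server and priority (priority 0xff masks it), while
 * ibm,int-off / ibm,int-on are the mask/unmask entry points (beyond argument
 * checking they are no-ops here, as the comments below note).  The -3 status
 * returned on bad arguments corresponds to the RTAS parameter-error code.
 */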

static void rtas_set_xive(struct kvm_cpu *vcpu, uint32_t token,
			  uint32_t nargs, target_ulong args,
			  uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr, server, priority;

	if ((nargs != 3) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);
	server = rtas_ld(vcpu->kvm, args, 1);
	priority = rtas_ld(vcpu->kvm, args, 2);

	xics_dprintf("rtas_set_xive(%x,%x,%x)\n", nr, server, priority);
	if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
	    || (priority > 0xff)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	ics_write_xive_msi(ics, nr, server, priority);

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}

static void rtas_get_xive(struct kvm_cpu *vcpu, uint32_t token,
			  uint32_t nargs, target_ulong args,
			  uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 3)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
	rtas_st(vcpu->kvm, rets, 1, ics->irqs[nr - ics->offset].server);
	rtas_st(vcpu->kvm, rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(struct kvm_cpu *vcpu, uint32_t token,
			 uint32_t nargs, target_ulong args,
			 uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	/* ME: QEMU wrote xive_msi here, in #if 0.  Deleted. */

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}

static void rtas_int_on(struct kvm_cpu *vcpu, uint32_t token,
			uint32_t nargs, target_ulong args,
			uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	/* ME: QEMU wrote xive_msi here, in #if 0.  Deleted. */

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}
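
/*
 * xics_init() runs at startup via base_init(): it allocates one ICP server
 * per vcpu and a single ICS of XICS_NUM_IRQS sources starting at
 * XICS_IRQ_OFFSET (all initially masked), registers the hcall and RTAS
 * handlers above, and binds each vcpu to its ICP server.
 */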

static int xics_init(struct kvm *kvm)
{
	unsigned int i;
	struct icp_state *icp;
	struct ics_state *ics;
	int j;

	icp = malloc(sizeof(*icp));
	icp->nr_servers = kvm->nrcpus;
	icp->ss = malloc(icp->nr_servers * sizeof(struct icp_server_state));

	for (i = 0; i < icp->nr_servers; i++) {
		icp->ss[i].xirr = 0;
		icp->ss[i].pending_priority = 0;
		icp->ss[i].cpu = 0;
		icp->ss[i].mfrr = 0xff;
	}

	/*
	 * icp->ss[].cpu for each server is filled in from kvm->cpus[] in the
	 * loop further down, once the vcpus have been created.
	 */

	ics = malloc(sizeof(*ics));
	ics->nr_irqs = XICS_NUM_IRQS;
	ics->offset = XICS_IRQ_OFFSET;
	ics->irqs = malloc(ics->nr_irqs * sizeof(struct ics_irq_state));

	icp->ics = ics;
	ics->icp = icp;

	for (i = 0; i < ics->nr_irqs; i++) {
		ics->irqs[i].server = 0;
		ics->irqs[i].priority = 0xff;
		ics->irqs[i].saved_priority = 0xff;
		ics->irqs[i].rejected = 0;
		ics->irqs[i].masked_pending = 0;
	}

	spapr_register_hypercall(H_CPPR, h_cppr);
	spapr_register_hypercall(H_IPI, h_ipi);
	spapr_register_hypercall(H_XIRR, h_xirr);
	spapr_register_hypercall(H_EOI, h_eoi);

	spapr_rtas_register("ibm,set-xive", rtas_set_xive);
	spapr_rtas_register("ibm,get-xive", rtas_get_xive);
	spapr_rtas_register("ibm,int-off", rtas_int_off);
	spapr_rtas_register("ibm,int-on", rtas_int_on);

	for (j = 0; j < kvm->nrcpus; j++) {
		struct kvm_cpu *vcpu = kvm->cpus[j];

		if (vcpu->cpu_id >= icp->nr_servers)
			die("Invalid server number for cpuid %ld\n", vcpu->cpu_id);

		icp->ss[vcpu->cpu_id].cpu = vcpu;
	}

	kvm->arch.icp = icp;

	return 0;
}
base_init(xics_init);
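
/*
 * kvm__irq_line() is the generic IRQ hook the rest of kvmtool calls; here it
 * feeds the XICS source layer.  A device that obtained its interrupt number
 * from xics_alloc_irqnum() would typically pulse it along the lines of
 * (illustrative only):
 *
 *	kvm__irq_line(kvm, pcidev->pci_hdr.irq_line, 1);
 *	kvm__irq_line(kvm, pcidev->pci_hdr.irq_line, 0);
 *
 * Note that ics_set_irq_msi() only acts on a non-zero level, so lowering the
 * line is effectively a no-op (MSI-style edge semantics).
 */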

void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	/*
	 * Route event to ICS, which routes to ICP, which eventually does a
	 * kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 1)
	 */
	xics_dprintf("Raising IRQ %d -> %d\n", irq, level);
	ics_set_irq_msi(kvm->arch.icp->ics, irq - kvm->arch.icp->ics->offset, level);
}