xref: /linux/arch/powerpc/kvm/book3s_xics.c (revision 5af50993850a48ba749b122173d789ea90976c72)
1bc5ad3f3SBenjamin Herrenschmidt /*
2bc5ad3f3SBenjamin Herrenschmidt  * Copyright 2012 Michael Ellerman, IBM Corporation.
3bc5ad3f3SBenjamin Herrenschmidt  * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
4bc5ad3f3SBenjamin Herrenschmidt  *
5bc5ad3f3SBenjamin Herrenschmidt  * This program is free software; you can redistribute it and/or modify
6bc5ad3f3SBenjamin Herrenschmidt  * it under the terms of the GNU General Public License, version 2, as
7bc5ad3f3SBenjamin Herrenschmidt  * published by the Free Software Foundation.
8bc5ad3f3SBenjamin Herrenschmidt  */
9bc5ad3f3SBenjamin Herrenschmidt 
10bc5ad3f3SBenjamin Herrenschmidt #include <linux/kernel.h>
11bc5ad3f3SBenjamin Herrenschmidt #include <linux/kvm_host.h>
12bc5ad3f3SBenjamin Herrenschmidt #include <linux/err.h>
13bc5ad3f3SBenjamin Herrenschmidt #include <linux/gfp.h>
145975a2e0SPaul Mackerras #include <linux/anon_inodes.h>
15433c5c20SMichael Ellerman #include <linux/spinlock.h>
16bc5ad3f3SBenjamin Herrenschmidt 
177c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
18bc5ad3f3SBenjamin Herrenschmidt #include <asm/kvm_book3s.h>
19bc5ad3f3SBenjamin Herrenschmidt #include <asm/kvm_ppc.h>
20bc5ad3f3SBenjamin Herrenschmidt #include <asm/hvcall.h>
21bc5ad3f3SBenjamin Herrenschmidt #include <asm/xics.h>
22bc5ad3f3SBenjamin Herrenschmidt #include <asm/debug.h>
237bfa9ad5SPaul Mackerras #include <asm/time.h>
24bc5ad3f3SBenjamin Herrenschmidt 
25bc5ad3f3SBenjamin Herrenschmidt #include <linux/debugfs.h>
26bc5ad3f3SBenjamin Herrenschmidt #include <linux/seq_file.h>
27bc5ad3f3SBenjamin Herrenschmidt 
28bc5ad3f3SBenjamin Herrenschmidt #include "book3s_xics.h"
29bc5ad3f3SBenjamin Herrenschmidt 
30bc5ad3f3SBenjamin Herrenschmidt #if 1
31bc5ad3f3SBenjamin Herrenschmidt #define XICS_DBG(fmt...) do { } while (0)
32bc5ad3f3SBenjamin Herrenschmidt #else
33bc5ad3f3SBenjamin Herrenschmidt #define XICS_DBG(fmt...) trace_printk(fmt)
34bc5ad3f3SBenjamin Herrenschmidt #endif
35bc5ad3f3SBenjamin Herrenschmidt 
36e7d26f28SBenjamin Herrenschmidt #define ENABLE_REALMODE	true
37e7d26f28SBenjamin Herrenschmidt #define DEBUG_REALMODE	false
38e7d26f28SBenjamin Herrenschmidt 
39bc5ad3f3SBenjamin Herrenschmidt /*
40bc5ad3f3SBenjamin Herrenschmidt  * LOCKING
41bc5ad3f3SBenjamin Herrenschmidt  * =======
42bc5ad3f3SBenjamin Herrenschmidt  *
4334cb7954SSuresh Warrier  * Each ICS has a spin lock protecting the information about the IRQ
444e33d1f0SGreg Kurz  * sources and avoiding simultaneous deliveries of the same interrupt.
45bc5ad3f3SBenjamin Herrenschmidt  *
46bc5ad3f3SBenjamin Herrenschmidt  * ICP operations are done via a single compare & swap transaction
47bc5ad3f3SBenjamin Herrenschmidt  * (most ICP state fits in the union kvmppc_icp_state)
48bc5ad3f3SBenjamin Herrenschmidt  */
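/*
 * Concretely, the two patterns used throughout this file (shown here as
 * a sketch, not an additional API) are: per-source ICS state is updated
 * under
 *
 *	local_irq_save(flags);
 *	arch_spin_lock(&ics->lock);
 *	... update ics->irq_state[src] ...
 *	arch_spin_unlock(&ics->lock);
 *	local_irq_restore(flags);
 *
 * while ICP state is updated lock-free by retrying around
 * icp_try_update(), which performs one cmpxchg64() on icp->state.raw:
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... compute new_state ...
 *	} while (!icp_try_update(icp, old_state, new_state, change_self));
 */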
49bc5ad3f3SBenjamin Herrenschmidt 
50bc5ad3f3SBenjamin Herrenschmidt /*
51bc5ad3f3SBenjamin Herrenschmidt  * TODO
52bc5ad3f3SBenjamin Herrenschmidt  * ====
53bc5ad3f3SBenjamin Herrenschmidt  *
54bc5ad3f3SBenjamin Herrenschmidt  * - To speed up resends, keep a bitmap of "resend" set bits in the
55bc5ad3f3SBenjamin Herrenschmidt  *   ICS
56bc5ad3f3SBenjamin Herrenschmidt  *
57bc5ad3f3SBenjamin Herrenschmidt  * - Speed up server# -> ICP lookup (array ? hash table ?)
58bc5ad3f3SBenjamin Herrenschmidt  *
59bc5ad3f3SBenjamin Herrenschmidt  * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
60bc5ad3f3SBenjamin Herrenschmidt  *   locks array to improve scalability
61bc5ad3f3SBenjamin Herrenschmidt  */
62bc5ad3f3SBenjamin Herrenschmidt 
63bc5ad3f3SBenjamin Herrenschmidt /* -- ICS routines -- */
64bc5ad3f3SBenjamin Herrenschmidt 
65bc5ad3f3SBenjamin Herrenschmidt static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
6621acd0e4SLi Zhong 			    u32 new_irq, bool check_resend);
67bc5ad3f3SBenjamin Herrenschmidt 
6825a2150bSPaul Mackerras /*
6925a2150bSPaul Mackerras  * Return value ideally indicates how the interrupt was handled, but no
7025a2150bSPaul Mackerras  * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
7125a2150bSPaul Mackerras  * so just return 0.
7225a2150bSPaul Mackerras  */
7325a2150bSPaul Mackerras static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
74bc5ad3f3SBenjamin Herrenschmidt {
75bc5ad3f3SBenjamin Herrenschmidt 	struct ics_irq_state *state;
76bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_ics *ics;
77bc5ad3f3SBenjamin Herrenschmidt 	u16 src;
7817d48610SLi Zhong 	u32 pq_old, pq_new;
79bc5ad3f3SBenjamin Herrenschmidt 
80bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);
81bc5ad3f3SBenjamin Herrenschmidt 
82bc5ad3f3SBenjamin Herrenschmidt 	ics = kvmppc_xics_find_ics(xics, irq, &src);
83bc5ad3f3SBenjamin Herrenschmidt 	if (!ics) {
84bc5ad3f3SBenjamin Herrenschmidt 		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
85bc5ad3f3SBenjamin Herrenschmidt 		return -EINVAL;
86bc5ad3f3SBenjamin Herrenschmidt 	}
87bc5ad3f3SBenjamin Herrenschmidt 	state = &ics->irq_state[src];
88bc5ad3f3SBenjamin Herrenschmidt 	if (!state->exists)
89bc5ad3f3SBenjamin Herrenschmidt 		return -EINVAL;
90bc5ad3f3SBenjamin Herrenschmidt 
9117d48610SLi Zhong 	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
9217d48610SLi Zhong 		level = 1;
9317d48610SLi Zhong 	else if (level == KVM_INTERRUPT_UNSET)
9417d48610SLi Zhong 		level = 0;
95bc5ad3f3SBenjamin Herrenschmidt 	/*
9617d48610SLi Zhong 	 * Take any other value to mean 1, consistent with the original code.
9717d48610SLi Zhong 	 * Maybe WARN here?
98bc5ad3f3SBenjamin Herrenschmidt 	 */
9917d48610SLi Zhong 
10017d48610SLi Zhong 	if (!state->lsi && level == 0) /* noop for MSI */
101bc5ad3f3SBenjamin Herrenschmidt 		return 0;
10217d48610SLi Zhong 
10317d48610SLi Zhong 	do {
10417d48610SLi Zhong 		pq_old = state->pq_state;
10517d48610SLi Zhong 		if (state->lsi) {
10617d48610SLi Zhong 			if (level) {
10717d48610SLi Zhong 				if (pq_old & PQ_PRESENTED)
10817d48610SLi Zhong 					/* Setting already set LSI ... */
10917d48610SLi Zhong 					return 0;
11017d48610SLi Zhong 
11117d48610SLi Zhong 				pq_new = PQ_PRESENTED;
11217d48610SLi Zhong 			} else
11317d48610SLi Zhong 				pq_new = 0;
11417d48610SLi Zhong 		} else
11517d48610SLi Zhong 			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
11617d48610SLi Zhong 	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
11717d48610SLi Zhong 
11817d48610SLi Zhong 	/* Test P=1, Q=0; this is the only case where we present */
11917d48610SLi Zhong 	if (pq_new == PQ_PRESENTED)
12021acd0e4SLi Zhong 		icp_deliver_irq(xics, NULL, irq, false);
121bc5ad3f3SBenjamin Herrenschmidt 
1225d375199SPaul Mackerras 	/* Record which CPU this arrived on for passed-through interrupts */
1235d375199SPaul Mackerras 	if (state->host_irq)
1245d375199SPaul Mackerras 		state->intr_cpu = raw_smp_processor_id();
1255d375199SPaul Mackerras 
12625a2150bSPaul Mackerras 	return 0;
127bc5ad3f3SBenjamin Herrenschmidt }
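/*
 * For reference, the MSI branch above walks the XICS P/Q state machine.
 * Assuming, per book3s_xics.h, that PQ_PRESENTED is bit 0 (P) and
 * PQ_QUEUED is bit 1 (Q), pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED
 * gives:
 *
 *	P=0,Q=0 -> P=1,Q=0	pq_new == PQ_PRESENTED, present to the ICP
 *	P=1,Q=0 -> P=1,Q=1	already presented, remember it in Q
 *	P=1,Q=1 -> P=1,Q=1	already presented and queued, no change
 *
 * so only the first transition calls icp_deliver_irq().
 */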
128bc5ad3f3SBenjamin Herrenschmidt 
129bc5ad3f3SBenjamin Herrenschmidt static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
130bc5ad3f3SBenjamin Herrenschmidt 			     struct kvmppc_icp *icp)
131bc5ad3f3SBenjamin Herrenschmidt {
132bc5ad3f3SBenjamin Herrenschmidt 	int i;
133bc5ad3f3SBenjamin Herrenschmidt 
134bc5ad3f3SBenjamin Herrenschmidt 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
135bc5ad3f3SBenjamin Herrenschmidt 		struct ics_irq_state *state = &ics->irq_state[i];
13621acd0e4SLi Zhong 		if (state->resend) {
137bc5ad3f3SBenjamin Herrenschmidt 			XICS_DBG("resend %#x prio %#x\n", state->number,
138bc5ad3f3SBenjamin Herrenschmidt 				      state->priority);
13921acd0e4SLi Zhong 			icp_deliver_irq(xics, icp, state->number, true);
140bc5ad3f3SBenjamin Herrenschmidt 		}
14121acd0e4SLi Zhong 	}
142bc5ad3f3SBenjamin Herrenschmidt }
143bc5ad3f3SBenjamin Herrenschmidt 
144d19bd862SPaul Mackerras static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
145d19bd862SPaul Mackerras 		       struct ics_irq_state *state,
146d19bd862SPaul Mackerras 		       u32 server, u32 priority, u32 saved_priority)
147d19bd862SPaul Mackerras {
148d19bd862SPaul Mackerras 	bool deliver;
14934cb7954SSuresh Warrier 	unsigned long flags;
150d19bd862SPaul Mackerras 
15134cb7954SSuresh Warrier 	local_irq_save(flags);
15234cb7954SSuresh Warrier 	arch_spin_lock(&ics->lock);
153d19bd862SPaul Mackerras 
154d19bd862SPaul Mackerras 	state->server = server;
155d19bd862SPaul Mackerras 	state->priority = priority;
156d19bd862SPaul Mackerras 	state->saved_priority = saved_priority;
157d19bd862SPaul Mackerras 	deliver = false;
158d19bd862SPaul Mackerras 	if ((state->masked_pending || state->resend) && priority != MASKED) {
159d19bd862SPaul Mackerras 		state->masked_pending = 0;
160bf5a71d5SLi Zhong 		state->resend = 0;
161d19bd862SPaul Mackerras 		deliver = true;
162d19bd862SPaul Mackerras 	}
163d19bd862SPaul Mackerras 
16434cb7954SSuresh Warrier 	arch_spin_unlock(&ics->lock);
16534cb7954SSuresh Warrier 	local_irq_restore(flags);
166d19bd862SPaul Mackerras 
167d19bd862SPaul Mackerras 	return deliver;
168d19bd862SPaul Mackerras }
169d19bd862SPaul Mackerras 
170bc5ad3f3SBenjamin Herrenschmidt int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
171bc5ad3f3SBenjamin Herrenschmidt {
172bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_xics *xics = kvm->arch.xics;
173bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_icp *icp;
174bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_ics *ics;
175bc5ad3f3SBenjamin Herrenschmidt 	struct ics_irq_state *state;
176bc5ad3f3SBenjamin Herrenschmidt 	u16 src;
177bc5ad3f3SBenjamin Herrenschmidt 
178bc5ad3f3SBenjamin Herrenschmidt 	if (!xics)
179bc5ad3f3SBenjamin Herrenschmidt 		return -ENODEV;
180bc5ad3f3SBenjamin Herrenschmidt 
181bc5ad3f3SBenjamin Herrenschmidt 	ics = kvmppc_xics_find_ics(xics, irq, &src);
182bc5ad3f3SBenjamin Herrenschmidt 	if (!ics)
183bc5ad3f3SBenjamin Herrenschmidt 		return -EINVAL;
184bc5ad3f3SBenjamin Herrenschmidt 	state = &ics->irq_state[src];
185bc5ad3f3SBenjamin Herrenschmidt 
186bc5ad3f3SBenjamin Herrenschmidt 	icp = kvmppc_xics_find_server(kvm, server);
187bc5ad3f3SBenjamin Herrenschmidt 	if (!icp)
188bc5ad3f3SBenjamin Herrenschmidt 		return -EINVAL;
189bc5ad3f3SBenjamin Herrenschmidt 
190bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
191bc5ad3f3SBenjamin Herrenschmidt 		 irq, server, priority,
192bc5ad3f3SBenjamin Herrenschmidt 		 state->masked_pending, state->resend);
193bc5ad3f3SBenjamin Herrenschmidt 
194d19bd862SPaul Mackerras 	if (write_xive(xics, ics, state, server, priority, priority))
19521acd0e4SLi Zhong 		icp_deliver_irq(xics, icp, irq, false);
196bc5ad3f3SBenjamin Herrenschmidt 
197bc5ad3f3SBenjamin Herrenschmidt 	return 0;
198bc5ad3f3SBenjamin Herrenschmidt }
199bc5ad3f3SBenjamin Herrenschmidt 
200bc5ad3f3SBenjamin Herrenschmidt int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
201bc5ad3f3SBenjamin Herrenschmidt {
202bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_xics *xics = kvm->arch.xics;
203bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_ics *ics;
204bc5ad3f3SBenjamin Herrenschmidt 	struct ics_irq_state *state;
205bc5ad3f3SBenjamin Herrenschmidt 	u16 src;
20634cb7954SSuresh Warrier 	unsigned long flags;
207bc5ad3f3SBenjamin Herrenschmidt 
208bc5ad3f3SBenjamin Herrenschmidt 	if (!xics)
209bc5ad3f3SBenjamin Herrenschmidt 		return -ENODEV;
210bc5ad3f3SBenjamin Herrenschmidt 
211bc5ad3f3SBenjamin Herrenschmidt 	ics = kvmppc_xics_find_ics(xics, irq, &src);
212bc5ad3f3SBenjamin Herrenschmidt 	if (!ics)
213bc5ad3f3SBenjamin Herrenschmidt 		return -EINVAL;
214bc5ad3f3SBenjamin Herrenschmidt 	state = &ics->irq_state[src];
215bc5ad3f3SBenjamin Herrenschmidt 
21634cb7954SSuresh Warrier 	local_irq_save(flags);
21734cb7954SSuresh Warrier 	arch_spin_lock(&ics->lock);
218bc5ad3f3SBenjamin Herrenschmidt 	*server = state->server;
219bc5ad3f3SBenjamin Herrenschmidt 	*priority = state->priority;
22034cb7954SSuresh Warrier 	arch_spin_unlock(&ics->lock);
22134cb7954SSuresh Warrier 	local_irq_restore(flags);
222bc5ad3f3SBenjamin Herrenschmidt 
223bc5ad3f3SBenjamin Herrenschmidt 	return 0;
224bc5ad3f3SBenjamin Herrenschmidt }
225bc5ad3f3SBenjamin Herrenschmidt 
226d19bd862SPaul Mackerras int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
227d19bd862SPaul Mackerras {
228d19bd862SPaul Mackerras 	struct kvmppc_xics *xics = kvm->arch.xics;
229d19bd862SPaul Mackerras 	struct kvmppc_icp *icp;
230d19bd862SPaul Mackerras 	struct kvmppc_ics *ics;
231d19bd862SPaul Mackerras 	struct ics_irq_state *state;
232d19bd862SPaul Mackerras 	u16 src;
233d19bd862SPaul Mackerras 
234d19bd862SPaul Mackerras 	if (!xics)
235d19bd862SPaul Mackerras 		return -ENODEV;
236d19bd862SPaul Mackerras 
237d19bd862SPaul Mackerras 	ics = kvmppc_xics_find_ics(xics, irq, &src);
238d19bd862SPaul Mackerras 	if (!ics)
239d19bd862SPaul Mackerras 		return -EINVAL;
240d19bd862SPaul Mackerras 	state = &ics->irq_state[src];
241d19bd862SPaul Mackerras 
242d19bd862SPaul Mackerras 	icp = kvmppc_xics_find_server(kvm, state->server);
243d19bd862SPaul Mackerras 	if (!icp)
244d19bd862SPaul Mackerras 		return -EINVAL;
245d19bd862SPaul Mackerras 
246d19bd862SPaul Mackerras 	if (write_xive(xics, ics, state, state->server, state->saved_priority,
247d19bd862SPaul Mackerras 		       state->saved_priority))
24821acd0e4SLi Zhong 		icp_deliver_irq(xics, icp, irq, false);
249d19bd862SPaul Mackerras 
250d19bd862SPaul Mackerras 	return 0;
251d19bd862SPaul Mackerras }
252d19bd862SPaul Mackerras 
253d19bd862SPaul Mackerras int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
254d19bd862SPaul Mackerras {
255d19bd862SPaul Mackerras 	struct kvmppc_xics *xics = kvm->arch.xics;
256d19bd862SPaul Mackerras 	struct kvmppc_ics *ics;
257d19bd862SPaul Mackerras 	struct ics_irq_state *state;
258d19bd862SPaul Mackerras 	u16 src;
259d19bd862SPaul Mackerras 
260d19bd862SPaul Mackerras 	if (!xics)
261d19bd862SPaul Mackerras 		return -ENODEV;
262d19bd862SPaul Mackerras 
263d19bd862SPaul Mackerras 	ics = kvmppc_xics_find_ics(xics, irq, &src);
264d19bd862SPaul Mackerras 	if (!ics)
265d19bd862SPaul Mackerras 		return -EINVAL;
266d19bd862SPaul Mackerras 	state = &ics->irq_state[src];
267d19bd862SPaul Mackerras 
268d19bd862SPaul Mackerras 	write_xive(xics, ics, state, state->server, MASKED, state->priority);
269d19bd862SPaul Mackerras 
270d19bd862SPaul Mackerras 	return 0;
271d19bd862SPaul Mackerras }
272d19bd862SPaul Mackerras 
273bc5ad3f3SBenjamin Herrenschmidt /* -- ICP routines, including hcalls -- */
274bc5ad3f3SBenjamin Herrenschmidt 
275bc5ad3f3SBenjamin Herrenschmidt static inline bool icp_try_update(struct kvmppc_icp *icp,
276bc5ad3f3SBenjamin Herrenschmidt 				  union kvmppc_icp_state old,
277bc5ad3f3SBenjamin Herrenschmidt 				  union kvmppc_icp_state new,
278bc5ad3f3SBenjamin Herrenschmidt 				  bool change_self)
279bc5ad3f3SBenjamin Herrenschmidt {
280bc5ad3f3SBenjamin Herrenschmidt 	bool success;
281bc5ad3f3SBenjamin Herrenschmidt 
282bc5ad3f3SBenjamin Herrenschmidt 	/* Calculate new output value */
283bc5ad3f3SBenjamin Herrenschmidt 	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));
284bc5ad3f3SBenjamin Herrenschmidt 
285bc5ad3f3SBenjamin Herrenschmidt 	/* Attempt atomic update */
286bc5ad3f3SBenjamin Herrenschmidt 	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
287bc5ad3f3SBenjamin Herrenschmidt 	if (!success)
288bc5ad3f3SBenjamin Herrenschmidt 		goto bail;
289bc5ad3f3SBenjamin Herrenschmidt 
290ade3ac66SAlexey Kardashevskiy 	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
291bc5ad3f3SBenjamin Herrenschmidt 		 icp->server_num,
292bc5ad3f3SBenjamin Herrenschmidt 		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
293bc5ad3f3SBenjamin Herrenschmidt 		 old.need_resend, old.out_ee);
294bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
295bc5ad3f3SBenjamin Herrenschmidt 		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
296bc5ad3f3SBenjamin Herrenschmidt 		 new.need_resend, new.out_ee);
297bc5ad3f3SBenjamin Herrenschmidt 	/*
298bc5ad3f3SBenjamin Herrenschmidt 	 * Check for output state update
299bc5ad3f3SBenjamin Herrenschmidt 	 *
300bc5ad3f3SBenjamin Herrenschmidt 	 * Note that this is racy since another processor could be updating
301bc5ad3f3SBenjamin Herrenschmidt 	 * the state already. This is why we never clear the interrupt output
302bc5ad3f3SBenjamin Herrenschmidt 	 * here, we only ever set it. The clear only happens prior to doing
303bc5ad3f3SBenjamin Herrenschmidt 	 * an update and only by the processor itself. Currently we do it
304bc5ad3f3SBenjamin Herrenschmidt 	 * in Accept (H_XIRR) and Up_CPPR (H_CPPR).
305bc5ad3f3SBenjamin Herrenschmidt 	 *
306bc5ad3f3SBenjamin Herrenschmidt 	 * We also do not try to figure out whether the EE state has changed;
307e7d26f28SBenjamin Herrenschmidt 	 * we unconditionally set it if the new state calls for it. The reason
308e7d26f28SBenjamin Herrenschmidt 	 * for that is that we opportunistically remove the pending interrupt
309e7d26f28SBenjamin Herrenschmidt 	 * flag when raising CPPR, so we need to set it back here if an
310e7d26f28SBenjamin Herrenschmidt 	 * interrupt is still pending.
311bc5ad3f3SBenjamin Herrenschmidt 	 */
312bc5ad3f3SBenjamin Herrenschmidt 	if (new.out_ee) {
313bc5ad3f3SBenjamin Herrenschmidt 		kvmppc_book3s_queue_irqprio(icp->vcpu,
314bc5ad3f3SBenjamin Herrenschmidt 					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
315bc5ad3f3SBenjamin Herrenschmidt 		if (!change_self)
31654695c30SBenjamin Herrenschmidt 			kvmppc_fast_vcpu_kick(icp->vcpu);
317bc5ad3f3SBenjamin Herrenschmidt 	}
318bc5ad3f3SBenjamin Herrenschmidt  bail:
319bc5ad3f3SBenjamin Herrenschmidt 	return success;
320bc5ad3f3SBenjamin Herrenschmidt }
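/*
 * A note on the update above: union kvmppc_icp_state overlays the
 * cppr/mfrr/pending_pri/xisr/need_resend/out_ee fields on a single
 * 64-bit "raw" word, which is what lets one cmpxchg64() swap the whole
 * ICP snapshot atomically; on failure, callers simply re-read the state
 * with READ_ONCE() and retry.
 */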
321bc5ad3f3SBenjamin Herrenschmidt 
322bc5ad3f3SBenjamin Herrenschmidt static void icp_check_resend(struct kvmppc_xics *xics,
323bc5ad3f3SBenjamin Herrenschmidt 			     struct kvmppc_icp *icp)
324bc5ad3f3SBenjamin Herrenschmidt {
325bc5ad3f3SBenjamin Herrenschmidt 	u32 icsid;
326bc5ad3f3SBenjamin Herrenschmidt 
327bc5ad3f3SBenjamin Herrenschmidt 	/* Order this load with the test for need_resend in the caller */
328bc5ad3f3SBenjamin Herrenschmidt 	smp_rmb();
329bc5ad3f3SBenjamin Herrenschmidt 	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
330bc5ad3f3SBenjamin Herrenschmidt 		struct kvmppc_ics *ics = xics->ics[icsid];
331bc5ad3f3SBenjamin Herrenschmidt 
332bc5ad3f3SBenjamin Herrenschmidt 		if (!test_and_clear_bit(icsid, icp->resend_map))
333bc5ad3f3SBenjamin Herrenschmidt 			continue;
334bc5ad3f3SBenjamin Herrenschmidt 		if (!ics)
335bc5ad3f3SBenjamin Herrenschmidt 			continue;
336bc5ad3f3SBenjamin Herrenschmidt 		ics_check_resend(xics, ics, icp);
337bc5ad3f3SBenjamin Herrenschmidt 	}
338bc5ad3f3SBenjamin Herrenschmidt }
339bc5ad3f3SBenjamin Herrenschmidt 
340bc5ad3f3SBenjamin Herrenschmidt static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
341bc5ad3f3SBenjamin Herrenschmidt 			       u32 *reject)
342bc5ad3f3SBenjamin Herrenschmidt {
343bc5ad3f3SBenjamin Herrenschmidt 	union kvmppc_icp_state old_state, new_state;
344bc5ad3f3SBenjamin Herrenschmidt 	bool success;
345bc5ad3f3SBenjamin Herrenschmidt 
346ade3ac66SAlexey Kardashevskiy 	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
347bc5ad3f3SBenjamin Herrenschmidt 		 icp->server_num);
348bc5ad3f3SBenjamin Herrenschmidt 
349bc5ad3f3SBenjamin Herrenschmidt 	do {
3505ee07612SChristian Borntraeger 		old_state = new_state = READ_ONCE(icp->state);
351bc5ad3f3SBenjamin Herrenschmidt 
352bc5ad3f3SBenjamin Herrenschmidt 		*reject = 0;
353bc5ad3f3SBenjamin Herrenschmidt 
354bc5ad3f3SBenjamin Herrenschmidt 		/* See if we can deliver */
355bc5ad3f3SBenjamin Herrenschmidt 		success = new_state.cppr > priority &&
356bc5ad3f3SBenjamin Herrenschmidt 			new_state.mfrr > priority &&
357bc5ad3f3SBenjamin Herrenschmidt 			new_state.pending_pri > priority;
358bc5ad3f3SBenjamin Herrenschmidt 
359bc5ad3f3SBenjamin Herrenschmidt 		/*
360bc5ad3f3SBenjamin Herrenschmidt 		 * If we can, check for a rejection and perform the
361bc5ad3f3SBenjamin Herrenschmidt 		 * delivery
362bc5ad3f3SBenjamin Herrenschmidt 		 */
363bc5ad3f3SBenjamin Herrenschmidt 		if (success) {
364bc5ad3f3SBenjamin Herrenschmidt 			*reject = new_state.xisr;
365bc5ad3f3SBenjamin Herrenschmidt 			new_state.xisr = irq;
366bc5ad3f3SBenjamin Herrenschmidt 			new_state.pending_pri = priority;
367bc5ad3f3SBenjamin Herrenschmidt 		} else {
368bc5ad3f3SBenjamin Herrenschmidt 			/*
369bc5ad3f3SBenjamin Herrenschmidt 			 * If we failed to deliver we set need_resend
370bc5ad3f3SBenjamin Herrenschmidt 			 * so a subsequent CPPR state change causes us
371bc5ad3f3SBenjamin Herrenschmidt 			 * to try a new delivery.
372bc5ad3f3SBenjamin Herrenschmidt 			 */
373bc5ad3f3SBenjamin Herrenschmidt 			new_state.need_resend = true;
374bc5ad3f3SBenjamin Herrenschmidt 		}
375bc5ad3f3SBenjamin Herrenschmidt 
376bc5ad3f3SBenjamin Herrenschmidt 	} while (!icp_try_update(icp, old_state, new_state, false));
377bc5ad3f3SBenjamin Herrenschmidt 
378bc5ad3f3SBenjamin Herrenschmidt 	return success;
379bc5ad3f3SBenjamin Herrenschmidt }
380bc5ad3f3SBenjamin Herrenschmidt 
381bc5ad3f3SBenjamin Herrenschmidt static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
38221acd0e4SLi Zhong 			    u32 new_irq, bool check_resend)
383bc5ad3f3SBenjamin Herrenschmidt {
384bc5ad3f3SBenjamin Herrenschmidt 	struct ics_irq_state *state;
385bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_ics *ics;
386bc5ad3f3SBenjamin Herrenschmidt 	u32 reject;
387bc5ad3f3SBenjamin Herrenschmidt 	u16 src;
38834cb7954SSuresh Warrier 	unsigned long flags;
389bc5ad3f3SBenjamin Herrenschmidt 
390bc5ad3f3SBenjamin Herrenschmidt 	/*
391bc5ad3f3SBenjamin Herrenschmidt 	 * This is used both for initial delivery of an interrupt and
392bc5ad3f3SBenjamin Herrenschmidt 	 * for subsequent rejection.
393bc5ad3f3SBenjamin Herrenschmidt 	 *
394bc5ad3f3SBenjamin Herrenschmidt 	 * Rejection can be racy vs. resends. We have evaluated the
395bc5ad3f3SBenjamin Herrenschmidt 	 * rejection in an atomic ICP transaction which is now complete,
396bc5ad3f3SBenjamin Herrenschmidt 	 * so potentially the ICP can already accept the interrupt again.
397bc5ad3f3SBenjamin Herrenschmidt 	 *
398bc5ad3f3SBenjamin Herrenschmidt 	 * So we need to retry the delivery. Essentially the reject path
399bc5ad3f3SBenjamin Herrenschmidt 	 * boils down to a failed delivery. Always.
400bc5ad3f3SBenjamin Herrenschmidt 	 *
401bc5ad3f3SBenjamin Herrenschmidt 	 * Now the interrupt could also have moved to a different target,
402bc5ad3f3SBenjamin Herrenschmidt 	 * thus we may need to re-do the ICP lookup as well
403bc5ad3f3SBenjamin Herrenschmidt 	 */
404bc5ad3f3SBenjamin Herrenschmidt 
405bc5ad3f3SBenjamin Herrenschmidt  again:
406bc5ad3f3SBenjamin Herrenschmidt 	/* Get the ICS state and lock it */
407bc5ad3f3SBenjamin Herrenschmidt 	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
408bc5ad3f3SBenjamin Herrenschmidt 	if (!ics) {
409bc5ad3f3SBenjamin Herrenschmidt 		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
410bc5ad3f3SBenjamin Herrenschmidt 		return;
411bc5ad3f3SBenjamin Herrenschmidt 	}
412bc5ad3f3SBenjamin Herrenschmidt 	state = &ics->irq_state[src];
413bc5ad3f3SBenjamin Herrenschmidt 
414bc5ad3f3SBenjamin Herrenschmidt 	/* Get a lock on the ICS */
41534cb7954SSuresh Warrier 	local_irq_save(flags);
41634cb7954SSuresh Warrier 	arch_spin_lock(&ics->lock);
417bc5ad3f3SBenjamin Herrenschmidt 
418bc5ad3f3SBenjamin Herrenschmidt 	/* Get our server */
419bc5ad3f3SBenjamin Herrenschmidt 	if (!icp || state->server != icp->server_num) {
420bc5ad3f3SBenjamin Herrenschmidt 		icp = kvmppc_xics_find_server(xics->kvm, state->server);
421bc5ad3f3SBenjamin Herrenschmidt 		if (!icp) {
422bc5ad3f3SBenjamin Herrenschmidt 			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
423bc5ad3f3SBenjamin Herrenschmidt 				new_irq, state->server);
424bc5ad3f3SBenjamin Herrenschmidt 			goto out;
425bc5ad3f3SBenjamin Herrenschmidt 		}
426bc5ad3f3SBenjamin Herrenschmidt 	}
427bc5ad3f3SBenjamin Herrenschmidt 
42821acd0e4SLi Zhong 	if (check_resend)
42921acd0e4SLi Zhong 		if (!state->resend)
43021acd0e4SLi Zhong 			goto out;
43121acd0e4SLi Zhong 
432bc5ad3f3SBenjamin Herrenschmidt 	/* Clear the resend bit of that interrupt */
433bc5ad3f3SBenjamin Herrenschmidt 	state->resend = 0;
434bc5ad3f3SBenjamin Herrenschmidt 
435bc5ad3f3SBenjamin Herrenschmidt 	/*
436bc5ad3f3SBenjamin Herrenschmidt 	 * If masked, bail out
437bc5ad3f3SBenjamin Herrenschmidt 	 *
438bc5ad3f3SBenjamin Herrenschmidt 	 * Note: PAPR doesn't mention anything about masked pending
439bc5ad3f3SBenjamin Herrenschmidt 	 * when doing a resend, only when doing a delivery.
440bc5ad3f3SBenjamin Herrenschmidt 	 *
441bc5ad3f3SBenjamin Herrenschmidt 	 * However that would have the effect of losing a masked
442bc5ad3f3SBenjamin Herrenschmidt 	 * interrupt that was rejected and isn't consistent with
443bc5ad3f3SBenjamin Herrenschmidt 	 * the whole masked_pending business which is about not
444bc5ad3f3SBenjamin Herrenschmidt 	 * losing interrupts that occur while masked.
445bc5ad3f3SBenjamin Herrenschmidt 	 *
446957baSAdam Buchbinder 	 * I don't differentiate between normal deliveries and resends, so this
447bc5ad3f3SBenjamin Herrenschmidt 	 * implementation will differ from PAPR and not lose such
448bc5ad3f3SBenjamin Herrenschmidt 	 * interrupts.
449bc5ad3f3SBenjamin Herrenschmidt 	 */
450bc5ad3f3SBenjamin Herrenschmidt 	if (state->priority == MASKED) {
451bc5ad3f3SBenjamin Herrenschmidt 		XICS_DBG("irq %#x masked pending\n", new_irq);
452bc5ad3f3SBenjamin Herrenschmidt 		state->masked_pending = 1;
453bc5ad3f3SBenjamin Herrenschmidt 		goto out;
454bc5ad3f3SBenjamin Herrenschmidt 	}
455bc5ad3f3SBenjamin Herrenschmidt 
456bc5ad3f3SBenjamin Herrenschmidt 	/*
457bc5ad3f3SBenjamin Herrenschmidt 	 * Try the delivery, this will set the need_resend flag
458bc5ad3f3SBenjamin Herrenschmidt 	 * in the ICP as part of the atomic transaction if the
459bc5ad3f3SBenjamin Herrenschmidt 	 * delivery is not possible.
460bc5ad3f3SBenjamin Herrenschmidt 	 *
461bc5ad3f3SBenjamin Herrenschmidt 	 * Note that if successful, the new delivery might have itself
462bc5ad3f3SBenjamin Herrenschmidt 	 * rejected an interrupt that was "delivered" before we took the
46334cb7954SSuresh Warrier 	 * ics spin lock.
464bc5ad3f3SBenjamin Herrenschmidt 	 *
465bc5ad3f3SBenjamin Herrenschmidt 	 * In this case we do the whole sequence all over again for the
466bc5ad3f3SBenjamin Herrenschmidt 	 * new guy. We cannot assume that the rejected interrupt is less
467bc5ad3f3SBenjamin Herrenschmidt 	 * favored than the new one, and thus doesn't need to be delivered,
468bc5ad3f3SBenjamin Herrenschmidt 	 * because by the time we exit icp_try_to_deliver() the target
469bc5ad3f3SBenjamin Herrenschmidt 	 * processor may well have already consumed & completed it, and thus
470bc5ad3f3SBenjamin Herrenschmidt 	 * the rejected interrupt might actually be already acceptable.
471bc5ad3f3SBenjamin Herrenschmidt 	 */
472bc5ad3f3SBenjamin Herrenschmidt 	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
473bc5ad3f3SBenjamin Herrenschmidt 		/*
474bc5ad3f3SBenjamin Herrenschmidt 		 * Delivery was successful, did we reject somebody else ?
475bc5ad3f3SBenjamin Herrenschmidt 		 */
476bc5ad3f3SBenjamin Herrenschmidt 		if (reject && reject != XICS_IPI) {
47734cb7954SSuresh Warrier 			arch_spin_unlock(&ics->lock);
47834cb7954SSuresh Warrier 			local_irq_restore(flags);
479bc5ad3f3SBenjamin Herrenschmidt 			new_irq = reject;
48021acd0e4SLi Zhong 			check_resend = 0;
481bc5ad3f3SBenjamin Herrenschmidt 			goto again;
482bc5ad3f3SBenjamin Herrenschmidt 		}
483bc5ad3f3SBenjamin Herrenschmidt 	} else {
484bc5ad3f3SBenjamin Herrenschmidt 		/*
485bc5ad3f3SBenjamin Herrenschmidt 		 * We failed to deliver the interrupt, so we need to set the
486bc5ad3f3SBenjamin Herrenschmidt 		 * resend map bit and mark the ICS state as needing a resend
487bc5ad3f3SBenjamin Herrenschmidt 		 */
488bc5ad3f3SBenjamin Herrenschmidt 		state->resend = 1;
489bc5ad3f3SBenjamin Herrenschmidt 
490bc5ad3f3SBenjamin Herrenschmidt 		/*
49121acd0e4SLi Zhong 		 * Order the store to state->resend before setting the resend_map
49221acd0e4SLi Zhong 		 * bit, so that whoever sees and clears the bit also sees the resend.
49321acd0e4SLi Zhong 		 */
49421acd0e4SLi Zhong 		smp_wmb();
49521acd0e4SLi Zhong 		set_bit(ics->icsid, icp->resend_map);
49621acd0e4SLi Zhong 
49721acd0e4SLi Zhong 		/*
498bc5ad3f3SBenjamin Herrenschmidt 		 * If the need_resend flag got cleared in the ICP some time
499bc5ad3f3SBenjamin Herrenschmidt 		 * between icp_try_to_deliver() atomic update and now, then
500bc5ad3f3SBenjamin Herrenschmidt 		 * we know it might have missed the resend_map bit. So we
501bc5ad3f3SBenjamin Herrenschmidt 		 * retry
502bc5ad3f3SBenjamin Herrenschmidt 		 */
503bc5ad3f3SBenjamin Herrenschmidt 		smp_mb();
504bc5ad3f3SBenjamin Herrenschmidt 		if (!icp->state.need_resend) {
505bf5a71d5SLi Zhong 			state->resend = 0;
50634cb7954SSuresh Warrier 			arch_spin_unlock(&ics->lock);
50734cb7954SSuresh Warrier 			local_irq_restore(flags);
50821acd0e4SLi Zhong 			check_resend = 0;
509bc5ad3f3SBenjamin Herrenschmidt 			goto again;
510bc5ad3f3SBenjamin Herrenschmidt 		}
511bc5ad3f3SBenjamin Herrenschmidt 	}
512bc5ad3f3SBenjamin Herrenschmidt  out:
51334cb7954SSuresh Warrier 	arch_spin_unlock(&ics->lock);
51434cb7954SSuresh Warrier 	local_irq_restore(flags);
515bc5ad3f3SBenjamin Herrenschmidt }
516bc5ad3f3SBenjamin Herrenschmidt 
517bc5ad3f3SBenjamin Herrenschmidt static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
518bc5ad3f3SBenjamin Herrenschmidt 			  u8 new_cppr)
519bc5ad3f3SBenjamin Herrenschmidt {
520bc5ad3f3SBenjamin Herrenschmidt 	union kvmppc_icp_state old_state, new_state;
521bc5ad3f3SBenjamin Herrenschmidt 	bool resend;
522bc5ad3f3SBenjamin Herrenschmidt 
523bc5ad3f3SBenjamin Herrenschmidt 	/*
524bc5ad3f3SBenjamin Herrenschmidt 	 * This handles several related states in one operation:
525bc5ad3f3SBenjamin Herrenschmidt 	 *
526bc5ad3f3SBenjamin Herrenschmidt 	 * ICP State: Down_CPPR
527bc5ad3f3SBenjamin Herrenschmidt 	 *
528bc5ad3f3SBenjamin Herrenschmidt 	 * Load CPPR with new value and if the XISR is 0
529bc5ad3f3SBenjamin Herrenschmidt 	 * then check for resends:
530bc5ad3f3SBenjamin Herrenschmidt 	 *
531bc5ad3f3SBenjamin Herrenschmidt 	 * ICP State: Resend
532bc5ad3f3SBenjamin Herrenschmidt 	 *
533bc5ad3f3SBenjamin Herrenschmidt 	 * If MFRR is more favored than CPPR, check for IPIs
534bc5ad3f3SBenjamin Herrenschmidt 	 * and notify ICS of a potential resend. This is done
535bc5ad3f3SBenjamin Herrenschmidt 	 * asynchronously (when used in real mode, we will have
536bc5ad3f3SBenjamin Herrenschmidt 	 * to exit here).
537bc5ad3f3SBenjamin Herrenschmidt 	 *
538bc5ad3f3SBenjamin Herrenschmidt 	 * We do not handle the complete Check_IPI as documented
539bc5ad3f3SBenjamin Herrenschmidt 	 * here. In the PAPR, this state will be used for both
540bc5ad3f3SBenjamin Herrenschmidt 	 * Set_MFRR and Down_CPPR. However, we know that we aren't
541bc5ad3f3SBenjamin Herrenschmidt 	 * changing the MFRR state here so we don't need to handle
542bc5ad3f3SBenjamin Herrenschmidt 	 * the case of an MFRR causing a reject of a pending irq;
543bc5ad3f3SBenjamin Herrenschmidt 	 * this will have been handled when the MFRR was set in the
544bc5ad3f3SBenjamin Herrenschmidt 	 * first place.
545bc5ad3f3SBenjamin Herrenschmidt 	 *
546bc5ad3f3SBenjamin Herrenschmidt 	 * Thus we don't have to handle rejects, only resends.
547bc5ad3f3SBenjamin Herrenschmidt 	 *
548bc5ad3f3SBenjamin Herrenschmidt 	 * When implementing real mode for HV KVM, resend will lead to
549bc5ad3f3SBenjamin Herrenschmidt 	 * a H_TOO_HARD return and the whole transaction will be handled
550bc5ad3f3SBenjamin Herrenschmidt 	 * in virtual mode.
551bc5ad3f3SBenjamin Herrenschmidt 	 */
552bc5ad3f3SBenjamin Herrenschmidt 	do {
5535ee07612SChristian Borntraeger 		old_state = new_state = READ_ONCE(icp->state);
554bc5ad3f3SBenjamin Herrenschmidt 
555bc5ad3f3SBenjamin Herrenschmidt 		/* Down_CPPR */
556bc5ad3f3SBenjamin Herrenschmidt 		new_state.cppr = new_cppr;
557bc5ad3f3SBenjamin Herrenschmidt 
558bc5ad3f3SBenjamin Herrenschmidt 		/*
559bc5ad3f3SBenjamin Herrenschmidt 		 * Cut down Resend / Check_IPI / IPI
560bc5ad3f3SBenjamin Herrenschmidt 		 *
561bc5ad3f3SBenjamin Herrenschmidt 		 * The logic is that we cannot have a pending interrupt
562bc5ad3f3SBenjamin Herrenschmidt 		 * trumped by an IPI at this point (see above), so we
563bc5ad3f3SBenjamin Herrenschmidt 		 * know that either the pending interrupt is already an
564bc5ad3f3SBenjamin Herrenschmidt 		 * IPI (in which case we don't care to override it) or
565bc5ad3f3SBenjamin Herrenschmidt 		 * it's either more favored than us or non existent
566bc5ad3f3SBenjamin Herrenschmidt 		 * it's more favored than us or non-existent
567bc5ad3f3SBenjamin Herrenschmidt 		if (new_state.mfrr < new_cppr &&
568bc5ad3f3SBenjamin Herrenschmidt 		    new_state.mfrr <= new_state.pending_pri) {
569bc5ad3f3SBenjamin Herrenschmidt 			WARN_ON(new_state.xisr != XICS_IPI &&
570bc5ad3f3SBenjamin Herrenschmidt 				new_state.xisr != 0);
571bc5ad3f3SBenjamin Herrenschmidt 			new_state.pending_pri = new_state.mfrr;
572bc5ad3f3SBenjamin Herrenschmidt 			new_state.xisr = XICS_IPI;
573bc5ad3f3SBenjamin Herrenschmidt 		}
574bc5ad3f3SBenjamin Herrenschmidt 
575bc5ad3f3SBenjamin Herrenschmidt 		/* Latch/clear resend bit */
576bc5ad3f3SBenjamin Herrenschmidt 		resend = new_state.need_resend;
577bc5ad3f3SBenjamin Herrenschmidt 		new_state.need_resend = 0;
578bc5ad3f3SBenjamin Herrenschmidt 
579bc5ad3f3SBenjamin Herrenschmidt 	} while (!icp_try_update(icp, old_state, new_state, true));
580bc5ad3f3SBenjamin Herrenschmidt 
581bc5ad3f3SBenjamin Herrenschmidt 	/*
582bc5ad3f3SBenjamin Herrenschmidt 	 * Now handle resend checks. Those are asynchronous to the ICP
583bc5ad3f3SBenjamin Herrenschmidt 	 * state update in HW (ie bus transactions) so we can handle them
584bc5ad3f3SBenjamin Herrenschmidt 	 * separately here too
585bc5ad3f3SBenjamin Herrenschmidt 	 */
586bc5ad3f3SBenjamin Herrenschmidt 	if (resend)
587bc5ad3f3SBenjamin Herrenschmidt 		icp_check_resend(xics, icp);
588bc5ad3f3SBenjamin Herrenschmidt }
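/*
 * A worked example of the Down_CPPR path above (numbers purely
 * illustrative): say the CPPR is moved from 0x03 to 0xff (less favored)
 * while an IPI is pending with MFRR = 0x05 and nothing is latched
 * (pending_pri = 0xff, xisr = 0).  Since 0x05 < 0xff and 0x05 <= 0xff,
 * the loop latches pending_pri = 0x05 and xisr = XICS_IPI;
 * icp_try_update() then computes out_ee = 1 and queues
 * BOOK3S_INTERRUPT_EXTERNAL_LEVEL, so the vcpu picks the IPI up on its
 * next H_XIRR.
 */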
589bc5ad3f3SBenjamin Herrenschmidt 
590e7d26f28SBenjamin Herrenschmidt static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
591bc5ad3f3SBenjamin Herrenschmidt {
592bc5ad3f3SBenjamin Herrenschmidt 	union kvmppc_icp_state old_state, new_state;
593bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_icp *icp = vcpu->arch.icp;
594bc5ad3f3SBenjamin Herrenschmidt 	u32 xirr;
595bc5ad3f3SBenjamin Herrenschmidt 
596bc5ad3f3SBenjamin Herrenschmidt 	/* First, remove EE from the processor */
597bc5ad3f3SBenjamin Herrenschmidt 	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
598bc5ad3f3SBenjamin Herrenschmidt 				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
599bc5ad3f3SBenjamin Herrenschmidt 
600bc5ad3f3SBenjamin Herrenschmidt 	/*
601bc5ad3f3SBenjamin Herrenschmidt 	 * ICP State: Accept_Interrupt
602bc5ad3f3SBenjamin Herrenschmidt 	 *
603bc5ad3f3SBenjamin Herrenschmidt 	 * Return the pending interrupt (if any) along with the
604bc5ad3f3SBenjamin Herrenschmidt 	 * current CPPR, then clear the XISR & set CPPR to the
605bc5ad3f3SBenjamin Herrenschmidt 	 * pending priority
606bc5ad3f3SBenjamin Herrenschmidt 	 */
607bc5ad3f3SBenjamin Herrenschmidt 	do {
6085ee07612SChristian Borntraeger 		old_state = new_state = READ_ONCE(icp->state);
609bc5ad3f3SBenjamin Herrenschmidt 
610bc5ad3f3SBenjamin Herrenschmidt 		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
611bc5ad3f3SBenjamin Herrenschmidt 		if (!old_state.xisr)
612bc5ad3f3SBenjamin Herrenschmidt 			break;
613bc5ad3f3SBenjamin Herrenschmidt 		new_state.cppr = new_state.pending_pri;
614bc5ad3f3SBenjamin Herrenschmidt 		new_state.pending_pri = 0xff;
615bc5ad3f3SBenjamin Herrenschmidt 		new_state.xisr = 0;
616bc5ad3f3SBenjamin Herrenschmidt 
617bc5ad3f3SBenjamin Herrenschmidt 	} while (!icp_try_update(icp, old_state, new_state, true));
618bc5ad3f3SBenjamin Herrenschmidt 
619bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);
620bc5ad3f3SBenjamin Herrenschmidt 
621bc5ad3f3SBenjamin Herrenschmidt 	return xirr;
622bc5ad3f3SBenjamin Herrenschmidt }
623bc5ad3f3SBenjamin Herrenschmidt 
624e7d26f28SBenjamin Herrenschmidt static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
625bc5ad3f3SBenjamin Herrenschmidt 				 unsigned long mfrr)
626bc5ad3f3SBenjamin Herrenschmidt {
627bc5ad3f3SBenjamin Herrenschmidt 	union kvmppc_icp_state old_state, new_state;
628bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
629bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_icp *icp;
630bc5ad3f3SBenjamin Herrenschmidt 	u32 reject;
631bc5ad3f3SBenjamin Herrenschmidt 	bool resend;
632bc5ad3f3SBenjamin Herrenschmidt 	bool local;
633bc5ad3f3SBenjamin Herrenschmidt 
634bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
635bc5ad3f3SBenjamin Herrenschmidt 		 vcpu->vcpu_id, server, mfrr);
636bc5ad3f3SBenjamin Herrenschmidt 
637bc5ad3f3SBenjamin Herrenschmidt 	icp = vcpu->arch.icp;
638bc5ad3f3SBenjamin Herrenschmidt 	local = icp->server_num == server;
639bc5ad3f3SBenjamin Herrenschmidt 	if (!local) {
640bc5ad3f3SBenjamin Herrenschmidt 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
641bc5ad3f3SBenjamin Herrenschmidt 		if (!icp)
642bc5ad3f3SBenjamin Herrenschmidt 			return H_PARAMETER;
643bc5ad3f3SBenjamin Herrenschmidt 	}
644bc5ad3f3SBenjamin Herrenschmidt 
645bc5ad3f3SBenjamin Herrenschmidt 	/*
646bc5ad3f3SBenjamin Herrenschmidt 	 * ICP state: Set_MFRR
647bc5ad3f3SBenjamin Herrenschmidt 	 *
648bc5ad3f3SBenjamin Herrenschmidt 	 * If the CPPR is more favored than the new MFRR, then
649bc5ad3f3SBenjamin Herrenschmidt 	 * nothing needs to be rejected as there can be no XISR to
650bc5ad3f3SBenjamin Herrenschmidt 	 * reject.  If the MFRR is being made less favored then
651bc5ad3f3SBenjamin Herrenschmidt 	 * there might be a previously-rejected interrupt needing
652bc5ad3f3SBenjamin Herrenschmidt 	 * to be resent.
653bc5ad3f3SBenjamin Herrenschmidt 	 *
654bc5ad3f3SBenjamin Herrenschmidt 	 * ICP state: Check_IPI
6555b88cda6SSuresh E. Warrier 	 *
6565b88cda6SSuresh E. Warrier 	 * If the CPPR is less favored, then we might be replacing
6575b88cda6SSuresh E. Warrier 	 * an interrupt, and thus need to possibly reject it.
6585b88cda6SSuresh E. Warrier 	 *
6595b88cda6SSuresh E. Warrier 	 * ICP State: IPI
6605b88cda6SSuresh E. Warrier 	 *
6615b88cda6SSuresh E. Warrier 	 * Besides rejecting any pending interrupts, we also
6625b88cda6SSuresh E. Warrier 	 * update XISR and pending_pri to mark IPI as pending.
6635b88cda6SSuresh E. Warrier 	 *
6645b88cda6SSuresh E. Warrier 	 * PAPR does not describe this state, but if the MFRR is being
6655b88cda6SSuresh E. Warrier 	 * made less favored than its earlier value, there might be
6665b88cda6SSuresh E. Warrier 	 * a previously-rejected interrupt needing to be resent.
6675b88cda6SSuresh E. Warrier 	 * Ideally, we would want to resend only if
6685b88cda6SSuresh E. Warrier 	 *	prio(pending_interrupt) < mfrr &&
6695b88cda6SSuresh E. Warrier 	 *	prio(pending_interrupt) < cppr
6705b88cda6SSuresh E. Warrier 	 * where pending interrupt is the one that was rejected. But
6715b88cda6SSuresh E. Warrier 	 * we don't have that state, so we simply trigger a resend
6725b88cda6SSuresh E. Warrier 	 * whenever the MFRR is made less favored.
673bc5ad3f3SBenjamin Herrenschmidt 	 */
674bc5ad3f3SBenjamin Herrenschmidt 	do {
6755ee07612SChristian Borntraeger 		old_state = new_state = READ_ONCE(icp->state);
676bc5ad3f3SBenjamin Herrenschmidt 
677bc5ad3f3SBenjamin Herrenschmidt 		/* Set_MFRR */
678bc5ad3f3SBenjamin Herrenschmidt 		new_state.mfrr = mfrr;
679bc5ad3f3SBenjamin Herrenschmidt 
680bc5ad3f3SBenjamin Herrenschmidt 		/* Check_IPI */
681bc5ad3f3SBenjamin Herrenschmidt 		reject = 0;
682bc5ad3f3SBenjamin Herrenschmidt 		resend = false;
683bc5ad3f3SBenjamin Herrenschmidt 		if (mfrr < new_state.cppr) {
684bc5ad3f3SBenjamin Herrenschmidt 			/* Reject a pending interrupt if not an IPI */
6855b88cda6SSuresh E. Warrier 			if (mfrr <= new_state.pending_pri) {
686bc5ad3f3SBenjamin Herrenschmidt 				reject = new_state.xisr;
687bc5ad3f3SBenjamin Herrenschmidt 				new_state.pending_pri = mfrr;
688bc5ad3f3SBenjamin Herrenschmidt 				new_state.xisr = XICS_IPI;
689bc5ad3f3SBenjamin Herrenschmidt 			}
6905b88cda6SSuresh E. Warrier 		}
691bc5ad3f3SBenjamin Herrenschmidt 
6925b88cda6SSuresh E. Warrier 		if (mfrr > old_state.mfrr) {
693bc5ad3f3SBenjamin Herrenschmidt 			resend = new_state.need_resend;
694bc5ad3f3SBenjamin Herrenschmidt 			new_state.need_resend = 0;
695bc5ad3f3SBenjamin Herrenschmidt 		}
696bc5ad3f3SBenjamin Herrenschmidt 	} while (!icp_try_update(icp, old_state, new_state, local));
697bc5ad3f3SBenjamin Herrenschmidt 
698bc5ad3f3SBenjamin Herrenschmidt 	/* Handle reject */
699bc5ad3f3SBenjamin Herrenschmidt 	if (reject && reject != XICS_IPI)
70021acd0e4SLi Zhong 		icp_deliver_irq(xics, icp, reject, false);
701bc5ad3f3SBenjamin Herrenschmidt 
702bc5ad3f3SBenjamin Herrenschmidt 	/* Handle resend */
703bc5ad3f3SBenjamin Herrenschmidt 	if (resend)
704bc5ad3f3SBenjamin Herrenschmidt 		icp_check_resend(xics, icp);
705bc5ad3f3SBenjamin Herrenschmidt 
706bc5ad3f3SBenjamin Herrenschmidt 	return H_SUCCESS;
707bc5ad3f3SBenjamin Herrenschmidt }
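/*
 * To illustrate the reject path above (values purely illustrative): if
 * the target ICP has cppr = 0xff and external irq 0x1234 latched at
 * pending_pri = 0x06, an H_IPI with mfrr = 0x04 satisfies mfrr < cppr
 * and mfrr <= pending_pri, so the loop replaces the XISR with XICS_IPI
 * (pending_pri = 0x04) and returns reject = 0x1234; that interrupt is
 * handed back to icp_deliver_irq(), which either re-presents it or
 * marks its ICS source for resend.
 */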
708bc5ad3f3SBenjamin Herrenschmidt 
7098e44ddc3SPaul Mackerras static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
7108e44ddc3SPaul Mackerras {
7118e44ddc3SPaul Mackerras 	union kvmppc_icp_state state;
7128e44ddc3SPaul Mackerras 	struct kvmppc_icp *icp;
7138e44ddc3SPaul Mackerras 
7148e44ddc3SPaul Mackerras 	icp = vcpu->arch.icp;
7158e44ddc3SPaul Mackerras 	if (icp->server_num != server) {
7168e44ddc3SPaul Mackerras 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
7178e44ddc3SPaul Mackerras 		if (!icp)
7188e44ddc3SPaul Mackerras 			return H_PARAMETER;
7198e44ddc3SPaul Mackerras 	}
7205ee07612SChristian Borntraeger 	state = READ_ONCE(icp->state);
7218e44ddc3SPaul Mackerras 	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
7228e44ddc3SPaul Mackerras 	kvmppc_set_gpr(vcpu, 5, state.mfrr);
7238e44ddc3SPaul Mackerras 	return H_SUCCESS;
7248e44ddc3SPaul Mackerras }
7258e44ddc3SPaul Mackerras 
726e7d26f28SBenjamin Herrenschmidt static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
727bc5ad3f3SBenjamin Herrenschmidt {
728bc5ad3f3SBenjamin Herrenschmidt 	union kvmppc_icp_state old_state, new_state;
729bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
730bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_icp *icp = vcpu->arch.icp;
731bc5ad3f3SBenjamin Herrenschmidt 	u32 reject;
732bc5ad3f3SBenjamin Herrenschmidt 
733bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);
734bc5ad3f3SBenjamin Herrenschmidt 
735bc5ad3f3SBenjamin Herrenschmidt 	/*
736bc5ad3f3SBenjamin Herrenschmidt 	 * ICP State: Set_CPPR
737bc5ad3f3SBenjamin Herrenschmidt 	 *
738bc5ad3f3SBenjamin Herrenschmidt 	 * We can safely compare the new value with the current
739bc5ad3f3SBenjamin Herrenschmidt 	 * value outside of the transaction as the CPPR is only
740bc5ad3f3SBenjamin Herrenschmidt 	 * ever changed by the processor on itself
741bc5ad3f3SBenjamin Herrenschmidt 	 */
742bc5ad3f3SBenjamin Herrenschmidt 	if (cppr > icp->state.cppr)
743bc5ad3f3SBenjamin Herrenschmidt 		icp_down_cppr(xics, icp, cppr);
744bc5ad3f3SBenjamin Herrenschmidt 	else if (cppr == icp->state.cppr)
745bc5ad3f3SBenjamin Herrenschmidt 		return;
746bc5ad3f3SBenjamin Herrenschmidt 
747bc5ad3f3SBenjamin Herrenschmidt 	/*
748bc5ad3f3SBenjamin Herrenschmidt 	 * ICP State: Up_CPPR
749bc5ad3f3SBenjamin Herrenschmidt 	 *
750bc5ad3f3SBenjamin Herrenschmidt 	 * The processor is raising its priority, this can result
751bc5ad3f3SBenjamin Herrenschmidt 	 * in a rejection of a pending interrupt:
752bc5ad3f3SBenjamin Herrenschmidt 	 *
753bc5ad3f3SBenjamin Herrenschmidt 	 * ICP State: Reject_Current
754bc5ad3f3SBenjamin Herrenschmidt 	 *
755bc5ad3f3SBenjamin Herrenschmidt 	 * We can remove EE from the current processor, the update
756bc5ad3f3SBenjamin Herrenschmidt 	 * transaction will set it again if needed
757bc5ad3f3SBenjamin Herrenschmidt 	 */
758bc5ad3f3SBenjamin Herrenschmidt 	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
759bc5ad3f3SBenjamin Herrenschmidt 				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
760bc5ad3f3SBenjamin Herrenschmidt 
761bc5ad3f3SBenjamin Herrenschmidt 	do {
7625ee07612SChristian Borntraeger 		old_state = new_state = READ_ONCE(icp->state);
763bc5ad3f3SBenjamin Herrenschmidt 
764bc5ad3f3SBenjamin Herrenschmidt 		reject = 0;
765bc5ad3f3SBenjamin Herrenschmidt 		new_state.cppr = cppr;
766bc5ad3f3SBenjamin Herrenschmidt 
767bc5ad3f3SBenjamin Herrenschmidt 		if (cppr <= new_state.pending_pri) {
768bc5ad3f3SBenjamin Herrenschmidt 			reject = new_state.xisr;
769bc5ad3f3SBenjamin Herrenschmidt 			new_state.xisr = 0;
770bc5ad3f3SBenjamin Herrenschmidt 			new_state.pending_pri = 0xff;
771bc5ad3f3SBenjamin Herrenschmidt 		}
772bc5ad3f3SBenjamin Herrenschmidt 
773bc5ad3f3SBenjamin Herrenschmidt 	} while (!icp_try_update(icp, old_state, new_state, true));
774bc5ad3f3SBenjamin Herrenschmidt 
775bc5ad3f3SBenjamin Herrenschmidt 	/*
776bc5ad3f3SBenjamin Herrenschmidt 	 * Check for rejects. They are handled by doing a new delivery
777bc5ad3f3SBenjamin Herrenschmidt 	 * attempt (see comments in icp_deliver_irq).
778bc5ad3f3SBenjamin Herrenschmidt 	 */
779bc5ad3f3SBenjamin Herrenschmidt 	if (reject && reject != XICS_IPI)
78021acd0e4SLi Zhong 		icp_deliver_irq(xics, icp, reject, false);
781bc5ad3f3SBenjamin Herrenschmidt }
782bc5ad3f3SBenjamin Herrenschmidt 
78317d48610SLi Zhong static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
784bc5ad3f3SBenjamin Herrenschmidt {
785bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
786bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_icp *icp = vcpu->arch.icp;
787bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_ics *ics;
788bc5ad3f3SBenjamin Herrenschmidt 	struct ics_irq_state *state;
789bc5ad3f3SBenjamin Herrenschmidt 	u16 src;
79017d48610SLi Zhong 	u32 pq_old, pq_new;
79117d48610SLi Zhong 
79217d48610SLi Zhong 	/*
79317d48610SLi Zhong 	 * ICS EOI handling: For LSI, if P bit is still set, we need to
79417d48610SLi Zhong 	 * resend it.
79517d48610SLi Zhong 	 *
79617d48610SLi Zhong 	 * For MSI, we move Q bit into P (and clear Q). If it is set,
79717d48610SLi Zhong 	 * resend it.
79817d48610SLi Zhong 	 */
79917d48610SLi Zhong 
80017d48610SLi Zhong 	ics = kvmppc_xics_find_ics(xics, irq, &src);
80117d48610SLi Zhong 	if (!ics) {
80217d48610SLi Zhong 		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
80317d48610SLi Zhong 		return H_PARAMETER;
80417d48610SLi Zhong 	}
80517d48610SLi Zhong 	state = &ics->irq_state[src];
80617d48610SLi Zhong 
80717d48610SLi Zhong 	if (state->lsi)
80817d48610SLi Zhong 		pq_new = state->pq_state;
80917d48610SLi Zhong 	else
81017d48610SLi Zhong 		do {
81117d48610SLi Zhong 			pq_old = state->pq_state;
81217d48610SLi Zhong 			pq_new = pq_old >> 1;
81317d48610SLi Zhong 		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
81417d48610SLi Zhong 
81517d48610SLi Zhong 	if (pq_new & PQ_PRESENTED)
81621acd0e4SLi Zhong 		icp_deliver_irq(xics, icp, irq, false);
81717d48610SLi Zhong 
81817d48610SLi Zhong 	kvm_notify_acked_irq(vcpu->kvm, 0, irq);
81917d48610SLi Zhong 
82017d48610SLi Zhong 	return H_SUCCESS;
82117d48610SLi Zhong }
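/*
 * Continuing the P/Q picture from ics_deliver_irq() (same bit layout
 * assumption): on MSI EOI the pq_old >> 1 step moves Q into P and
 * clears Q, so P=1,Q=1 becomes P=1,Q=0 and the source is presented
 * again (another interrupt arrived while this one was in flight),
 * while P=1,Q=0 becomes P=0,Q=0 and the source goes idle.  For an LSI
 * the state is left untouched, so a line that is still asserted
 * (P set) is simply re-presented.
 */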
82217d48610SLi Zhong 
82317d48610SLi Zhong static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
82417d48610SLi Zhong {
82517d48610SLi Zhong 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
82617d48610SLi Zhong 	struct kvmppc_icp *icp = vcpu->arch.icp;
82717d48610SLi Zhong 	u32 irq = xirr & 0x00ffffff;
828bc5ad3f3SBenjamin Herrenschmidt 
829bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);
830bc5ad3f3SBenjamin Herrenschmidt 
831bc5ad3f3SBenjamin Herrenschmidt 	/*
832bc5ad3f3SBenjamin Herrenschmidt 	 * ICP State: EOI
833bc5ad3f3SBenjamin Herrenschmidt 	 *
834bc5ad3f3SBenjamin Herrenschmidt 	 * Note: If EOI is incorrectly used by SW to lower the CPPR
835bc5ad3f3SBenjamin Herrenschmidt 	 * value (ie more favored), we do not check for rejection of
836bc5ad3f3SBenjamin Herrenschmidt 	 * a pending interrupt, this is a SW error and PAPR sepcifies
837bc5ad3f3SBenjamin Herrenschmidt 	 * a pending interrupt; this is a SW error and PAPR specifies
838bc5ad3f3SBenjamin Herrenschmidt 	 *
839bc5ad3f3SBenjamin Herrenschmidt 	 * The sending of an EOI to the ICS is handled after the
840bc5ad3f3SBenjamin Herrenschmidt 	 * CPPR update
841bc5ad3f3SBenjamin Herrenschmidt 	 *
842bc5ad3f3SBenjamin Herrenschmidt 	 * ICP State: Down_CPPR which we handle
843bc5ad3f3SBenjamin Herrenschmidt 	 * in a separate function as it's shared with H_CPPR.
844bc5ad3f3SBenjamin Herrenschmidt 	 */
845bc5ad3f3SBenjamin Herrenschmidt 	icp_down_cppr(xics, icp, xirr >> 24);
846bc5ad3f3SBenjamin Herrenschmidt 
847bc5ad3f3SBenjamin Herrenschmidt 	/* IPIs have no EOI */
848bc5ad3f3SBenjamin Herrenschmidt 	if (irq == XICS_IPI)
849bc5ad3f3SBenjamin Herrenschmidt 		return H_SUCCESS;
850bc5ad3f3SBenjamin Herrenschmidt 
85117d48610SLi Zhong 	return ics_eoi(vcpu, irq);
852bc5ad3f3SBenjamin Herrenschmidt }
853bc5ad3f3SBenjamin Herrenschmidt 
854f7af5209SSuresh Warrier int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
855e7d26f28SBenjamin Herrenschmidt {
856e7d26f28SBenjamin Herrenschmidt 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
857e7d26f28SBenjamin Herrenschmidt 	struct kvmppc_icp *icp = vcpu->arch.icp;
858e7d26f28SBenjamin Herrenschmidt 
859e7d26f28SBenjamin Herrenschmidt 	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
860e7d26f28SBenjamin Herrenschmidt 		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);
861e7d26f28SBenjamin Herrenschmidt 
862878610feSSuresh E. Warrier 	if (icp->rm_action & XICS_RM_KICK_VCPU) {
863878610feSSuresh E. Warrier 		icp->n_rm_kick_vcpu++;
864e7d26f28SBenjamin Herrenschmidt 		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
865878610feSSuresh E. Warrier 	}
866878610feSSuresh E. Warrier 	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
867878610feSSuresh E. Warrier 		icp->n_rm_check_resend++;
8685b88cda6SSuresh E. Warrier 		icp_check_resend(xics, icp->rm_resend_icp);
869878610feSSuresh E. Warrier 	}
870878610feSSuresh E. Warrier 	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
871878610feSSuresh E. Warrier 		icp->n_rm_notify_eoi++;
87225a2150bSPaul Mackerras 		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
873878610feSSuresh E. Warrier 	}
874e7d26f28SBenjamin Herrenschmidt 
875e7d26f28SBenjamin Herrenschmidt 	icp->rm_action = 0;
876e7d26f28SBenjamin Herrenschmidt 
877e7d26f28SBenjamin Herrenschmidt 	return H_SUCCESS;
878e7d26f28SBenjamin Herrenschmidt }
879f7af5209SSuresh Warrier EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
880e7d26f28SBenjamin Herrenschmidt 
881bc5ad3f3SBenjamin Herrenschmidt int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
882bc5ad3f3SBenjamin Herrenschmidt {
883e7d26f28SBenjamin Herrenschmidt 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
884bc5ad3f3SBenjamin Herrenschmidt 	unsigned long res;
885bc5ad3f3SBenjamin Herrenschmidt 	int rc = H_SUCCESS;
886bc5ad3f3SBenjamin Herrenschmidt 
887bc5ad3f3SBenjamin Herrenschmidt 	/* Check if we have an ICP */
888e7d26f28SBenjamin Herrenschmidt 	if (!xics || !vcpu->arch.icp)
889bc5ad3f3SBenjamin Herrenschmidt 		return H_HARDWARE;
890bc5ad3f3SBenjamin Herrenschmidt 
8918e44ddc3SPaul Mackerras 	/* These requests don't have real-mode implementations at present */
8928e44ddc3SPaul Mackerras 	switch (req) {
8938e44ddc3SPaul Mackerras 	case H_XIRR_X:
8948e44ddc3SPaul Mackerras 		res = kvmppc_h_xirr(vcpu);
8958e44ddc3SPaul Mackerras 		kvmppc_set_gpr(vcpu, 4, res);
8968e44ddc3SPaul Mackerras 		kvmppc_set_gpr(vcpu, 5, get_tb());
8978e44ddc3SPaul Mackerras 		return rc;
8988e44ddc3SPaul Mackerras 	case H_IPOLL:
8998e44ddc3SPaul Mackerras 		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
9008e44ddc3SPaul Mackerras 		return rc;
9018e44ddc3SPaul Mackerras 	}
9028e44ddc3SPaul Mackerras 
903e7d26f28SBenjamin Herrenschmidt 	/* Check for real mode returning too hard */
904a78b55d1SAneesh Kumar K.V 	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
905e7d26f28SBenjamin Herrenschmidt 		return kvmppc_xics_rm_complete(vcpu, req);
906e7d26f28SBenjamin Herrenschmidt 
907bc5ad3f3SBenjamin Herrenschmidt 	switch (req) {
908bc5ad3f3SBenjamin Herrenschmidt 	case H_XIRR:
909e7d26f28SBenjamin Herrenschmidt 		res = kvmppc_h_xirr(vcpu);
910bc5ad3f3SBenjamin Herrenschmidt 		kvmppc_set_gpr(vcpu, 4, res);
911bc5ad3f3SBenjamin Herrenschmidt 		break;
912bc5ad3f3SBenjamin Herrenschmidt 	case H_CPPR:
913e7d26f28SBenjamin Herrenschmidt 		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
914bc5ad3f3SBenjamin Herrenschmidt 		break;
915bc5ad3f3SBenjamin Herrenschmidt 	case H_EOI:
916e7d26f28SBenjamin Herrenschmidt 		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
917bc5ad3f3SBenjamin Herrenschmidt 		break;
918bc5ad3f3SBenjamin Herrenschmidt 	case H_IPI:
919e7d26f28SBenjamin Herrenschmidt 		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
920bc5ad3f3SBenjamin Herrenschmidt 				  kvmppc_get_gpr(vcpu, 5));
921bc5ad3f3SBenjamin Herrenschmidt 		break;
922bc5ad3f3SBenjamin Herrenschmidt 	}
923bc5ad3f3SBenjamin Herrenschmidt 
924bc5ad3f3SBenjamin Herrenschmidt 	return rc;
925bc5ad3f3SBenjamin Herrenschmidt }
9262ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
927bc5ad3f3SBenjamin Herrenschmidt 
928bc5ad3f3SBenjamin Herrenschmidt 
929bc5ad3f3SBenjamin Herrenschmidt /* -- Initialisation code etc. -- */
930bc5ad3f3SBenjamin Herrenschmidt 
931af893c7dSSuresh Warrier static void xics_debugfs_irqmap(struct seq_file *m,
932af893c7dSSuresh Warrier 				struct kvmppc_passthru_irqmap *pimap)
933af893c7dSSuresh Warrier {
934af893c7dSSuresh Warrier 	int i;
935af893c7dSSuresh Warrier 
936af893c7dSSuresh Warrier 	if (!pimap)
937af893c7dSSuresh Warrier 		return;
938af893c7dSSuresh Warrier 	seq_printf(m, "=========\nPIRQ mappings: %d maps\n=========\n",
939af893c7dSSuresh Warrier 				pimap->n_mapped);
940af893c7dSSuresh Warrier 	for (i = 0; i < pimap->n_mapped; i++)  {
941af893c7dSSuresh Warrier 		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
942af893c7dSSuresh Warrier 			pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
943af893c7dSSuresh Warrier 	}
944af893c7dSSuresh Warrier }
945af893c7dSSuresh Warrier 
946bc5ad3f3SBenjamin Herrenschmidt static int xics_debug_show(struct seq_file *m, void *private)
947bc5ad3f3SBenjamin Herrenschmidt {
948bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_xics *xics = m->private;
949bc5ad3f3SBenjamin Herrenschmidt 	struct kvm *kvm = xics->kvm;
950bc5ad3f3SBenjamin Herrenschmidt 	struct kvm_vcpu *vcpu;
951bc5ad3f3SBenjamin Herrenschmidt 	int icsid, i;
95234cb7954SSuresh Warrier 	unsigned long flags;
953878610feSSuresh E. Warrier 	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
9545efa6605SLi Zhong 	unsigned long t_rm_notify_eoi;
9556e0365b7SSuresh Warrier 	unsigned long t_reject, t_check_resend;
956bc5ad3f3SBenjamin Herrenschmidt 
957bc5ad3f3SBenjamin Herrenschmidt 	if (!kvm)
958bc5ad3f3SBenjamin Herrenschmidt 		return 0;
959bc5ad3f3SBenjamin Herrenschmidt 
960878610feSSuresh E. Warrier 	t_rm_kick_vcpu = 0;
961878610feSSuresh E. Warrier 	t_rm_notify_eoi = 0;
962878610feSSuresh E. Warrier 	t_rm_check_resend = 0;
9636e0365b7SSuresh Warrier 	t_check_resend = 0;
9646e0365b7SSuresh Warrier 	t_reject = 0;
965878610feSSuresh E. Warrier 
966af893c7dSSuresh Warrier 	xics_debugfs_irqmap(m, kvm->arch.pimap);
967af893c7dSSuresh Warrier 
968bc5ad3f3SBenjamin Herrenschmidt 	seq_printf(m, "=========\nICP state\n=========\n");
969bc5ad3f3SBenjamin Herrenschmidt 
970bc5ad3f3SBenjamin Herrenschmidt 	kvm_for_each_vcpu(i, vcpu, kvm) {
971bc5ad3f3SBenjamin Herrenschmidt 		struct kvmppc_icp *icp = vcpu->arch.icp;
972bc5ad3f3SBenjamin Herrenschmidt 		union kvmppc_icp_state state;
973bc5ad3f3SBenjamin Herrenschmidt 
974bc5ad3f3SBenjamin Herrenschmidt 		if (!icp)
975bc5ad3f3SBenjamin Herrenschmidt 			continue;
976bc5ad3f3SBenjamin Herrenschmidt 
9775ee07612SChristian Borntraeger 		state.raw = READ_ONCE(icp->state.raw);
978bc5ad3f3SBenjamin Herrenschmidt 		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
979bc5ad3f3SBenjamin Herrenschmidt 			   icp->server_num, state.xisr,
980bc5ad3f3SBenjamin Herrenschmidt 			   state.pending_pri, state.cppr, state.mfrr,
981bc5ad3f3SBenjamin Herrenschmidt 			   state.out_ee, state.need_resend);
982878610feSSuresh E. Warrier 		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
983878610feSSuresh E. Warrier 		t_rm_notify_eoi += icp->n_rm_notify_eoi;
984878610feSSuresh E. Warrier 		t_rm_check_resend += icp->n_rm_check_resend;
9856e0365b7SSuresh Warrier 		t_check_resend += icp->n_check_resend;
9866e0365b7SSuresh Warrier 		t_reject += icp->n_reject;
987bc5ad3f3SBenjamin Herrenschmidt 	}
988bc5ad3f3SBenjamin Herrenschmidt 
9895efa6605SLi Zhong 	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
990878610feSSuresh E. Warrier 			t_rm_kick_vcpu, t_rm_check_resend,
9915efa6605SLi Zhong 			t_rm_notify_eoi);
9926e0365b7SSuresh Warrier 	seq_printf(m, "ICP Real Mode totals: check_resend=%lu reject=%lu\n",
9936e0365b7SSuresh Warrier 			t_check_resend, t_reject);
994bc5ad3f3SBenjamin Herrenschmidt 	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
995bc5ad3f3SBenjamin Herrenschmidt 		struct kvmppc_ics *ics = xics->ics[icsid];
996bc5ad3f3SBenjamin Herrenschmidt 
997bc5ad3f3SBenjamin Herrenschmidt 		if (!ics)
998bc5ad3f3SBenjamin Herrenschmidt 			continue;
999bc5ad3f3SBenjamin Herrenschmidt 
1000bc5ad3f3SBenjamin Herrenschmidt 		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
1001bc5ad3f3SBenjamin Herrenschmidt 			   icsid);
1002bc5ad3f3SBenjamin Herrenschmidt 
100334cb7954SSuresh Warrier 		local_irq_save(flags);
100434cb7954SSuresh Warrier 		arch_spin_lock(&ics->lock);
1005bc5ad3f3SBenjamin Herrenschmidt 
1006bc5ad3f3SBenjamin Herrenschmidt 		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1007bc5ad3f3SBenjamin Herrenschmidt 			struct ics_irq_state *irq = &ics->irq_state[i];
1008bc5ad3f3SBenjamin Herrenschmidt 
100917d48610SLi Zhong 			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
1010bc5ad3f3SBenjamin Herrenschmidt 				   irq->number, irq->server, irq->priority,
101117d48610SLi Zhong 				   irq->saved_priority, irq->pq_state,
1012bc5ad3f3SBenjamin Herrenschmidt 				   irq->resend, irq->masked_pending);
1013bc5ad3f3SBenjamin Herrenschmidt 
1014bc5ad3f3SBenjamin Herrenschmidt 		}
101534cb7954SSuresh Warrier 		arch_spin_unlock(&ics->lock);
101634cb7954SSuresh Warrier 		local_irq_restore(flags);
1017bc5ad3f3SBenjamin Herrenschmidt 	}
1018bc5ad3f3SBenjamin Herrenschmidt 	return 0;
1019bc5ad3f3SBenjamin Herrenschmidt }
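
/*
 * Note on the locking pattern above: the ICS lock is a raw
 * arch_spinlock_t taken under local_irq_save()/arch_spin_lock() rather
 * than an ordinary spin_lock_irqsave().  This is assumed to be because
 * the same lock is also taken from the real-mode ICP handlers, where
 * the usual spinlock debugging and irq-tracing machinery cannot run;
 * the same open-coded sequence appears in xics_get_source() and
 * xics_set_source() below.
 */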
1020bc5ad3f3SBenjamin Herrenschmidt 
1021bc5ad3f3SBenjamin Herrenschmidt static int xics_debug_open(struct inode *inode, struct file *file)
1022bc5ad3f3SBenjamin Herrenschmidt {
1023bc5ad3f3SBenjamin Herrenschmidt 	return single_open(file, xics_debug_show, inode->i_private);
1024bc5ad3f3SBenjamin Herrenschmidt }
1025bc5ad3f3SBenjamin Herrenschmidt 
1026bc5ad3f3SBenjamin Herrenschmidt static const struct file_operations xics_debug_fops = {
1027bc5ad3f3SBenjamin Herrenschmidt 	.open = xics_debug_open,
1028bc5ad3f3SBenjamin Herrenschmidt 	.read = seq_read,
1029bc5ad3f3SBenjamin Herrenschmidt 	.llseek = seq_lseek,
1030bc5ad3f3SBenjamin Herrenschmidt 	.release = single_release,
1031bc5ad3f3SBenjamin Herrenschmidt };
1032bc5ad3f3SBenjamin Herrenschmidt 
1033bc5ad3f3SBenjamin Herrenschmidt static void xics_debugfs_init(struct kvmppc_xics *xics)
1034bc5ad3f3SBenjamin Herrenschmidt {
1035bc5ad3f3SBenjamin Herrenschmidt 	char *name;
1036bc5ad3f3SBenjamin Herrenschmidt 
1037bc5ad3f3SBenjamin Herrenschmidt 	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
1038bc5ad3f3SBenjamin Herrenschmidt 	if (!name) {
1039bc5ad3f3SBenjamin Herrenschmidt 		pr_err("%s: no memory for name\n", __func__);
1040bc5ad3f3SBenjamin Herrenschmidt 		return;
1041bc5ad3f3SBenjamin Herrenschmidt 	}
1042bc5ad3f3SBenjamin Herrenschmidt 
1043bc5ad3f3SBenjamin Herrenschmidt 	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
1044bc5ad3f3SBenjamin Herrenschmidt 					   xics, &xics_debug_fops);
1045bc5ad3f3SBenjamin Herrenschmidt 
1046bc5ad3f3SBenjamin Herrenschmidt 	pr_debug("%s: created %s\n", __func__, name);
1047bc5ad3f3SBenjamin Herrenschmidt 	kfree(name);
1048bc5ad3f3SBenjamin Herrenschmidt }
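
/*
 * With powerpc_debugfs_root typically mounted at
 * /sys/kernel/debug/powerpc/, the dump produced by xics_debug_show()
 * is expected to appear there as one "kvm-xics-<address>" file per VM,
 * readable with a plain cat while the VM is running.
 */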
1049bc5ad3f3SBenjamin Herrenschmidt 
10505975a2e0SPaul Mackerras static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
1051bc5ad3f3SBenjamin Herrenschmidt 					struct kvmppc_xics *xics, int irq)
1052bc5ad3f3SBenjamin Herrenschmidt {
1053bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_ics *ics;
1054bc5ad3f3SBenjamin Herrenschmidt 	int i, icsid;
1055bc5ad3f3SBenjamin Herrenschmidt 
1056bc5ad3f3SBenjamin Herrenschmidt 	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
1057bc5ad3f3SBenjamin Herrenschmidt 
1058bc5ad3f3SBenjamin Herrenschmidt 	mutex_lock(&kvm->lock);
1059bc5ad3f3SBenjamin Herrenschmidt 
1060bc5ad3f3SBenjamin Herrenschmidt 	/* ICS already exists - somebody else got here first */
1061bc5ad3f3SBenjamin Herrenschmidt 	if (xics->ics[icsid])
1062bc5ad3f3SBenjamin Herrenschmidt 		goto out;
1063bc5ad3f3SBenjamin Herrenschmidt 
1064bc5ad3f3SBenjamin Herrenschmidt 	/* Create the ICS */
1065bc5ad3f3SBenjamin Herrenschmidt 	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
1066bc5ad3f3SBenjamin Herrenschmidt 	if (!ics)
1067bc5ad3f3SBenjamin Herrenschmidt 		goto out;
1068bc5ad3f3SBenjamin Herrenschmidt 
1069bc5ad3f3SBenjamin Herrenschmidt 	ics->icsid = icsid;
1070bc5ad3f3SBenjamin Herrenschmidt 
1071bc5ad3f3SBenjamin Herrenschmidt 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1072bc5ad3f3SBenjamin Herrenschmidt 		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
1073bc5ad3f3SBenjamin Herrenschmidt 		ics->irq_state[i].priority = MASKED;
1074bc5ad3f3SBenjamin Herrenschmidt 		ics->irq_state[i].saved_priority = MASKED;
1075bc5ad3f3SBenjamin Herrenschmidt 	}
1076bc5ad3f3SBenjamin Herrenschmidt 	smp_wmb();
1077bc5ad3f3SBenjamin Herrenschmidt 	xics->ics[icsid] = ics;
1078bc5ad3f3SBenjamin Herrenschmidt 
1079bc5ad3f3SBenjamin Herrenschmidt 	if (icsid > xics->max_icsid)
1080bc5ad3f3SBenjamin Herrenschmidt 		xics->max_icsid = icsid;
1081bc5ad3f3SBenjamin Herrenschmidt 
1082bc5ad3f3SBenjamin Herrenschmidt  out:
1083bc5ad3f3SBenjamin Herrenschmidt 	mutex_unlock(&kvm->lock);
1084bc5ad3f3SBenjamin Herrenschmidt 	return xics->ics[icsid];
1085bc5ad3f3SBenjamin Herrenschmidt }
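
/*
 * xics->ics[icsid] is published only after the smp_wmb() above, so a
 * reader that sees a non-NULL ICS pointer also sees the initialised
 * irq_state[] entries.  For reference, the lookup done by
 * kvmppc_xics_find_ics() (defined earlier in this file) is assumed to
 * be the inverse of the numbering set up here; a minimal sketch, with
 * find_ics_sketch() a hypothetical name:
 *
 *	static struct kvmppc_ics *find_ics_sketch(struct kvmppc_xics *xics,
 *						  u32 irq, u16 *src)
 *	{
 *		u32 icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
 *
 *		if (icsid > KVMPPC_XICS_MAX_ICS_ID || !xics->ics[icsid])
 *			return NULL;
 *		*src = irq & (KVMPPC_XICS_IRQ_PER_ICS - 1);
 *		return xics->ics[icsid];
 *	}
 */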
1086bc5ad3f3SBenjamin Herrenschmidt 
1087936774cdSBenjamin Herrenschmidt static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
1088bc5ad3f3SBenjamin Herrenschmidt {
1089bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_icp *icp;
1090bc5ad3f3SBenjamin Herrenschmidt 
1091bc5ad3f3SBenjamin Herrenschmidt 	if (!vcpu->kvm->arch.xics)
1092bc5ad3f3SBenjamin Herrenschmidt 		return -ENODEV;
1093bc5ad3f3SBenjamin Herrenschmidt 
1094bc5ad3f3SBenjamin Herrenschmidt 	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
1095bc5ad3f3SBenjamin Herrenschmidt 		return -EEXIST;
1096bc5ad3f3SBenjamin Herrenschmidt 
1097bc5ad3f3SBenjamin Herrenschmidt 	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
1098bc5ad3f3SBenjamin Herrenschmidt 	if (!icp)
1099bc5ad3f3SBenjamin Herrenschmidt 		return -ENOMEM;
1100bc5ad3f3SBenjamin Herrenschmidt 
1101bc5ad3f3SBenjamin Herrenschmidt 	icp->vcpu = vcpu;
1102bc5ad3f3SBenjamin Herrenschmidt 	icp->server_num = server_num;
1103bc5ad3f3SBenjamin Herrenschmidt 	icp->state.mfrr = MASKED;
1104bc5ad3f3SBenjamin Herrenschmidt 	icp->state.pending_pri = MASKED;
1105bc5ad3f3SBenjamin Herrenschmidt 	vcpu->arch.icp = icp;
1106bc5ad3f3SBenjamin Herrenschmidt 
1107bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);
1108bc5ad3f3SBenjamin Herrenschmidt 
1109bc5ad3f3SBenjamin Herrenschmidt 	return 0;
1110bc5ad3f3SBenjamin Herrenschmidt }
1111bc5ad3f3SBenjamin Herrenschmidt 
11128b78645cSPaul Mackerras u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
11138b78645cSPaul Mackerras {
11148b78645cSPaul Mackerras 	struct kvmppc_icp *icp = vcpu->arch.icp;
11158b78645cSPaul Mackerras 	union kvmppc_icp_state state;
11168b78645cSPaul Mackerras 
11178b78645cSPaul Mackerras 	if (!icp)
11188b78645cSPaul Mackerras 		return 0;
11198b78645cSPaul Mackerras 	state = icp->state;
11208b78645cSPaul Mackerras 	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
11218b78645cSPaul Mackerras 		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
11228b78645cSPaul Mackerras 		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
11238b78645cSPaul Mackerras 		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
11248b78645cSPaul Mackerras }
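
/*
 * Userspace is assumed to read and write this packed word through the
 * KVM_REG_PPC_ICP_STATE pseudo-register; kvmppc_xics_set_icp() below
 * accepts the same layout.  A hypothetical fetch from a vcpu fd
 * (vcpu_fd and icpval are placeholders, error handling omitted):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_ICP_STATE,
 *		.addr = (__u64)(uintptr_t)&icpval,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
 */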
11258b78645cSPaul Mackerras 
11268b78645cSPaul Mackerras int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
11278b78645cSPaul Mackerras {
11288b78645cSPaul Mackerras 	struct kvmppc_icp *icp = vcpu->arch.icp;
11298b78645cSPaul Mackerras 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
11308b78645cSPaul Mackerras 	union kvmppc_icp_state old_state, new_state;
11318b78645cSPaul Mackerras 	struct kvmppc_ics *ics;
11328b78645cSPaul Mackerras 	u8 cppr, mfrr, pending_pri;
11338b78645cSPaul Mackerras 	u32 xisr;
11348b78645cSPaul Mackerras 	u16 src;
11358b78645cSPaul Mackerras 	bool resend;
11368b78645cSPaul Mackerras 
11378b78645cSPaul Mackerras 	if (!icp || !xics)
11388b78645cSPaul Mackerras 		return -ENOENT;
11398b78645cSPaul Mackerras 
11408b78645cSPaul Mackerras 	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
11418b78645cSPaul Mackerras 	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
11428b78645cSPaul Mackerras 		KVM_REG_PPC_ICP_XISR_MASK;
11438b78645cSPaul Mackerras 	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
11448b78645cSPaul Mackerras 	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;
11458b78645cSPaul Mackerras 
11468b78645cSPaul Mackerras 	/* Require the new state to be internally consistent */
11478b78645cSPaul Mackerras 	if (xisr == 0) {
11488b78645cSPaul Mackerras 		if (pending_pri != 0xff)
11498b78645cSPaul Mackerras 			return -EINVAL;
11508b78645cSPaul Mackerras 	} else if (xisr == XICS_IPI) {
11518b78645cSPaul Mackerras 		if (pending_pri != mfrr || pending_pri >= cppr)
11528b78645cSPaul Mackerras 			return -EINVAL;
11538b78645cSPaul Mackerras 	} else {
11548b78645cSPaul Mackerras 		if (pending_pri >= mfrr || pending_pri >= cppr)
11558b78645cSPaul Mackerras 			return -EINVAL;
11568b78645cSPaul Mackerras 		ics = kvmppc_xics_find_ics(xics, xisr, &src);
11578b78645cSPaul Mackerras 		if (!ics)
11588b78645cSPaul Mackerras 			return -EINVAL;
11598b78645cSPaul Mackerras 	}
11608b78645cSPaul Mackerras 
11618b78645cSPaul Mackerras 	new_state.raw = 0;
11628b78645cSPaul Mackerras 	new_state.cppr = cppr;
11638b78645cSPaul Mackerras 	new_state.xisr = xisr;
11648b78645cSPaul Mackerras 	new_state.mfrr = mfrr;
11658b78645cSPaul Mackerras 	new_state.pending_pri = pending_pri;
11668b78645cSPaul Mackerras 
11678b78645cSPaul Mackerras 	/*
11688b78645cSPaul Mackerras 	 * Deassert the CPU interrupt request.
11698b78645cSPaul Mackerras 	 * icp_try_update will reassert it if necessary.
11708b78645cSPaul Mackerras 	 */
11718b78645cSPaul Mackerras 	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
11728b78645cSPaul Mackerras 				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
11738b78645cSPaul Mackerras 
11748b78645cSPaul Mackerras 	/*
11758b78645cSPaul Mackerras 	 * Note that if we displace an interrupt from old_state.xisr,
11768b78645cSPaul Mackerras 	 * we don't mark it as rejected.  We expect userspace to set
11778b78645cSPaul Mackerras 	 * the state of the interrupt sources to be consistent with
11788b78645cSPaul Mackerras 	 * the ICP states (either before or afterwards, which doesn't
11798b78645cSPaul Mackerras 	 * matter).  We do handle resends due to CPPR becoming less
11808b78645cSPaul Mackerras 	 * favoured because that is necessary to end up with a
11818b78645cSPaul Mackerras 	 * consistent state in the situation where userspace restores
11828b78645cSPaul Mackerras 	 * the ICS states before the ICP states.
11838b78645cSPaul Mackerras 	 */
11848b78645cSPaul Mackerras 	do {
11855ee07612SChristian Borntraeger 		old_state = READ_ONCE(icp->state);
11868b78645cSPaul Mackerras 
11878b78645cSPaul Mackerras 		if (new_state.mfrr <= old_state.mfrr) {
11888b78645cSPaul Mackerras 			resend = false;
11898b78645cSPaul Mackerras 			new_state.need_resend = old_state.need_resend;
11908b78645cSPaul Mackerras 		} else {
11918b78645cSPaul Mackerras 			resend = old_state.need_resend;
11928b78645cSPaul Mackerras 			new_state.need_resend = 0;
11938b78645cSPaul Mackerras 		}
11948b78645cSPaul Mackerras 	} while (!icp_try_update(icp, old_state, new_state, false));
11958b78645cSPaul Mackerras 
11968b78645cSPaul Mackerras 	if (resend)
11978b78645cSPaul Mackerras 		icp_check_resend(xics, icp);
11988b78645cSPaul Mackerras 
11998b78645cSPaul Mackerras 	return 0;
12008b78645cSPaul Mackerras }
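
/*
 * The update loop above relies on icp_try_update(), defined earlier in
 * this file.  Its core is assumed to be a single cmpxchg64() on the raw
 * ICP word, with delivery side effects applied only when the swap
 * succeeds, roughly:
 *
 *	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
 *	if (success && new.out_ee)
 *		kvmppc_book3s_queue_irqprio(icp->vcpu,
 *					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 *
 * If the swap fails because another CPU changed the ICP state, the
 * caller simply re-reads icp->state and retries.
 */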
12018b78645cSPaul Mackerras 
12025975a2e0SPaul Mackerras static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
1203bc5ad3f3SBenjamin Herrenschmidt {
12045975a2e0SPaul Mackerras 	int ret;
12055975a2e0SPaul Mackerras 	struct kvmppc_ics *ics;
12065975a2e0SPaul Mackerras 	struct ics_irq_state *irqp;
12075975a2e0SPaul Mackerras 	u64 __user *ubufp = (u64 __user *) addr;
12085975a2e0SPaul Mackerras 	u16 idx;
12095975a2e0SPaul Mackerras 	u64 val, prio;
121034cb7954SSuresh Warrier 	unsigned long flags;
1211bc5ad3f3SBenjamin Herrenschmidt 
12125975a2e0SPaul Mackerras 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
12135975a2e0SPaul Mackerras 	if (!ics)
12145975a2e0SPaul Mackerras 		return -ENOENT;
1215bc5ad3f3SBenjamin Herrenschmidt 
12165975a2e0SPaul Mackerras 	irqp = &ics->irq_state[idx];
121734cb7954SSuresh Warrier 	local_irq_save(flags);
121834cb7954SSuresh Warrier 	arch_spin_lock(&ics->lock);
12195975a2e0SPaul Mackerras 	ret = -ENOENT;
12205975a2e0SPaul Mackerras 	if (irqp->exists) {
12215975a2e0SPaul Mackerras 		val = irqp->server;
12225975a2e0SPaul Mackerras 		prio = irqp->priority;
12235975a2e0SPaul Mackerras 		if (prio == MASKED) {
12245975a2e0SPaul Mackerras 			val |= KVM_XICS_MASKED;
12255975a2e0SPaul Mackerras 			prio = irqp->saved_priority;
12265975a2e0SPaul Mackerras 		}
12275975a2e0SPaul Mackerras 		val |= prio << KVM_XICS_PRIORITY_SHIFT;
1228b1a4286bSPaul Mackerras 		if (irqp->lsi) {
1229b1a4286bSPaul Mackerras 			val |= KVM_XICS_LEVEL_SENSITIVE;
123017d48610SLi Zhong 			if (irqp->pq_state & PQ_PRESENTED)
1231b1a4286bSPaul Mackerras 				val |= KVM_XICS_PENDING;
1232b1a4286bSPaul Mackerras 		} else if (irqp->masked_pending || irqp->resend)
12335975a2e0SPaul Mackerras 			val |= KVM_XICS_PENDING;
123417d48610SLi Zhong 
123517d48610SLi Zhong 		if (irqp->pq_state & PQ_PRESENTED)
123617d48610SLi Zhong 			val |= KVM_XICS_PRESENTED;
123717d48610SLi Zhong 
123817d48610SLi Zhong 		if (irqp->pq_state & PQ_QUEUED)
123917d48610SLi Zhong 			val |= KVM_XICS_QUEUED;
124017d48610SLi Zhong 
12415975a2e0SPaul Mackerras 		ret = 0;
12425975a2e0SPaul Mackerras 	}
124334cb7954SSuresh Warrier 	arch_spin_unlock(&ics->lock);
124434cb7954SSuresh Warrier 	local_irq_restore(flags);
1245bc5ad3f3SBenjamin Herrenschmidt 
12465975a2e0SPaul Mackerras 	if (!ret && put_user(val, ubufp))
12475975a2e0SPaul Mackerras 		ret = -EFAULT;
12485975a2e0SPaul Mackerras 
12495975a2e0SPaul Mackerras 	return ret;
12505975a2e0SPaul Mackerras }
12515975a2e0SPaul Mackerras 
12525975a2e0SPaul Mackerras static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
12535975a2e0SPaul Mackerras {
12545975a2e0SPaul Mackerras 	struct kvmppc_ics *ics;
12555975a2e0SPaul Mackerras 	struct ics_irq_state *irqp;
12565975a2e0SPaul Mackerras 	u64 __user *ubufp = (u64 __user *) addr;
12575975a2e0SPaul Mackerras 	u16 idx;
12585975a2e0SPaul Mackerras 	u64 val;
12595975a2e0SPaul Mackerras 	u8 prio;
12605975a2e0SPaul Mackerras 	u32 server;
126134cb7954SSuresh Warrier 	unsigned long flags;
12625975a2e0SPaul Mackerras 
12635975a2e0SPaul Mackerras 	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
12645975a2e0SPaul Mackerras 		return -ENOENT;
12655975a2e0SPaul Mackerras 
12665975a2e0SPaul Mackerras 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
12675975a2e0SPaul Mackerras 	if (!ics) {
12685975a2e0SPaul Mackerras 		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
12695975a2e0SPaul Mackerras 		if (!ics)
12705975a2e0SPaul Mackerras 			return -ENOMEM;
12715975a2e0SPaul Mackerras 	}
12725975a2e0SPaul Mackerras 	irqp = &ics->irq_state[idx];
12735975a2e0SPaul Mackerras 	if (get_user(val, ubufp))
12745975a2e0SPaul Mackerras 		return -EFAULT;
12755975a2e0SPaul Mackerras 
12765975a2e0SPaul Mackerras 	server = val & KVM_XICS_DESTINATION_MASK;
12775975a2e0SPaul Mackerras 	prio = val >> KVM_XICS_PRIORITY_SHIFT;
12785975a2e0SPaul Mackerras 	if (prio != MASKED &&
12795975a2e0SPaul Mackerras 	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
12805975a2e0SPaul Mackerras 		return -EINVAL;
12815975a2e0SPaul Mackerras 
128234cb7954SSuresh Warrier 	local_irq_save(flags);
128334cb7954SSuresh Warrier 	arch_spin_lock(&ics->lock);
12845975a2e0SPaul Mackerras 	irqp->server = server;
12855975a2e0SPaul Mackerras 	irqp->saved_priority = prio;
12865975a2e0SPaul Mackerras 	if (val & KVM_XICS_MASKED)
12875975a2e0SPaul Mackerras 		prio = MASKED;
12885975a2e0SPaul Mackerras 	irqp->priority = prio;
12895975a2e0SPaul Mackerras 	irqp->resend = 0;
12905975a2e0SPaul Mackerras 	irqp->masked_pending = 0;
1291b1a4286bSPaul Mackerras 	irqp->lsi = 0;
129217d48610SLi Zhong 	irqp->pq_state = 0;
129317d48610SLi Zhong 	if (val & KVM_XICS_LEVEL_SENSITIVE)
1294b1a4286bSPaul Mackerras 		irqp->lsi = 1;
129517d48610SLi Zhong 	/* If PENDING is set, also set PQ_PRESENTED: older userspace may not have saved the P bit */
129617d48610SLi Zhong 	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
129717d48610SLi Zhong 		irqp->pq_state |= PQ_PRESENTED;
129817d48610SLi Zhong 	if (val & KVM_XICS_QUEUED)
129917d48610SLi Zhong 		irqp->pq_state |= PQ_QUEUED;
13005975a2e0SPaul Mackerras 	irqp->exists = 1;
130134cb7954SSuresh Warrier 	arch_spin_unlock(&ics->lock);
130234cb7954SSuresh Warrier 	local_irq_restore(flags);
13035975a2e0SPaul Mackerras 
13045975a2e0SPaul Mackerras 	if (val & KVM_XICS_PENDING)
130521acd0e4SLi Zhong 		icp_deliver_irq(xics, NULL, irqp->number, false);
13065975a2e0SPaul Mackerras 
13075975a2e0SPaul Mackerras 	return 0;
13085975a2e0SPaul Mackerras }
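
/*
 * xics_set_source() is reached from userspace via KVM_SET_DEVICE_ATTR
 * on the XICS device fd with group KVM_DEV_XICS_GRP_SOURCES and the
 * global interrupt number as the attribute.  A hypothetical caller
 * (xics_fd, irq_number, server and prio are placeholders):
 *
 *	__u64 val = server |
 *		    ((__u64)prio << KVM_XICS_PRIORITY_SHIFT) |
 *		    KVM_XICS_LEVEL_SENSITIVE;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq_number,
 *		.addr  = (__u64)(uintptr_t)&val,
 *	};
 *	ioctl(xics_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * xics_get_source() serves the matching KVM_GET_DEVICE_ATTR path.
 */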
13095975a2e0SPaul Mackerras 
1310*5af50993SBenjamin Herrenschmidt int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
13115975a2e0SPaul Mackerras 			bool line_status)
13125975a2e0SPaul Mackerras {
13135975a2e0SPaul Mackerras 	struct kvmppc_xics *xics = kvm->arch.xics;
13145975a2e0SPaul Mackerras 
1315e48ba1cbSPaul Mackerras 	if (!xics)
1316e48ba1cbSPaul Mackerras 		return -ENODEV;
131725a2150bSPaul Mackerras 	return ics_deliver_irq(xics, irq, level);
131825a2150bSPaul Mackerras }
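
/*
 * kvmppc_xics_set_irq() is the irqchip set_irq callback (also used by
 * irqfd), so a guest interrupt can be raised from userspace through
 * the generic KVM_IRQ_LINE ioctl on the VM fd, assuming the default
 * 1:1 IRQ routing for the in-kernel XICS (guest_irq is a placeholder):
 *
 *	struct kvm_irq_level args = {
 *		.irq   = guest_irq,
 *		.level = 1,
 *	};
 *	ioctl(vm_fd, KVM_IRQ_LINE, &args);
 */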
131925a2150bSPaul Mackerras 
13205975a2e0SPaul Mackerras static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
13215975a2e0SPaul Mackerras {
13225975a2e0SPaul Mackerras 	struct kvmppc_xics *xics = dev->private;
13235975a2e0SPaul Mackerras 
13245975a2e0SPaul Mackerras 	switch (attr->group) {
13255975a2e0SPaul Mackerras 	case KVM_DEV_XICS_GRP_SOURCES:
13265975a2e0SPaul Mackerras 		return xics_set_source(xics, attr->attr, attr->addr);
13275975a2e0SPaul Mackerras 	}
13285975a2e0SPaul Mackerras 	return -ENXIO;
13295975a2e0SPaul Mackerras }
13305975a2e0SPaul Mackerras 
13315975a2e0SPaul Mackerras static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
13325975a2e0SPaul Mackerras {
13335975a2e0SPaul Mackerras 	struct kvmppc_xics *xics = dev->private;
13345975a2e0SPaul Mackerras 
13355975a2e0SPaul Mackerras 	switch (attr->group) {
13365975a2e0SPaul Mackerras 	case KVM_DEV_XICS_GRP_SOURCES:
13375975a2e0SPaul Mackerras 		return xics_get_source(xics, attr->attr, attr->addr);
13385975a2e0SPaul Mackerras 	}
13395975a2e0SPaul Mackerras 	return -ENXIO;
13405975a2e0SPaul Mackerras }
13415975a2e0SPaul Mackerras 
13425975a2e0SPaul Mackerras static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
13435975a2e0SPaul Mackerras {
13445975a2e0SPaul Mackerras 	switch (attr->group) {
13455975a2e0SPaul Mackerras 	case KVM_DEV_XICS_GRP_SOURCES:
13465975a2e0SPaul Mackerras 		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
13475975a2e0SPaul Mackerras 		    attr->attr < KVMPPC_XICS_NR_IRQS)
13485975a2e0SPaul Mackerras 			return 0;
1349bc5ad3f3SBenjamin Herrenschmidt 		break;
13505975a2e0SPaul Mackerras 	}
13515975a2e0SPaul Mackerras 	return -ENXIO;
1352bc5ad3f3SBenjamin Herrenschmidt }
1353bc5ad3f3SBenjamin Herrenschmidt 
13545975a2e0SPaul Mackerras static void kvmppc_xics_free(struct kvm_device *dev)
1355bc5ad3f3SBenjamin Herrenschmidt {
13565975a2e0SPaul Mackerras 	struct kvmppc_xics *xics = dev->private;
1357bc5ad3f3SBenjamin Herrenschmidt 	int i;
1358bc5ad3f3SBenjamin Herrenschmidt 	struct kvm *kvm = xics->kvm;
1359bc5ad3f3SBenjamin Herrenschmidt 
1360bc5ad3f3SBenjamin Herrenschmidt 	debugfs_remove(xics->dentry);
1361bc5ad3f3SBenjamin Herrenschmidt 
1362bc5ad3f3SBenjamin Herrenschmidt 	if (kvm)
1363bc5ad3f3SBenjamin Herrenschmidt 		kvm->arch.xics = NULL;
1364bc5ad3f3SBenjamin Herrenschmidt 
1365bc5ad3f3SBenjamin Herrenschmidt 	for (i = 0; i <= xics->max_icsid; i++)
1366bc5ad3f3SBenjamin Herrenschmidt 		kfree(xics->ics[i]);
1367bc5ad3f3SBenjamin Herrenschmidt 	kfree(xics);
13685975a2e0SPaul Mackerras 	kfree(dev);
1369bc5ad3f3SBenjamin Herrenschmidt }
1370bc5ad3f3SBenjamin Herrenschmidt 
13715975a2e0SPaul Mackerras static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
1372bc5ad3f3SBenjamin Herrenschmidt {
1373bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_xics *xics;
13745975a2e0SPaul Mackerras 	struct kvm *kvm = dev->kvm;
1375bc5ad3f3SBenjamin Herrenschmidt 	int ret = 0;
1376bc5ad3f3SBenjamin Herrenschmidt 
1377bc5ad3f3SBenjamin Herrenschmidt 	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
1378bc5ad3f3SBenjamin Herrenschmidt 	if (!xics)
1379bc5ad3f3SBenjamin Herrenschmidt 		return -ENOMEM;
1380bc5ad3f3SBenjamin Herrenschmidt 
13815975a2e0SPaul Mackerras 	dev->private = xics;
13825975a2e0SPaul Mackerras 	xics->dev = dev;
1383bc5ad3f3SBenjamin Herrenschmidt 	xics->kvm = kvm;
1384bc5ad3f3SBenjamin Herrenschmidt 
1385bc5ad3f3SBenjamin Herrenschmidt 	/* Already there? */
1386bc5ad3f3SBenjamin Herrenschmidt 	if (kvm->arch.xics)
1387bc5ad3f3SBenjamin Herrenschmidt 		ret = -EEXIST;
1388bc5ad3f3SBenjamin Herrenschmidt 	else
1389bc5ad3f3SBenjamin Herrenschmidt 		kvm->arch.xics = xics;
1390bc5ad3f3SBenjamin Herrenschmidt 
1391458ff3c0SGleb Natapov 	if (ret) {
1392458ff3c0SGleb Natapov 		kfree(xics);
1393bc5ad3f3SBenjamin Herrenschmidt 		return ret;
1394458ff3c0SGleb Natapov 	}
1395bc5ad3f3SBenjamin Herrenschmidt 
13963a167beaSAneesh Kumar K.V #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1397e7d26f28SBenjamin Herrenschmidt 	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1398e7d26f28SBenjamin Herrenschmidt 		/* Enable real mode support */
1399e7d26f28SBenjamin Herrenschmidt 		xics->real_mode = ENABLE_REALMODE;
1400e7d26f28SBenjamin Herrenschmidt 		xics->real_mode_dbg = DEBUG_REALMODE;
1401e7d26f28SBenjamin Herrenschmidt 	}
14023a167beaSAneesh Kumar K.V #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1403e7d26f28SBenjamin Herrenschmidt 
1404bc5ad3f3SBenjamin Herrenschmidt 	return 0;
1405bc5ad3f3SBenjamin Herrenschmidt }
1406bc5ad3f3SBenjamin Herrenschmidt 
1407023e9fddSChristoffer Dall static void kvmppc_xics_init(struct kvm_device *dev)
1408023e9fddSChristoffer Dall {
1409023e9fddSChristoffer Dall 	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
1410023e9fddSChristoffer Dall 
1411023e9fddSChristoffer Dall 	xics_debugfs_init(xics);
1412023e9fddSChristoffer Dall }
1413023e9fddSChristoffer Dall 
14145975a2e0SPaul Mackerras struct kvm_device_ops kvm_xics_ops = {
14155975a2e0SPaul Mackerras 	.name = "kvm-xics",
14165975a2e0SPaul Mackerras 	.create = kvmppc_xics_create,
1417023e9fddSChristoffer Dall 	.init = kvmppc_xics_init,
14185975a2e0SPaul Mackerras 	.destroy = kvmppc_xics_free,
14195975a2e0SPaul Mackerras 	.set_attr = xics_set_attr,
14205975a2e0SPaul Mackerras 	.get_attr = xics_get_attr,
14215975a2e0SPaul Mackerras 	.has_attr = xics_has_attr,
14225975a2e0SPaul Mackerras };
14235975a2e0SPaul Mackerras 
14245975a2e0SPaul Mackerras int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
14255975a2e0SPaul Mackerras 			     u32 xcpu)
14265975a2e0SPaul Mackerras {
14275975a2e0SPaul Mackerras 	struct kvmppc_xics *xics = dev->private;
14285975a2e0SPaul Mackerras 	int r = -EBUSY;
14295975a2e0SPaul Mackerras 
14305975a2e0SPaul Mackerras 	if (dev->ops != &kvm_xics_ops)
14315975a2e0SPaul Mackerras 		return -EPERM;
14325975a2e0SPaul Mackerras 	if (xics->kvm != vcpu->kvm)
14335975a2e0SPaul Mackerras 		return -EPERM;
14345975a2e0SPaul Mackerras 	if (vcpu->arch.irq_type)
14355975a2e0SPaul Mackerras 		return -EBUSY;
14365975a2e0SPaul Mackerras 
14375975a2e0SPaul Mackerras 	r = kvmppc_xics_create_icp(vcpu, xcpu);
14385975a2e0SPaul Mackerras 	if (!r)
14395975a2e0SPaul Mackerras 		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
14405975a2e0SPaul Mackerras 
14415975a2e0SPaul Mackerras 	return r;
14425975a2e0SPaul Mackerras }
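
/*
 * From userspace the whole setup is assumed to look like: create the
 * in-kernel XICS with KVM_CREATE_DEVICE, then attach each vcpu to an
 * ICP server via the per-vcpu KVM_CAP_IRQ_XICS capability, which ends
 * up in kvmppc_xics_connect_vcpu() above (vm_fd, vcpu_fd and
 * server_num are placeholders, error handling omitted):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	/* cd.fd is the device fd */
 *
 *	struct kvm_enable_cap cap = {
 *		.cap     = KVM_CAP_IRQ_XICS,
 *		.args[0] = cd.fd,
 *		.args[1] = server_num,		/* ICP server number */
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */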
14435975a2e0SPaul Mackerras 
1444bc5ad3f3SBenjamin Herrenschmidt void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
1445bc5ad3f3SBenjamin Herrenschmidt {
1446bc5ad3f3SBenjamin Herrenschmidt 	if (!vcpu->arch.icp)
1447bc5ad3f3SBenjamin Herrenschmidt 		return;
1448bc5ad3f3SBenjamin Herrenschmidt 	kfree(vcpu->arch.icp);
1449bc5ad3f3SBenjamin Herrenschmidt 	vcpu->arch.icp = NULL;
1450bc5ad3f3SBenjamin Herrenschmidt 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1451bc5ad3f3SBenjamin Herrenschmidt }
145225a2150bSPaul Mackerras 
14535d375199SPaul Mackerras void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
14545d375199SPaul Mackerras 			    unsigned long host_irq)
14555d375199SPaul Mackerras {
14565d375199SPaul Mackerras 	struct kvmppc_xics *xics = kvm->arch.xics;
14575d375199SPaul Mackerras 	struct kvmppc_ics *ics;
14585d375199SPaul Mackerras 	u16 idx;
14595d375199SPaul Mackerras 
14605d375199SPaul Mackerras 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
14615d375199SPaul Mackerras 	if (!ics)
14625d375199SPaul Mackerras 		return;
14635d375199SPaul Mackerras 
14645d375199SPaul Mackerras 	ics->irq_state[idx].host_irq = host_irq;
14655d375199SPaul Mackerras 	ics->irq_state[idx].intr_cpu = -1;
14665d375199SPaul Mackerras }
14675d375199SPaul Mackerras EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);
14685d375199SPaul Mackerras 
14695d375199SPaul Mackerras void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
14705d375199SPaul Mackerras 			    unsigned long host_irq)
14715d375199SPaul Mackerras {
14725d375199SPaul Mackerras 	struct kvmppc_xics *xics = kvm->arch.xics;
14735d375199SPaul Mackerras 	struct kvmppc_ics *ics;
14745d375199SPaul Mackerras 	u16 idx;
14755d375199SPaul Mackerras 
14765d375199SPaul Mackerras 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
14775d375199SPaul Mackerras 	if (!ics)
14785d375199SPaul Mackerras 		return;
14795d375199SPaul Mackerras 
14805d375199SPaul Mackerras 	ics->irq_state[idx].host_irq = 0;
14815d375199SPaul Mackerras }
14825d375199SPaul Mackerras EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);
1483