xref: /kvm-unit-tests/lib/arm/gic-v3.c (revision 5b70cbdb7bc2ea65096b51565c75815cc95945b8)
1 /*
2  * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
3  *
4  * This work is licensed under the terms of the GNU LGPL, version 2.
5  */
6 #include <asm/gic.h>
7 #include <asm/io.h>
8 #include <alloc_page.h>
9 
10 void gicv3_set_redist_base(size_t stride)
11 {
12 	u32 aff = mpidr_compress(get_mpidr());
13 	u64 typer;
14 	int i = 0;
15 
16 	while (gicv3_data.redist_bases[i]) {
17 		void *ptr = gicv3_data.redist_bases[i];
18 		do {
19 			typer = gicv3_read_typer(ptr + GICR_TYPER);
20 			if ((typer >> 32) == aff) {
21 				gicv3_redist_base() = ptr;
22 				return;
23 			}
24 			ptr += stride; /* skip RD_base, SGI_base, etc. */
25 		} while (!(typer & GICR_TYPER_LAST));
26 		++i;
27 	}
28 
29 	/* should never reach here */
30 	assert(0);
31 }
32 
/*
 * Bring the distributor and this cpu's redistributor/cpu-interface up
 * with defaults: all interrupts placed in Group 1, SGIs enabled at the
 * default priority, and the priority mask opened.
 *
 * The MMIO accesses below are order-sensitive: the distributor is
 * disabled, reconfigured and re-enabled before the per-cpu setup runs.
 */
void gicv3_enable_defaults(void)
{
	void *dist = gicv3_dist_base();
	void *sgi_base;
	unsigned int i;

	/* Read the implemented IRQ count; 1020 is the highest valid SPI ID. */
	gicv3_data.irq_nr = GICD_TYPER_IRQS(readl(dist + GICD_TYPER));
	if (gicv3_data.irq_nr > 1020)
		gicv3_data.irq_nr = 1020;

	/* Disable the distributor while we reconfigure it. */
	writel(0, dist + GICD_CTLR);
	gicv3_dist_wait_for_rwp();

	/* Re-enable with affinity routing and both Group 1 enables set. */
	writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
	       dist + GICD_CTLR);
	gicv3_dist_wait_for_rwp();

	/* Put the IRQs in Group 1 (IGROUPR registers are 4 bytes apart). */
	for (i = 0; i < gicv3_data.irq_nr; i += 4)
		writel(~0, dist + GICD_IGROUPR + i);

	/* Locate our redistributor if it hasn't been found yet. */
	if (!gicv3_redist_base())
		gicv3_set_redist_base(SZ_64K * 2);
	sgi_base = gicv3_sgi_base();

	/* Put SGIs/PPIs in Group 1 as well. */
	writel(~0, sgi_base + GICR_IGROUPR0);

	/* Default priority for the 16 SGIs, four 8-bit fields per write. */
	for (i = 0; i < 16; i += 4)
		writel(GICD_INT_DEF_PRI_X4, sgi_base + GICR_IPRIORITYR0 + i);

	/* Enable all SGIs at the redistributor. */
	writel(GICD_INT_EN_SET_SGI, sgi_base + GICR_ISENABLER0);

	/* Open the priority mask and enable Group 1 delivery to this cpu. */
	gicv3_write_pmr(GICC_INT_PRI_THRESHOLD);
	gicv3_write_grpen1(1);
}
67 
/*
 * Extract the INTID from an ICC_IAR1_EL1 value; the interrupt number
 * occupies the low 24 bits of the register.
 */
u32 gicv3_iar_irqnr(u32 iar)
{
	return iar & 0x00ffffff;
}
72 
/*
 * Send SGI @irq to every cpu in @dest.
 *
 * ICC_SGI1R_EL1 addresses its targets as one cluster (Aff3.Aff2.Aff1)
 * plus a 16-bit target list of Aff0 values within that cluster, so the
 * mask is walked cluster by cluster, with one register write per
 * cluster covering all of its selected cpus.
 */
void gicv3_ipi_send_mask(int irq, const cpumask_t *dest)
{
	u16 tlist;
	int cpu;

	/* Only SGIs (INTIDs 0-15) can be generated through ICC_SGI1R_EL1. */
	assert(irq < 16);

	/*
	 * For each cpu in the mask collect its peers, which are also in
	 * the mask, in order to form target lists.
	 */
	for_each_cpu(cpu, dest) {
		u64 mpidr = cpus[cpu], sgi1r;
		u64 cluster_id;

		/*
		 * GICv3 can send IPIs to up 16 peer cpus with a single
		 * write to ICC_SGI1R_EL1 (using the target list). Peers
		 * are cpus that have nearly identical MPIDRs, the only
		 * difference being Aff0. The matching upper affinity
		 * levels form the cluster ID.
		 */
		cluster_id = mpidr & ~0xffUL;
		tlist = 0;

		/*
		 * Sort of open code for_each_cpu in order to have a
		 * nested for_each_cpu loop.
		 */
		while (cpu < nr_cpus) {
			/* Aff0 >= 16 can't be expressed in the target list. */
			if ((mpidr & 0xff) >= 16) {
				printf("cpu%d MPIDR:aff0 is %d (>= 16)!\n",
					cpu, (int)(mpidr & 0xff));
				break;
			}

			tlist |= 1 << (mpidr & 0xf);

			cpu = cpumask_next(cpu, dest);
			if (cpu >= nr_cpus)
				break;

			mpidr = cpus[cpu];

			if (cluster_id != (mpidr & ~0xffUL)) {
				/*
				 * The next cpu isn't in our cluster. Roll
				 * back the cpu index allowing the outer
				 * for_each_cpu to find it again with
				 * cpumask_next
				 */
				--cpu;
				break;
			}
		}

		/* Send the IPIs for the target list of this cluster */
		sgi1r = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
			 MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
			 irq << 24				|
			 MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
			 tlist);

		gicv3_write_sgi1r(sgi1r);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
142 
143 void gicv3_ipi_send_single(int irq, int cpu)
144 {
145 	cpumask_t dest;
146 
147 	cpumask_clear(&dest);
148 	cpumask_set_cpu(cpu, &dest);
149 	gicv3_ipi_send_mask(irq, &dest);
150 }
151 
152 #if defined(__aarch64__)
153 
154 /*
155  * alloc_lpi_tables - Allocate LPI config and pending tables
156  * and set PROPBASER (shared by all rdistributors) and per
157  * redistributor PENDBASER.
158  *
159  * gicv3_set_redist_base() must be called before
160  */
void gicv3_lpi_alloc_tables(void)
{
	/* Each table is 64KB; convert that to a page-allocator order. */
	unsigned long n = SZ_64K >> PAGE_SHIFT;
	/*
	 * NOTE(review): this assumes fls() returns the 0-based index of
	 * the top set bit, so that for a power-of-two n the order exactly
	 * covers n pages; with a 1-based fls() this would allocate double
	 * the needed size — TODO confirm fls() semantics in this tree.
	 */
	unsigned long order = fls(n);
	u64 prop_val;
	int cpu;

	/* gicv3_set_redist_base() must have run on this cpu already. */
	assert(gicv3_redist_base());

	/* One LPI configuration (property) table shared by all rdists. */
	gicv3_data.lpi_prop = alloc_pages(order);

	/* ID bits = 13, ie. up to 14b LPI INTID */
	prop_val = (u64)(virt_to_phys(gicv3_data.lpi_prop)) | 13;

	for_each_present_cpu(cpu) {
		u64 pend_val;
		void *ptr;

		ptr = gicv3_data.redist_base[cpu];

		/* Every redistributor points at the shared property table... */
		writeq(prop_val, ptr + GICR_PROPBASER);

		/* ...but each one gets its own pending table. */
		gicv3_data.lpi_pend[cpu] = alloc_pages(order);
		pend_val = (u64)(virt_to_phys(gicv3_data.lpi_pend[cpu]));
		writeq(pend_val, ptr + GICR_PENDBASER);
	}
}
188 
189 void gicv3_lpi_set_clr_pending(int rdist, int n, bool set)
190 {
191 	u8 *ptr = gicv3_data.lpi_pend[rdist];
192 	u8 mask = 1 << (n % 8), byte;
193 
194 	ptr += (n / 8);
195 	byte = *ptr;
196 	if (set)
197 		byte |=  mask;
198 	else
199 		byte &= ~mask;
200 	*ptr = byte;
201 }
202 
203 static void gicv3_lpi_rdist_ctrl(u32 redist, bool set)
204 {
205 	void *ptr;
206 	u64 val;
207 
208 	assert(redist < nr_cpus);
209 
210 	ptr = gicv3_data.redist_base[redist];
211 	val = readl(ptr + GICR_CTLR);
212 	if (set)
213 		val |= GICR_CTLR_ENABLE_LPIS;
214 	else
215 		val &= ~GICR_CTLR_ENABLE_LPIS;
216 	writel(val,  ptr + GICR_CTLR);
217 }
218 
/* Enable LPI delivery on redistributor @redist (GICR_CTLR.EnableLPIs = 1). */
void gicv3_lpi_rdist_enable(int redist)
{
	gicv3_lpi_rdist_ctrl(redist, true);
}
/* Disable LPI delivery on redistributor @redist (GICR_CTLR.EnableLPIs = 0). */
void gicv3_lpi_rdist_disable(int redist)
{
	gicv3_lpi_rdist_ctrl(redist, false);
}
227 #endif /* __aarch64__ */
228