/*
 * QTest testcase for PowerNV 10 interrupt controller (xive2)
 * - Test irq to hardware thread
 * - Test 'Pull Thread Context to Odd Thread Reporting Line'
 * - Test irq to hardware group
 * - Test irq to hardware group going through backlog
 * - Test irq to pool thread
 *
 * Copyright (c) 2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "libqtest.h"

#include "pnv-xive2-common.h"
#include "hw/intc/pnv_xive2_regs.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"

#define SMT 4 /* some tests will break if less than 4 */


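/*
 * Program the VSD of one Virtual Structure Table (VST) through XSCOM.
 * Most tables are set in both the VC and PC engines; see the exceptions
 * at the end of the function.
 */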
static void set_table(QTestState *qts, uint64_t type, uint64_t addr)
{
    uint64_t vsd, size, log_size;

    /*
     * First, let's make sure that all the resources used fit in the
     * given table.
     */
    switch (type) {
    case VST_ESB:
        size = MAX_IRQS / 4;
        break;
    case VST_EAS:
        size = MAX_IRQS * 8;
        break;
    case VST_END:
        size = MAX_ENDS * 32;
        break;
    case VST_NVP:
    case VST_NVG:
    case VST_NVC:
        size = MAX_VPS * 32;
        break;
    case VST_SYNC:
        size = 64 * 1024;
        break;
    default:
        g_assert_not_reached();
    }

    g_assert_cmpuint(size, <=, XIVE_VST_SIZE);
    log_size = ctzl(XIVE_VST_SIZE) - 12;

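    /*
     * The VSD combines the access mode (exclusive here), the table
     * address and the table size, encoded as log2 of the number of
     * 4kB pages.
     */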
    vsd = ((uint64_t) VSD_MODE_EXCLUSIVE) << 62 | addr | log_size;
    pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_ADDR, type << 48);
    pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_DATA, vsd);

    if (type != VST_EAS && type != VST_IC && type != VST_ERQ) {
        pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_ADDR, type << 48);
        pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_DATA, vsd);
    }
}

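/*
 * Helpers to access the TIMA of a given hardware thread through the
 * interrupt controller's indirect TIMA pages (one page per thread,
 * indexed by PIR).
 */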
static void set_tima8(QTestState *qts, uint32_t pir, uint32_t offset,
                      uint8_t b)
{
    uint64_t ic_addr;

    ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
    qtest_writeb(qts, ic_addr + offset, b);
}

static void set_tima32(QTestState *qts, uint32_t pir, uint32_t offset,
                       uint32_t l)
{
    uint64_t ic_addr;

    ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
    qtest_writel(qts, ic_addr + offset, l);
}

static uint8_t get_tima8(QTestState *qts, uint32_t pir, uint32_t offset)
{
    uint64_t ic_addr;

    ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
    return qtest_readb(qts, ic_addr + offset);
}

static uint16_t get_tima16(QTestState *qts, uint32_t pir, uint32_t offset)
{
    uint64_t ic_addr;

    ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
    return qtest_readw(qts, ic_addr + offset);
}

static uint32_t get_tima32(QTestState *qts, uint32_t pir, uint32_t offset)
{
    uint64_t ic_addr;

    ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
    return qtest_readl(qts, ic_addr + offset);
}

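/*
 * Associate each hardware thread's POOL ring with NVP 0x100 + thread id
 * and mark the pool context valid (TM_QW2W2_VP).
 */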
static void reset_pool_threads(QTestState *qts)
{
    uint8_t first_group = 0;
    int i;

    for (i = 0; i < SMT; i++) {
        uint32_t nvp_idx = 0x100 + i;
        set_nvp(qts, nvp_idx, first_group);
        set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD0, 0x000000ff);
        set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD1, 0);
        set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | nvp_idx);
    }
}

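/*
 * (Re)initialize the PHYS ring of each hardware thread: NVP 0x80 + thread
 * id, NSR/IPB cleared and CPPR set to 0xff so that any priority can be
 * presented. With SMT >= 4, the NVPs are also attached to NVG groups.
 */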
static void reset_hw_threads(QTestState *qts)
{
    uint8_t first_group = 0;
    uint32_t w1 = 0x000000ff;
    int i;

    if (SMT >= 4) {
        /* define 2 groups of 2, part of a bigger group of size 4 */
        set_nvg(qts, 0x80, 0x02);
        set_nvg(qts, 0x82, 0x02);
        set_nvg(qts, 0x81, 0);
        first_group = 0x01;
        w1 = 0x000300ff;
    }

    for (i = 0; i < SMT; i++) {
        set_nvp(qts, 0x80 + i, first_group);
        set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0, 0x00ff00ff);
        set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD1, w1);
        set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD2, 0x80000000);
    }
}

static void reset_state(QTestState *qts)
{
    size_t mem_used = XIVE_MEM_END - XIVE_MEM_START;

    qtest_memset(qts, XIVE_MEM_START, 0, mem_used);
    reset_hw_threads(qts);
    reset_pool_threads(qts);
}

static void init_xive(QTestState *qts)
{
    uint64_t val1, val2, range;

    /*
     * We can take a few shortcuts here, as we know the default values
     * used for xive initialization.
     */

    /*
     * Set the BARs.
     * We reuse the same values used by firmware to ease debug.
     */
    pnv_xive_xscom_write(qts, X_CQ_IC_BAR, XIVE_IC_BAR);
    pnv_xive_xscom_write(qts, X_CQ_TM_BAR, XIVE_TM_BAR);

    /* ESB and NVPG use 2 pages per resource. The others use only one page */
    range = (MAX_IRQS << 17) >> 25;
    val1 = XIVE_ESB_BAR | range;
    pnv_xive_xscom_write(qts, X_CQ_ESB_BAR, val1);

    range = (MAX_ENDS << 16) >> 25;
    val1 = XIVE_END_BAR | range;
    pnv_xive_xscom_write(qts, X_CQ_END_BAR, val1);

    range = (MAX_VPS << 17) >> 25;
    val1 = XIVE_NVPG_BAR | range;
    pnv_xive_xscom_write(qts, X_CQ_NVPG_BAR, val1);

    range = (MAX_VPS << 16) >> 25;
    val1 = XIVE_NVC_BAR | range;
    pnv_xive_xscom_write(qts, X_CQ_NVC_BAR, val1);

    /*
     * Enable hw threads.
     * We check the value written back. It is not strictly needed with
     * the current implementation, but it validates the xscom read path
     * and it is what the hardware procedure specifies.
     */
    val1 = 0xF000000000000000ull; /* core 0, 4 threads */
    pnv_xive_xscom_write(qts, X_TCTXT_EN0, val1);
    val2 = pnv_xive_xscom_read(qts, X_TCTXT_EN0);
    g_assert_cmphex(val1, ==, val2);

    /* Memory tables */
    set_table(qts, VST_ESB, XIVE_ESB_MEM);
    set_table(qts, VST_EAS, XIVE_EAS_MEM);
    set_table(qts, VST_END, XIVE_END_MEM);
    set_table(qts, VST_NVP, XIVE_NVP_MEM);
    set_table(qts, VST_NVG, XIVE_NVG_MEM);
    set_table(qts, VST_NVC, XIVE_NVC_MEM);
    set_table(qts, VST_SYNC, XIVE_SYNC_MEM);

    reset_hw_threads(qts);
    reset_pool_threads(qts);
}

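/*
 * In the checks below, an NSR of 0x80 means an event is pending in the
 * PHYS ring (HE=PHYS) and a CPPR of 0xff means the thread accepts
 * interrupts of any priority.
 */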
static void test_hw_irq(QTestState *qts)
{
    uint32_t irq = 2;
    uint32_t irq_data = 0x600df00d;
    uint32_t end_index = 5;
    uint32_t target_pir = 1;
    uint32_t target_nvp = 0x80 + target_pir;
    uint8_t priority = 5;
    uint32_t reg32;
    uint16_t reg16;
    uint8_t pq, nsr, cppr;

    g_test_message("=========================================================");
    g_test_message("Testing irq %d to hardware thread %d", irq, target_pir);

    /* irq config */
    set_eas(qts, irq, end_index, irq_data);
    set_end(qts, end_index, target_nvp, priority, false /* group */);

    /* enable and trigger irq */
    get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
    set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);

    /* check irq is raised on cpu */
    pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
    g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);

    reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
    nsr = reg32 >> 24;
    cppr = (reg32 >> 16) & 0xFF;
    g_assert_cmphex(nsr, ==, 0x80);
    g_assert_cmphex(cppr, ==, 0xFF);

    /* ack the irq */
    reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG);
    nsr = reg16 >> 8;
    cppr = reg16 & 0xFF;
    g_assert_cmphex(nsr, ==, 0x80);
    g_assert_cmphex(cppr, ==, priority);

    /* check irq data is what was configured */
    reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
    g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));

    /* End Of Interrupt */
    set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
    pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
    g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);

    /* reset CPPR */
    set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
    reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
    nsr = reg32 >> 24;
    cppr = (reg32 >> 16) & 0xFF;
    g_assert_cmphex(nsr, ==, 0x00);
    g_assert_cmphex(cppr, ==, 0xFF);
}

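/*
 * A pool interrupt is signalled through the PHYS ring NSR (HE=POOL,
 * i.e. 0x40) while the pending bit is recorded in the POOL ring IPB.
 */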
static void test_pool_irq(QTestState *qts)
{
    uint32_t irq = 2;
    uint32_t irq_data = 0x600d0d06;
    uint32_t end_index = 5;
    uint32_t target_pir = 1;
    uint32_t target_nvp = 0x100 + target_pir;
    uint8_t priority = 5;
    uint32_t reg32;
    uint16_t reg16;
    uint8_t pq, nsr, cppr, ipb;

    g_test_message("=========================================================");
    g_test_message("Testing irq %d to pool thread %d", irq, target_pir);

    /* irq config */
    set_eas(qts, irq, end_index, irq_data);
    set_end(qts, end_index, target_nvp, priority, false /* group */);

    /* enable and trigger irq */
    get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
    set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);

    /* check irq is raised on cpu */
    pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
    g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);

    /* check TIMA values in the PHYS ring (shared by POOL ring) */
    reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
    nsr = reg32 >> 24;
    cppr = (reg32 >> 16) & 0xFF;
    g_assert_cmphex(nsr, ==, 0x40);
    g_assert_cmphex(cppr, ==, 0xFF);

    /* check TIMA values in the POOL ring */
    reg32 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD0);
    nsr = reg32 >> 24;
    cppr = (reg32 >> 16) & 0xFF;
    ipb = (reg32 >> 8) & 0xFF;
    g_assert_cmphex(nsr, ==, 0);
    g_assert_cmphex(cppr, ==, 0);
    g_assert_cmphex(ipb, ==, 0x80 >> priority);

    /* ack the irq */
    reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG);
    nsr = reg16 >> 8;
    cppr = reg16 & 0xFF;
    g_assert_cmphex(nsr, ==, 0x40);
    g_assert_cmphex(cppr, ==, priority);

    /* check irq data is what was configured */
    reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
    g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));

    /* check IPB is cleared in the POOL ring */
    reg32 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD0);
    ipb = (reg32 >> 8) & 0xFF;
    g_assert_cmphex(ipb, ==, 0);

    /* End Of Interrupt */
    set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
    pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
    g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);

    /* reset CPPR */
    set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
    reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
    nsr = reg32 >> 24;
    cppr = (reg32 >> 16) & 0xFF;
    g_assert_cmphex(nsr, ==, 0x00);
    g_assert_cmphex(cppr, ==, 0xFF);
}

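/*
 * The 'Pull Thread Context to Odd Thread Reporting Line' operation stores
 * a snapshot of the thread context into the cache-line pair designated by
 * the NVP; XIVE_ODD_CL is the offset of the odd line within that pair.
 */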
#define XIVE_ODD_CL 0x80
static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts)
{
    uint32_t target_pir = 1;
    uint32_t target_nvp = 0x80 + target_pir;
    Xive2Nvp nvp;
    uint8_t cl_pair[XIVE_REPORT_SIZE];
    uint32_t qw1w0, qw3w0, qw1w2, qw2w2;
    uint8_t qw3b8;
    uint32_t cl_word;
    uint32_t word2;

    g_test_message("=========================================================");
    g_test_message("Testing 'Pull Thread Context to Odd Thread Reporting " \
                   "Line'");

    /* clear odd cache line prior to pull operation */
    memset(cl_pair, 0, sizeof(cl_pair));
    get_nvp(qts, target_nvp, &nvp);
    set_cl_pair(qts, &nvp, cl_pair);

    /* Read some values from TIMA that we expect to see in cacheline */
    qw1w0 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD0);
    qw3w0 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
    qw1w2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2);
    qw2w2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2);
    qw3b8 = get_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2);

    /* Execute the pull operation */
    set_tima8(qts, target_pir, TM_SPC_PULL_PHYS_CTX_OL, 0);

    /* Verify odd cache line values match TIMA after pull operation */
    get_cl_pair(qts, &nvp, cl_pair);
    memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD0], 4);
    g_assert_cmphex(qw1w0, ==, be32_to_cpu(cl_word));
    memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD0], 4);
    g_assert_cmphex(qw3w0, ==, be32_to_cpu(cl_word));
    memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD2], 4);
    g_assert_cmphex(qw1w2, ==, be32_to_cpu(cl_word));
    memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW2_HV_POOL + TM_WORD2], 4);
    g_assert_cmphex(qw2w2, ==, be32_to_cpu(cl_word));
    g_assert_cmphex(qw3b8, ==,
                    cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD2]);

    /* Verify that all TIMA valid bits for target thread are cleared */
    word2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2);
    g_assert_cmphex(xive_get_field32(TM_QW1W2_VO, word2), ==, 0);
    word2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2);
    g_assert_cmphex(xive_get_field32(TM_QW2W2_VP, word2), ==, 0);
    word2 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2);
    g_assert_cmphex(xive_get_field32(TM_QW3W2_VT, word2), ==, 0);
}

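/*
 * With group delivery, the END targets NVG 0x81 (a group of 4 threads) and
 * the presenter picks one of them. The NSR then carries a group level in
 * its low bits on top of HE=PHYS, hence the expected value of 0x82.
 */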
static void test_hw_group_irq(QTestState *qts)
{
    uint32_t irq = 100;
    uint32_t irq_data = 0xdeadbeef;
    uint32_t end_index = 23;
    uint32_t chosen_one;
    uint32_t target_nvp = 0x81; /* group size = 4 */
    uint8_t priority = 6;
    uint32_t reg32;
    uint16_t reg16;
    uint8_t pq, nsr, cppr;

    g_test_message("=========================================================");
    g_test_message("Testing irq %d to hardware group of size 4", irq);

    /* irq config */
    set_eas(qts, irq, end_index, irq_data);
    set_end(qts, end_index, target_nvp, priority, true /* group */);

    /* enable and trigger irq */
    get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
    set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);

    /* check irq is raised on cpu */
    pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
    g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);

    /* find the targeted vCPU */
    for (chosen_one = 0; chosen_one < SMT; chosen_one++) {
        reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
        nsr = reg32 >> 24;
        if (nsr == 0x82) {
            break;
        }
    }
    g_assert_cmphex(chosen_one, <, SMT);
    cppr = (reg32 >> 16) & 0xFF;
    g_assert_cmphex(nsr, ==, 0x82);
    g_assert_cmphex(cppr, ==, 0xFF);

    /* ack the irq */
    reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG);
    nsr = reg16 >> 8;
    cppr = reg16 & 0xFF;
    g_assert_cmphex(nsr, ==, 0x82);
    g_assert_cmphex(cppr, ==, priority);

    /* check irq data is what was configured */
    reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
    g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));

    /* End Of Interrupt */
    set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
    pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
    g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);

    /* reset CPPR */
    set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
    reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
    nsr = reg32 >> 24;
    cppr = (reg32 >> 16) & 0xFF;
    g_assert_cmphex(nsr, ==, 0x00);
    g_assert_cmphex(cppr, ==, 0xFF);
}

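/*
 * Same group scenario, but the interrupt is triggered while every thread's
 * CPPR blocks it, so it goes through the backlog. Relaxing one thread's
 * CPPR afterwards makes the backlogged interrupt get presented to it.
 */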
static void test_hw_group_irq_backlog(QTestState *qts)
{
    uint32_t irq = 31;
    uint32_t irq_data = 0x01234567;
    uint32_t end_index = 129;
    uint32_t target_nvp = 0x81; /* group size = 4 */
    uint32_t chosen_one = 3;
    uint8_t blocking_priority, priority = 3;
    uint32_t reg32;
    uint16_t reg16;
    uint8_t pq, nsr, cppr, lsmfb, i;

    g_test_message("=========================================================");
    g_test_message("Testing irq %d to hardware group of size 4 going " \
                   "through backlog",
                   irq);

    /*
     * set current priority of all threads in the group to something
     * higher than what we're about to trigger
     */
    blocking_priority = priority - 1;
    for (i = 0; i < SMT; i++) {
        set_tima8(qts, i, TM_QW3_HV_PHYS + TM_CPPR, blocking_priority);
    }

    /* irq config */
    set_eas(qts, irq, end_index, irq_data);
    set_end(qts, end_index, target_nvp, priority, true /* group */);

    /* enable and trigger irq */
    get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
    set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);

    /* check irq is raised on cpu */
    pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
    g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);

    /*
     * check no interrupt is pending on any of the possible targets and
     * that the backlogged priority is recorded in each thread's LSMFB
     */
    for (i = 0; i < SMT; i++) {
        reg32 = get_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0);
        nsr = reg32 >> 24;
        cppr = (reg32 >> 16) & 0xFF;
        lsmfb = reg32 & 0xFF;
        g_assert_cmphex(nsr, ==, 0x0);
        g_assert_cmphex(cppr, ==, blocking_priority);
        g_assert_cmphex(lsmfb, ==, priority);
    }

    /* lower priority of one thread */
    set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, priority + 1);

    /* check backlogged interrupt is presented */
    reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
    nsr = reg32 >> 24;
    cppr = (reg32 >> 16) & 0xFF;
    g_assert_cmphex(nsr, ==, 0x82);
    g_assert_cmphex(cppr, ==, priority + 1);

    /* ack the irq */
    reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG);
    nsr = reg16 >> 8;
    cppr = reg16 & 0xFF;
    g_assert_cmphex(nsr, ==, 0x82);
    g_assert_cmphex(cppr, ==, priority);

    /* check irq data is what was configured */
    reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
    g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));

    /* End Of Interrupt */
    set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
    pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
    g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);

    /* reset CPPR */
    set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
    reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
    nsr = reg32 >> 24;
    cppr = (reg32 >> 16) & 0xFF;
    lsmfb = reg32 & 0xFF;
    g_assert_cmphex(nsr, ==, 0x00);
    g_assert_cmphex(cppr, ==, 0xFF);
    g_assert_cmphex(lsmfb, ==, 0xFF);
}

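/*
 * Boot a powernv10 machine with a single SMT-4 core. The tests drive the
 * interrupt controller directly through XSCOM and MMIO, so no guest code
 * needs to run.
 */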
static void test_xive(void)
{
    QTestState *qts;

    qts = qtest_initf("-M powernv10 -smp %d,cores=1,threads=%d -nographic "
                      "-nodefaults -serial mon:stdio -S "
                      "-d guest_errors -trace '*xive*'",
                      SMT, SMT);
    init_xive(qts);

    test_hw_irq(qts);

    /* omit reset_state here and use settings from test_hw_irq */
    test_pull_thread_ctx_to_odd_thread_cl(qts);

    reset_state(qts);
    test_pool_irq(qts);

    reset_state(qts);
    test_hw_group_irq(qts);

    reset_state(qts);
    test_hw_group_irq_backlog(qts);

    reset_state(qts);
    test_flush_sync_inject(qts);

    reset_state(qts);
    test_nvpg_bar(qts);

    qtest_quit(qts);
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    qtest_add_func("xive2", test_xive);
    return g_test_run();
}