xref: /qemu/hw/intc/pnv_xive2.c (revision 06b40d250ecfa1633209c2e431a7a38acfd03a98)
1 /*
2  * QEMU PowerPC XIVE2 interrupt controller model  (POWER10)
3  *
4  * Copyright (c) 2019-2024, IBM Corporation.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "qapi/error.h"
12 #include "target/ppc/cpu.h"
13 #include "system/cpus.h"
14 #include "system/dma.h"
15 #include "hw/ppc/fdt.h"
16 #include "hw/ppc/pnv.h"
17 #include "hw/ppc/pnv_chip.h"
18 #include "hw/ppc/pnv_core.h"
19 #include "hw/ppc/pnv_xscom.h"
20 #include "hw/ppc/xive2.h"
21 #include "hw/ppc/pnv_xive.h"
22 #include "hw/ppc/xive_regs.h"
23 #include "hw/ppc/xive2_regs.h"
24 #include "hw/ppc/ppc.h"
25 #include "hw/qdev-properties.h"
26 #include "system/reset.h"
27 #include "system/qtest.h"
28 
29 #include <libfdt.h>
30 
31 #include "pnv_xive2_regs.h"
32 
33 #undef XIVE2_DEBUG
34 
/* XIVE Sync or Flush Notification Block: one 32-byte chunk per targeted
 * IC topology id, into which HW writes a completion byte (see the
 * PNV_XIVE2_QUEUE_*/PNV_XIVE2_CACHE_* offsets below). */
typedef struct XiveSfnBlock {
    uint8_t bytes[32];
} XiveSfnBlock;

/* XIVE Thread Sync or Flush Notification Area: per-thread array of SFN
 * blocks, indexed by the topology id of the targeted IC's chip. */
typedef struct XiveThreadNA {
    XiveSfnBlock topo[16];
} XiveThreadNA;
44 
/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4        /* 4 PQ entries (2 bits each) per byte */

/* Static geometry of one VST: entry size and maximum block count */
typedef struct XiveVstInfo {
    const char *name;           /* table name, for error reporting */
    uint32_t    size;           /* size of one entry in bytes */
    uint32_t    max_blocks;     /* number of supported blocks */
} XiveVstInfo;
55 
/* Geometry of each VST type; indexed by the VST_* identifiers */
static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),     16 },
    [VST_ESB]  = { "ESB",  1,                    16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),     16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),     16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc),    16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc),    16 },

    [VST_IC]  =  { "IC",   1, /* ? */            16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                   VC_QUEUE_COUNT },
};
83 
/*
 * Log a guest error tagged with this controller's chip id.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement: the previous version carried a trailing ';' inside the
 * expansion, which produced an empty statement at every call site and
 * would break an unbraced if/else.
 */
#define xive2_error(xive, fmt, ...)                                     \
    do {                                                                \
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",          \
                      (xive)->chip->chip_id, ## __VA_ARGS__);           \
    } while (0)
87 
88 /*
89  * TODO: Document block id override
90  */
pnv_xive2_block_id(PnvXive2 * xive)91 static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
92 {
93     uint8_t blk = xive->chip->chip_id;
94     uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];
95 
96     if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
97         blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
98     }
99 
100     return blk;
101 }
102 
103 /*
104  * Remote access to controllers. HW uses MMIOs. For now, a simple scan
105  * of the chips is good enough.
106  *
107  * TODO: Block scope support
108  */
pnv_xive2_get_remote(uint8_t blk)109 static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
110 {
111     PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
112     int i;
113 
114     for (i = 0; i < pnv->num_chips; i++) {
115         Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
116         PnvXive2 *xive = &chip10->xive;
117 
118         if (pnv_xive2_block_id(xive) == blk) {
119             return xive;
120         }
121     }
122     return NULL;
123 }
124 
125 /*
126  * VST accessors for ESB, EAT, ENDT, NVP
127  *
128  * Indirect VST tables are arrays of VSDs pointing to a page (of same
129  * size). Each page is a direct VST table.
130  */
131 
132 #define XIVE_VSD_SIZE 8
133 
/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    switch (page_shift) {
    case 12:    /* 4K */
    case 16:    /* 64K */
    case 21:    /* 2M */
    case 24:    /* 16M */
        return 1;
    default:
        return 0;
    }
}
140 
/*
 * Compute the guest address of entry @idx in a direct VST described by
 * @vsd. Returns 0 when the index falls outside the table.
 */
static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t base = vsd & VSD_ADDRESS_MASK;
    uint64_t tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t last_idx = tsize / info->size - 1;

    if (idx > last_idx) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, last_idx);
#endif
        return 0;
    }

    return base + idx * info->size;
}
160 
/*
 * Compute the guest address of entry @idx in an indirect VST of type
 * @type. @vsd is the top-level descriptor: it points to a page-sized
 * table of VSDs, each of which describes one direct page of entries.
 * Returns 0 on any validation failure.
 */
static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    /* NOTE(review): the ldq_be_dma() result is unchecked; on a DMA
     * error 'vsd' keeps its previous value — consider handling it */
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    /* Number of entries held by one direct page */
    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    /* Resolve the entry within the selected direct page */
    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
218 
pnv_xive2_nvc_table_compress_shift(PnvXive2 * xive)219 static uint8_t pnv_xive2_nvc_table_compress_shift(PnvXive2 *xive)
220 {
221     uint8_t shift =  GETFIELD(PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS,
222                               xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
223     return shift > 8 ? 0 : shift;
224 }
225 
pnv_xive2_nvg_table_compress_shift(PnvXive2 * xive)226 static uint8_t pnv_xive2_nvg_table_compress_shift(PnvXive2 *xive)
227 {
228     uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS,
229                              xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
230     return shift > 8 ? 0 : shift;
231 }
232 
/*
 * Return the guest address of entry @idx of VST @type for block @blk.
 * Forward-mode VSDs redirect the lookup to the owning remote chip;
 * NVG/NVC indices are scaled down by the configured compression shift.
 * Returns 0 on error.
 */
static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];
    if (vsd == 0) {
        xive2_error(xive, "VST: vsd == 0 block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    /* Compressed NVG/NVC tables: scale the index accordingly */
    if (type == VST_NVG) {
        idx >>= pnv_xive2_nvg_table_compress_shift(xive);
    } else if (type == VST_NVC) {
        idx >>= pnv_xive2_nvc_table_compress_shift(xive);
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}
271 
/*
 * Read one entry of VST @type at @blk/@idx into @data.
 * Returns 0 on success, -1 on address resolution or DMA failure.
 */
static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (address_space_read(&address_space_memory, addr,
                           MEMTXATTRS_UNSPECIFIED, data,
                           info->size) != MEMTX_OK) {
        xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
293 
294 #define XIVE_VST_WORD_ALL -1
295 
/*
 * Write one entry of VST @type at @blk/@idx from @data. If
 * @word_number is XIVE_VST_WORD_ALL the whole entry is written,
 * otherwise only the selected 4-byte word.
 * Returns 0 on success, -1 on address resolution or DMA failure.
 */
static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        /* Fixed: missing space before "for" in the log message */
        xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
325 
/*
 * Xive2Router hook: fetch the PQ state of source @idx. Only local
 * sources are handled; a remote block id is an error.
 */
static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (blk != pnv_xive2_block_id(xive)) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}
339 
/*
 * Xive2Router hook: set the PQ state of source @idx and return the
 * previous state in *@pq. Only local sources are handled.
 */
static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (blk != pnv_xive2_block_id(xive)) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}
353 
/* Xive2Router hook: load the END entry at @blk/@idx */
static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    return pnv_xive2_vst_read(xive, VST_END, blk, idx, end);
}
359 
/* Xive2Router hook: store word @word_number of the END at @blk/@idx */
static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, end, word_number);
}
366 
/*
 * PIR of the currently executing CPU. Under qtest there is no vCPU
 * context, so 0 is returned.
 */
static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
{
    if (qtest_enabled()) {
        return 0;
    }
    return ppc_cpu_pir(POWERPC_CPU(current_cpu));
}
375 
376 /*
377  * After SW injects a Queue Sync or Cache Flush operation, HW will notify
378  * SW of the completion of the operation by writing a byte of all 1's (0xff)
379  * to a specific memory location.  The memory location is calculated by first
380  * looking up a base address in the SYNC VSD using the Topology ID of the
381  * originating thread as the "block" number.  This points to a
382  * 64k block of memory that is further divided into 128 512 byte chunks of
383  * memory, which is indexed by the thread id of the requesting thread.
384  * Finally, this 512 byte chunk of memory is divided into 16 32 byte
385  * chunks which are indexed by the topology id of the targeted IC's chip.
386  * The values below are the offsets into that 32 byte chunk of memory for
387  * each type of cache flush or queue sync operation.
388  */
389 #define PNV_XIVE2_QUEUE_IPI              0x00
390 #define PNV_XIVE2_QUEUE_HW               0x01
391 #define PNV_XIVE2_QUEUE_NXC              0x02
392 #define PNV_XIVE2_QUEUE_INT              0x03
393 #define PNV_XIVE2_QUEUE_OS               0x04
394 #define PNV_XIVE2_QUEUE_POOL             0x05
395 #define PNV_XIVE2_QUEUE_HARD             0x06
396 #define PNV_XIVE2_CACHE_ENDC             0x08
397 #define PNV_XIVE2_CACHE_ESBC             0x09
398 #define PNV_XIVE2_CACHE_EASC             0x0a
399 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO   0x10
400 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO    0x11
401 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI   0x12
402 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI    0x13
403 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI   0x14
404 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI    0x15
405 #define PNV_XIVE2_CACHE_NXC              0x18
406 
/*
 * Signal completion of a queue sync or cache flush of kind @type by
 * writing 0xff into the requesting thread's Sync/Flush Notification
 * Area, at the offset for this IC's topology id (see offsets above).
 */
static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
{
    int pir = pnv_xive2_get_current_pir(xive);
    int thread_nr = PNV10_PIR2THREAD(pir);
    int thread_topo_id = PNV10_PIR2CHIP(pir);
    uint64_t offset = xive->chip->chip_id * sizeof(XiveSfnBlock);
    uint64_t addr;
    MemTxResult result;

    /* Base address of the requesting thread's notification area */
    addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);
    if (!addr) {
        xive2_error(xive, "VST: no SYNC entry %x/%x !?",
                    thread_topo_id, thread_nr);
        return -1;
    }

    address_space_stb(&address_space_memory, addr + offset + type, 0xff,
                      MEMTXATTRS_UNSPECIFIED, &result);
    assert(result == MEMTX_OK);

    return 0;
}
433 
/*
 * Write back the END entry staged in the ENDC watch engine data
 * registers to its backing store.
 */
static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
{
    uint64_t endc_watch[4];
    int spec_reg, data_reg, i;
    uint8_t blk;
    uint32_t idx;

    assert(watch_engine < ARRAY_SIZE(endc_watch));

    /* Watch engine registers are laid out with a 0x40 byte stride */
    spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
    idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);

    /* Entries are stored big-endian in guest memory */
    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                              XIVE_VST_WORD_ALL);
}
455 
/*
 * Load the END entry at the block/index selected by the ENDC watch
 * engine into its data registers. A missing entry is reported and the
 * registers are filled with zeroes.
 */
static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
    uint64_t endc_watch[4] = { 0 };
    int spec_reg, data_reg, i;
    uint8_t blk;
    uint32_t idx;

    assert(watch_engine < ARRAY_SIZE(endc_watch));

    /* Watch engine registers are laid out with a 0x40 byte stride */
    spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
    idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    /* Entries are stored big-endian in guest memory */
    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
    }
}
478 
/* Xive2Router hook: load the NVP entry at @blk/@idx */
static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    return pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nvp);
}
484 
/* Xive2Router hook: store word @word_number of the NVP at @blk/@idx */
static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nvp, word_number);
}
491 
/* Xive2Router hook: load an NVG or NVC (@crowd) entry at @blk/@idx */
static int pnv_xive2_get_nvgc(Xive2Router *xrtr, bool crowd,
                              uint8_t blk, uint32_t idx,
                              Xive2Nvgc *nvgc)
{
    uint32_t table = crowd ? VST_NVC : VST_NVG;

    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), table, blk, idx, nvgc);
}
499 
/* Xive2Router hook: store an NVG or NVC (@crowd) entry at @blk/@idx */
static int pnv_xive2_write_nvgc(Xive2Router *xrtr, bool crowd,
                                uint8_t blk, uint32_t idx,
                                Xive2Nvgc *nvgc)
{
    uint32_t table = crowd ? VST_NVC : VST_NVG;

    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), table, blk, idx, nvgc,
                               XIVE_VST_WORD_ALL);
}
508 
/*
 * Translate a PC NXC watch type into the corresponding VST table id.
 * Returns 0 on success, -1 (with a guest error logged) for an unknown
 * type.
 */
static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
{
    if (nxc_type == PC_NXC_WATCH_NXC_NVP) {
        *table_type = VST_NVP;
    } else if (nxc_type == PC_NXC_WATCH_NXC_NVG) {
        *table_type = VST_NVG;
    } else if (nxc_type == PC_NXC_WATCH_NXC_NVC) {
        *table_type = VST_NVC;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid table type for nxc operation\n");
        return -1;
    }
    return 0;
}
528 
/*
 * Write back the NVP/NVG/NVC entry staged in the NXC watch engine data
 * registers to the table selected by the watch type.
 */
static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
{
    uint64_t nxc_watch[4];
    uint32_t idx, table_type = -1;
    uint8_t blk, nxc_type;
    int spec_reg, data_reg, i;

    assert(watch_engine < ARRAY_SIZE(nxc_watch));

    /* Watch engine registers are laid out with a 0x40 byte stride */
    spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
    blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
    idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);

    assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));

    /* Entries are stored big-endian in guest memory */
    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
    }

    return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
                              XIVE_VST_WORD_ALL);
}
553 
/*
 * Load the NVP/NVG/NVC entry selected by the NXC watch engine into its
 * data registers. A missing entry is reported and the registers are
 * filled with zeroes.
 */
static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
    uint64_t nxc_watch[4] = { 0 };
    uint32_t idx, table_type = -1;
    uint8_t blk, nxc_type;
    int spec_reg, data_reg, i;

    assert(watch_engine < ARRAY_SIZE(nxc_watch));

    /* Watch engine registers are laid out with a 0x40 byte stride */
    spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
    blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
    idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);

    assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));

    if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NXC entry %x/%x in %s table!?",
                    blk, idx, vst_infos[table_type].name);
    }

    /* Entries are stored big-endian in guest memory */
    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
    }
}
580 
/*
 * Xive2Router hook: load the EAS at @blk/@idx. Only local EAS lookups
 * are handled; a remote block id is an error.
 */
static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (blk != pnv_xive2_block_id(xive)) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}
593 
pnv_xive2_get_config(Xive2Router * xrtr)594 static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
595 {
596     PnvXive2 *xive = PNV_XIVE2(xrtr);
597     uint32_t cfg = 0;
598 
599     if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
600         cfg |= XIVE2_GEN1_TIMA_OS;
601     }
602 
603     if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
604         cfg |= XIVE2_VP_SAVE_RESTORE;
605     }
606 
607     if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
608               xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
609         cfg |= XIVE2_THREADID_8BITS;
610     }
611 
612     return cfg;
613 }
614 
pnv_xive2_is_cpu_enabled(PnvXive2 * xive,PowerPCCPU * cpu)615 static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
616 {
617     int pir = ppc_cpu_pir(cpu);
618     uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
619     uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
620     uint32_t bit = pir & 0x3f;
621 
622     return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
623 }
624 
/*
 * XivePresenter hook: scan all enabled threads of this chip for a
 * thread context matching NVT @nvt_blk/@nvt_idx. A VP-specific match
 * (!cam_ignore) must be unique; for group notification several matches
 * are possible and the first non-precluded one is recorded in @match.
 * Returns the number of matches, or -1 on a duplicate VP match.
 */
static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool crowd, bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /* Gen1 TIMA mode falls back to the XIVE1 matching rules */
            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, crowd, cam_ignore,
                                                  logic_serv);
            }

            if (ring != -1) {
                /*
                 * For VP-specific match, finding more than one is a
                 * problem. For group notification, it's possible.
                 */
                if (!cam_ignore && match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    /* Should set a FIR if we ever model it */
                    return -1;
                }
                /*
                 * For a group notification, we need to know if the
                 * match is precluded first by checking the current
                 * thread priority. If the interrupt can be delivered,
                 * we always notify the first match (for now).
                 */
                if (cam_ignore &&
                    xive2_tm_irq_precluded(tctx, ring, priority)) {
                        match->precluded = true;
                } else {
                    if (!match->tctx) {
                        match->ring = ring;
                        match->tctx = tctx;
                    }
                    count++;
                }
            }
        }
    }

    return count;
}
696 
pnv_xive2_presenter_get_config(XivePresenter * xptr)697 static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
698 {
699     PnvXive2 *xive = PNV_XIVE2(xptr);
700     uint32_t cfg = 0;
701 
702     if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
703         cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
704     }
705     return cfg;
706 }
707 
/*
 * XivePresenter hook: record a pending group interrupt of @priority in
 * the LSMFB of every matching thread context on this chip.
 */
static int pnv_xive2_broadcast(XivePresenter *xptr,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool crowd, bool ignore, uint8_t priority)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
    int core, thread;

    for (core = 0; core < chip->nr_cores; core++) {
        PnvCore *pc = chip->cores[core];
        CPUCore *cc = CPU_CORE(pc);

        for (thread = 0; thread < cc->nr_threads; thread++) {
            PowerPCCPU *cpu = pc->threads[thread];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /* Gen1 TIMA mode falls back to the XIVE1 matching rules */
            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
                                                 nvt_idx, ignore, 0);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
                                                  nvt_idx, crowd, ignore, 0);
            }

            if (ring != -1) {
                xive2_tm_set_lsmfb(tctx, ring, priority);
            }
        }
    }
    return 0;
}
748 
pnv_xive2_get_block_id(Xive2Router * xrtr)749 static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
750 {
751     return pnv_xive2_block_id(PNV_XIVE2(xrtr));
752 }
753 
754 /*
755  * The TIMA MMIO space is shared among the chips and to identify the
756  * chip from which the access is being done, we extract the chip id
757  * from the PIR.
758  */
pnv_xive2_tm_get_xive(PowerPCCPU * cpu)759 static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
760 {
761     int pir = ppc_cpu_pir(cpu);
762     XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
763     PnvXive2 *xive = PNV_XIVE2(xptr);
764 
765     if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
766         xive2_error(xive, "IC: CPU %x is not enabled", pir);
767     }
768     return xive;
769 }
770 
771 /*
772  * The internal sources of the interrupt controller have no knowledge
773  * of the XIVE2 chip on which they reside. Encode the block id in the
774  * source interrupt number before forwarding the source event
775  * notification to the Router. This is required on a multichip system.
776  */
pnv_xive2_notify(XiveNotifier * xn,uint32_t srcno,bool pq_checked)777 static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
778 {
779     PnvXive2 *xive = PNV_XIVE2(xn);
780     uint8_t blk = pnv_xive2_block_id(xive);
781 
782     xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
783 }
784 
785 /*
786  * Set Translation Tables
787  *
788  * TODO add support for multiple sets
789  */
pnv_xive2_stt_set_data(PnvXive2 * xive,uint64_t val)790 static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
791 {
792     uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
793     uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
794                                   xive->cq_regs[CQ_TAR >> 3]);
795 
796     switch (tsel) {
797     case CQ_TAR_NVPG:
798     case CQ_TAR_ESB:
799     case CQ_TAR_END:
800     case CQ_TAR_NVC:
801         xive->tables[tsel][entry] = val;
802         break;
803     default:
804         xive2_error(xive, "IC: unsupported table %d", tsel);
805         return -1;
806     }
807 
808     if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
809         xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
810                      xive->cq_regs[CQ_TAR >> 3], ++entry);
811     }
812 
813     return 0;
814 }
/*
 * Virtual Structure Tables (VST) configuration
 */

/*
 * Configure a VST in "exclusive" mode: the VSD points at backing
 * store pages owned by this chip. Validates the descriptor, records
 * it in vsds[], and resizes the ESB/END MMIO windows to match the
 * provisioned backing store.
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    /* VSD_TSIZE encodes the page shift relative to 4K (shift 12) */
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        /* Indirect tables only allow a restricted set of page sizes */
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    /* The table base address must be aligned to its own page size */
    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (memory_region_is_mapped(&xsrc->esb_mmio)) {
            memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
        }
        if (!(VSD_INDIRECT & vsd)) {
            /* Each byte of backing store covers SBE_PER_BYTE sources */
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END. If the table is direct,
         * its size gives the number of ENDs provisioned by FW and
         * thus the size of the END ESB window.
         */
        if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
            memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
        }
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}
901 
902 /*
903  * Both PC and VC sub-engines are configured as each use the Virtual
904  * Structure Tables
905  */
pnv_xive2_vst_set_data(PnvXive2 * xive,uint64_t vsd,uint8_t type,uint8_t blk)906 static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd,
907                                    uint8_t type, uint8_t blk)
908 {
909     uint8_t mode = GETFIELD(VSD_MODE, vsd);
910     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
911 
912     if (type > VST_ERQ) {
913         xive2_error(xive, "VST: invalid table type %d", type);
914         return;
915     }
916 
917     if (blk >= vst_infos[type].max_blocks) {
918         xive2_error(xive, "VST: invalid block id %d for"
919                       " %s table", blk, vst_infos[type].name);
920         return;
921     }
922 
923     if (!vst_addr) {
924         xive2_error(xive, "VST: invalid %s table address",
925                    vst_infos[type].name);
926         return;
927     }
928 
929     switch (mode) {
930     case VSD_MODE_FORWARD:
931         xive->vsds[type][blk] = vsd;
932         break;
933 
934     case VSD_MODE_EXCLUSIVE:
935         pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
936         break;
937 
938     default:
939         xive2_error(xive, "VST: unsupported table mode %d", mode);
940         return;
941     }
942 }
943 
/* VC sub-engine VSD write: table type and block come from the ADDR reg */
static void pnv_xive2_vc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint64_t tbl_addr = xive->vc_regs[VC_VSD_TABLE_ADDR >> 3];

    pnv_xive2_vst_set_data(xive, vsd,
                           GETFIELD(VC_VSD_TABLE_SELECT, tbl_addr),
                           GETFIELD(VC_VSD_TABLE_ADDRESS, tbl_addr));
}
953 
954 /*
955  * MMIO handlers
956  */
957 
958 
959 /*
960  * IC BAR layout
961  *
962  * Page 0: Internal CQ register accesses (reads & writes)
963  * Page 1: Internal PC register accesses (reads & writes)
964  * Page 2: Internal VC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (reads & writes)
966  * Page 4: Notify Port page (writes only, w/data),
967  * Page 5: Reserved
968  * Page 6: Sync Poll page (writes only, dataless)
969  * Page 7: Sync Inject page (writes only, dataless)
970  * Page 8: LSI Trigger page (writes only, dataless)
971  * Page 9: LSI SB Management page (reads & writes dataless)
972  * Pages 10-255: Reserved
973  * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
974  *                covering the 128 threads in P10.
975  * Pages 384-511: Reserved
976  */
/* Descriptor for one MMIO subregion of the IC BAR space */
typedef struct PnvXive2Region {
    const char *name;             /* memory region name */
    uint32_t pgoff;               /* offset in the IC space, in IC pages */
    uint32_t pgsize;              /* size, in IC pages */
    const MemoryRegionOps *ops;   /* access handlers for the subregion */
} PnvXive2Region;
983 
984 static const MemoryRegionOps pnv_xive2_ic_cq_ops;
985 static const MemoryRegionOps pnv_xive2_ic_pc_ops;
986 static const MemoryRegionOps pnv_xive2_ic_vc_ops;
987 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
988 static const MemoryRegionOps pnv_xive2_ic_notify_ops;
989 static const MemoryRegionOps pnv_xive2_ic_sync_ops;
990 static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
991 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;
992 
/*
 * 512 pages. 4K: 2M range, 64K: 32M range
 *
 * NOTE(review): the IC BAR layout comment above lists PC at page 1
 * and VC at page 2, while this table maps VC at page 1 and PC at
 * page 2 -- verify against the P10 specification which ordering is
 * correct.
 */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   }, /* poll+inject */
    { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    }, /* trigger+mgmt */
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops  },
    /* pages 384-511 reserved */
};
1007 
1008 /*
1009  * CQ operations
1010  */
1011 
pnv_xive2_ic_cq_read(void * opaque,hwaddr offset,unsigned size)1012 static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
1013                                         unsigned size)
1014 {
1015     PnvXive2 *xive = PNV_XIVE2(opaque);
1016     uint32_t reg = offset >> 3;
1017     uint64_t val = 0;
1018 
1019     switch (offset) {
1020     case CQ_XIVE_CAP: /* Set at reset */
1021     case CQ_XIVE_CFG:
1022         val = xive->cq_regs[reg];
1023         break;
1024     case CQ_MSGSND: /* TODO check the #cores of the machine */
1025         val = 0xffffffff00000000;
1026         break;
1027     case CQ_CFG_PB_GEN:
1028         val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
1029         break;
1030     default:
1031         xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
1032     }
1033 
1034     return val;
1035 }
1036 
/* Size of a BAR window, as encoded in its CQ_BAR_RANGE field (16M min) */
static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    unsigned range = GETFIELD(CQ_BAR_RANGE, val);

    return 1ull << (range + 24);
}
1041 
/*
 * Write handler for the CQ register page of the IC BAR. Mostly deals
 * with the BAR registers which map/unmap the controller MMIO regions
 * in the system address space. The previously stored register value
 * (cq_regs[reg]) is compared against the new one to act only on
 * valid-bit transitions; the raw value is stored back at the end.
 */
static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        /* Interrupt Controller BAR: 4K or 64K pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            /* Unmap only on a valid -> invalid transition */
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            /* Map only on an invalid -> valid transition */
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                /* Place each IC sub-page at its page offset in the BAR */
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        /* Thread Management BAR (TIMA) */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        /* ESB BAR: window size is encoded in CQ_BAR_RANGE */
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        /* END ESB BAR */
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        /* NVC BAR */
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        /* NVPG BAR */
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
        return;
    }

    /* Store the raw value so the next write can detect transitions */
    xive->cq_regs[reg] = val;
}
1182 
/* CQ registers are accessed as aligned big-endian 8-byte words only */
static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1196 
pnv_xive2_cache_watch_assign(uint64_t engine_mask,uint64_t * state)1197 static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
1198                                             uint64_t *state)
1199 {
1200     uint8_t val = 0xFF;
1201     int i;
1202 
1203     for (i = 3; i >= 0; i--) {
1204         if (BIT(i) & engine_mask) {
1205             if (!(BIT(i) & *state)) {
1206                 *state |= BIT(i);
1207                 val = 3 - i;
1208                 break;
1209             }
1210         }
1211     }
1212     return val;
1213 }
1214 
/*
 * Release a cache watch engine: clear its busy bit in @state (bit 3
 * is engine 0). Clearing an already-clear bit is a no-op, so no
 * guard is needed.
 */
static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
{
    *state &= ~BIT(3 - watch_engine);
}
1223 
pnv_xive2_endc_cache_watch_assign(PnvXive2 * xive)1224 static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
1225 {
1226     uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
1227                                     xive->vc_regs[VC_ENDC_CFG >> 3]);
1228     uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1229     uint8_t val;
1230 
1231     /*
1232      * We keep track of which engines are currently busy in the
1233      * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
1234      * the register, we don't return its value but the ID of an engine
1235      * it can use.
1236      * There are 4 engines. 0xFF means no engine is available.
1237      */
1238     val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1239     if (val != 0xFF) {
1240         xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1241     }
1242     return val;
1243 }
1244 
/* Mark an END cache watch engine as free again */
static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
                                               uint8_t watch_engine)
{
    uint64_t assign = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];

    pnv_xive2_cache_watch_release(&assign, watch_engine);
    xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = assign;
}
1253 
/*
 * Read handler for the VC (Virtualization Controller) register page
 * of the IC BAR. Most registers read back their vc_regs[] backing
 * store; some reads have side effects: cache watch engine
 * assignment/release, END cache loads, and the clearing of
 * poll-valid/kill-valid bits, since the model performs those
 * operations instantaneously.
 */
static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        /* The flush is instantaneous: report the poll as complete */
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ESBC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        /* The flush is instantaneous: report the poll as complete */
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH_ASSIGN:
        /* Reading returns a free engine ID, not the register content */
        val = pnv_xive2_endc_cache_watch_assign(xive);
        break;

    case VC_ENDC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
    case VC_ENDC_WATCH1_SPEC:
    case VC_ENDC_WATCH2_SPEC:
    case VC_ENDC_WATCH3_SPEC:
        /* SPEC registers are 0x40 apart, hence the >> 6 */
        watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        pnv_xive2_endc_cache_watch_release(xive, watch_engine);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
    case VC_ENDC_WATCH1_DATA0:
    case VC_ENDC_WATCH2_DATA0:
    case VC_ENDC_WATCH3_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
        pnv_xive2_end_cache_load(xive, watch_engine);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
    case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
    case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
    case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        /* The flush is instantaneous: report the poll as complete */
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        /* The invalidation is instantaneous: clear the valid bit */
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        /* Syncs are instantaneous: always report completion */
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}
1368 
/*
 * Write handler for the VC register page of the IC BAR. Besides any
 * per-register side effect, the (possibly adjusted) value is always
 * stored in vc_regs[] at the end, except on invalid offsets.
 */
static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
       break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vc_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        /* Flag the flush in progress; the CTRL read clears it */
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    case VC_ESBC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
        break;

    case VC_ESBC_CFG:
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        /* Flag the flush in progress; the CTRL read clears it */
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    case VC_EASC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
        break;

    case VC_ENDC_CFG:
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
    case VC_ENDC_WATCH1_SPEC:
    case VC_ENDC_WATCH2_SPEC:
    case VC_ENDC_WATCH3_SPEC:
         val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
    case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
    case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
    case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
    case VC_ENDC_WATCH1_DATA0:
    case VC_ENDC_WATCH2_DATA0:
    case VC_ENDC_WATCH3_DATA0:
        /* writing to DATA0 triggers the cache write */
        watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive, watch_engine);
        break;


    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        /* Flag the flush in progress; the CTRL read clears it */
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    case VC_ENDC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    /* Store the (possibly adjusted) value in the backing store */
    xive->vc_regs[reg] = val;
}
1479 
/* VC registers are accessed as aligned big-endian 8-byte words only */
static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1493 
pnv_xive2_nxc_cache_watch_assign(PnvXive2 * xive)1494 static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
1495 {
1496     uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
1497                                     xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
1498     uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1499     uint8_t val;
1500 
1501     /*
1502      * We keep track of which engines are currently busy in the
1503      * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
1504      * the register, we don't return its value but the ID of an engine
1505      * it can use.
1506      * There are 4 engines. 0xFF means no engine is available.
1507      */
1508     val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1509     if (val != 0xFF) {
1510         xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1511     }
1512     return val;
1513 }
1514 
/* Mark an NXC cache watch engine as free again */
static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
                                              uint8_t watch_engine)
{
    uint64_t assign = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];

    pnv_xive2_cache_watch_release(&assign, watch_engine);
    xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = assign;
}
1523 
/*
 * Read handler for the PC (Presentation Controller) register page of
 * the IC BAR. Mirrors the VC handler: most registers read back their
 * pc_regs[] backing store, a few reads carry side effects (cache
 * watch assignment/release, NXC cache loads, clearing of valid bits
 * since the model completes operations instantaneously). Invalid
 * offsets read as all-ones.
 */
static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH_ASSIGN:
        /* Reading returns a free engine ID, not the register content */
        val = pnv_xive2_nxc_cache_watch_assign(xive);
        break;

    case PC_NXC_PROC_CONFIG:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
    case PC_NXC_WATCH1_SPEC:
    case PC_NXC_WATCH2_SPEC:
    case PC_NXC_WATCH3_SPEC:
        /* SPEC registers are 0x40 apart, hence the >> 6 */
        watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
    case PC_NXC_WATCH1_DATA0:
    case PC_NXC_WATCH2_DATA0:
    case PC_NXC_WATCH3_DATA0:
       /*
        * Load DATA registers from cache with data requested by the
        * SPEC register
        */
        watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
        pnv_xive2_nxc_cache_load(xive, watch_engine);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
    case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
    case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
    case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        /* The flush is instantaneous: report the poll as complete */
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        /* The invalidation is instantaneous: clear the valid bit */
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}
1601 
/* PC sub-engine VSD write: table type and block come from the ADDR reg */
static void pnv_xive2_pc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint64_t tbl_addr = xive->pc_regs[PC_VSD_TABLE_ADDR >> 3];

    pnv_xive2_vst_set_data(xive, vsd,
                           GETFIELD(PC_VSD_TABLE_SELECT, tbl_addr),
                           GETFIELD(PC_VSD_TABLE_ADDRESS, tbl_addr));
}
1611 
/*
 * Write handler for the PC register page of the IC BAR. Besides any
 * per-register side effect, the (possibly adjusted) value is always
 * stored in pc_regs[] at the end, except on invalid offsets.
 */
static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {

    /*
     * VSD table settings.
     * The Xive2Router model combines both VC and PC sub-engines. We
     * allow to configure the tables through both, for the rare cases
     * where a table only really needs to be configured for one of
     * them (e.g. the NVG table for the presenter). It assumes that
     * firmware passes the same address to the VC and PC when tables
     * are defined for both, which seems acceptable.
     */
    case PC_VSD_TABLE_ADDR:
        break;
    case PC_VSD_TABLE_DATA:
        pnv_xive2_pc_vst_set_data(xive, val);
        break;

    case PC_NXC_PROC_CONFIG:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
    case PC_NXC_WATCH1_SPEC:
    case PC_NXC_WATCH2_SPEC:
    case PC_NXC_WATCH3_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
    case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
    case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
    case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
    case PC_NXC_WATCH1_DATA0:
    case PC_NXC_WATCH2_DATA0:
    case PC_NXC_WATCH3_DATA0:
        /* writing to DATA0 triggers the cache write */
        watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
        xive->pc_regs[reg] = val;
        pnv_xive2_nxc_update(xive, watch_engine);
        break;

   /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        /* Flag the flush in progress; the CTRL read clears it */
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    case PC_NXC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    /* Store the (possibly adjusted) value in the backing store */
    xive->pc_regs[reg] = val;
}
1687 
/* PC registers are accessed as aligned big-endian 8-byte words only */
static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1701 
1702 
pnv_xive2_ic_tctxt_read(void * opaque,hwaddr offset,unsigned size)1703 static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
1704                                         unsigned size)
1705 {
1706     PnvXive2 *xive = PNV_XIVE2(opaque);
1707     uint64_t val = -1;
1708     uint32_t reg = offset >> 3;
1709 
1710     switch (offset) {
1711     /*
1712      * XIVE2 hardware thread enablement
1713      */
1714     case TCTXT_EN0:
1715     case TCTXT_EN1:
1716         val = xive->tctxt_regs[reg];
1717         break;
1718 
1719     case TCTXT_EN0_SET:
1720     case TCTXT_EN0_RESET:
1721         val = xive->tctxt_regs[TCTXT_EN0 >> 3];
1722         break;
1723     case TCTXT_EN1_SET:
1724     case TCTXT_EN1_RESET:
1725         val = xive->tctxt_regs[TCTXT_EN1 >> 3];
1726         break;
1727     case TCTXT_CFG:
1728         val = xive->tctxt_regs[reg];
1729         break;
1730     default:
1731         xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
1732     }
1733 
1734     return val;
1735 }
1736 
/*
 * Write handler for the TCTXT register page of the IC BAR. The
 * SET/RESET aliases respectively OR bits into, and clear bits from,
 * the underlying EN0/EN1 thread enablement registers.
 */
static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
    case TCTXT_CFG:
        xive->tctxt_regs[offset >> 3] = val;
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;

    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        break;
    }
}
1772 
/*
 * MMIO ops for the TCTXT (thread context) register page.
 * Only aligned 8-byte accesses are accepted.
 */
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1786 
1787 /*
1788  * Redirect XSCOM to MMIO handlers
1789  */
/*
 * XSCOM load handler.
 *
 * The XSCOM register space mirrors the IC MMIO register sets in banks
 * of 256 registers: bank 0 is CQ, bank 1 is VC, bank 2 is PC and
 * bank 3 is TCTXT. The access is redirected to the matching MMIO
 * handler with the offset rebased inside the bank.
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
    uint64_t ret = -1;

    switch (xscom_reg >> 8) {
    case 0x0:
        ret = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x1:
        ret = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x2:
        ret = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x3:
        ret = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return ret;
}
1817 
/*
 * XSCOM store handler: redirect to the IC MMIO handler of the bank
 * selected by bits 8-9 of the register number (CQ/VC/PC/TCTXT), with
 * the offset rebased inside the 256-register bank.
 */
static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg >> 8) {
    case 0x0:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x1:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x2:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x3:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}
1842 
/*
 * MMIO ops for the XSCOM window redirecting to the IC register sets.
 * Only aligned 8-byte accesses are accepted.
 */
static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1856 
1857 /*
1858  * Notify port page. The layout is compatible between 4K and 64K pages :
1859  *
1860  * Page 1           Notify page (writes only)
1861  *  0x000 - 0x7FF   IPI interrupt (NPU)
1862  *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
1863  */
1864 
/*
 * Forward a trigger written to the IC notify page to the XIVE2 router.
 *
 * 'val' carries the trigger data: END triggers (XIVE_TRIGGER_END) are
 * rejected on this page, and the XIVE_TRIGGER_PQ bit selects whether
 * the PQ state of the source was already checked by the sender.
 */
static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                         !!(val & XIVE_TRIGGER_PQ));
}
1888 
/*
 * Store handler for the notify page.
 *
 * 0x000 - 0x7FF : IPI triggers
 * 0x800 - 0xFFF : HW triggers (PSI, PHB)
 *
 * Both halves are forwarded to the router the same way.
 */
static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    if (offset < 0x800) {
        /* VC: IPI triggers */
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
    } else if (offset < 0x1000) {
        /* VC: HW triggers */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
    } else {
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}
1910 
/*
 * The notify page is write-only; any load is flagged as a guest error.
 */
static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *ctrl = PNV_XIVE2(opaque);

    xive2_error(ctrl, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}
1920 
/*
 * MMIO ops for the notify page (stores only; loads error out).
 * Only aligned 8-byte accesses are accepted.
 */
static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1934 
/*
 * LSI page loads are always rejected with a guest error.
 */
static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *ctrl = PNV_XIVE2(opaque);

    xive2_error(ctrl, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}
1943 
/*
 * LSI page stores are always rejected with a guest error.
 */
static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *ctrl = PNV_XIVE2(opaque);

    xive2_error(ctrl, "LSI: invalid write @%"HWADDR_PRIx, offset);
}
1951 
/*
 * MMIO ops for the LSI page (all accesses flagged as errors).
 * Only aligned 8-byte accesses are accepted.
 */
static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1965 
1966 /*
1967  * Sync MMIO page (write only)
1968  */
1969 #define PNV_XIVE2_SYNC_IPI              0x000
1970 #define PNV_XIVE2_SYNC_HW               0x080
1971 #define PNV_XIVE2_SYNC_NxC              0x100
1972 #define PNV_XIVE2_SYNC_INT              0x180
1973 #define PNV_XIVE2_SYNC_OS_ESC           0x200
1974 #define PNV_XIVE2_SYNC_POOL_ESC         0x280
1975 #define PNV_XIVE2_SYNC_HARD_ESC         0x300
1976 #define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO   0x800
1977 #define PNV_XIVE2_SYNC_NXC_LD_LCL_CO    0x880
1978 #define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI   0x900
1979 #define PNV_XIVE2_SYNC_NXC_ST_LCL_CI    0x980
1980 #define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI   0xA00
1981 #define PNV_XIVE2_SYNC_NXC_ST_RMT_CI    0xA80
1982 
/*
 * The sync page is store-only; flag any load as a guest error.
 */
static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *ctrl = PNV_XIVE2(opaque);

    xive2_error(ctrl, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}
1992 
1993 /*
1994  * The sync MMIO space spans two pages.  The lower page is use for
1995  * queue sync "poll" requests while the upper page is used for queue
1996  * sync "inject" requests.  Inject requests require the HW to write
1997  * a byte of all 1's to a predetermined location in memory in order
1998  * to signal completion of the request.  Both pages have the same
1999  * layout, so it is easiest to handle both with a single function.
2000  */
/*
 * Store handler for the two-page sync MMIO space.
 *
 * The offset inside a page selects the queue to synchronize (see the
 * PNV_XIVE2_SYNC_* offsets above). A store to the lower page is a
 * "poll" request with no side effect modeled here; a store to the
 * upper page is an "inject" request and triggers the completion-byte
 * write via pnv_xive2_inject_notify().
 */
static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    int inject_type;
    hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;

    /* adjust offset for inject page */
    hwaddr adj_offset = offset & pg_offset_mask;

    /* Only the exact PNV_XIVE2_SYNC_* offsets are valid */
    switch (adj_offset) {
    case PNV_XIVE2_SYNC_IPI:
        inject_type = PNV_XIVE2_QUEUE_IPI;
        break;
    case PNV_XIVE2_SYNC_HW:
        inject_type = PNV_XIVE2_QUEUE_HW;
        break;
    case PNV_XIVE2_SYNC_NxC:
        inject_type = PNV_XIVE2_QUEUE_NXC;
        break;
    case PNV_XIVE2_SYNC_INT:
        inject_type = PNV_XIVE2_QUEUE_INT;
        break;
    case PNV_XIVE2_SYNC_OS_ESC:
        inject_type = PNV_XIVE2_QUEUE_OS;
        break;
    case PNV_XIVE2_SYNC_POOL_ESC:
        inject_type = PNV_XIVE2_QUEUE_POOL;
        break;
    case PNV_XIVE2_SYNC_HARD_ESC:
        inject_type = PNV_XIVE2_QUEUE_HARD;
        break;
    case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
        inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
        break;
    case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
        inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
        break;
    case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
        inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
        break;
    case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
        inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
        break;
    case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
        inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
        break;
    case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
        inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    /* Write Queue Sync notification byte if writing to sync inject page */
    if ((offset & ~pg_offset_mask) != 0) {
        pnv_xive2_inject_notify(xive, inject_type);
    }
}
2061 
/*
 * MMIO ops for the sync poll/inject pages (stores only).
 * Only aligned 8-byte accesses are accepted.
 */
static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
2075 
2076 /*
2077  * When the TM direct pages of the IC controller are accessed, the
2078  * target HW thread is deduced from the page offset.
2079  */
/*
 * Build the PIR of the HW thread targeted by an indirect TIMA access:
 * the page index in the IC BAR gives the thread, the chip id the node.
 */
static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
{
    /* On P10, the node ID shift in the PIR register is 8 bits */
    return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
}
2085 
/*
 * Strip the thread-selection page index from an indirect TIMA offset,
 * keeping only the offset inside one IC page.
 */
static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
                                                   hwaddr offset)
{
    /*
     * Indirect TIMA accesses are similar to direct accesses for
     * privilege ring 0. So remove any traces of the hw thread ID from
     * the offset in the IC BAR as it could be interpreted as the ring
     * privilege when calling the underlying direct access functions.
     */
    return offset & ((1ull << xive->ic_shift) - 1);
}
2097 
pnv_xive2_get_indirect_tctx(PnvXive2 * xive,uint32_t pir)2098 static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
2099 {
2100     PnvChip *chip = xive->chip;
2101     PowerPCCPU *cpu = NULL;
2102 
2103     cpu = pnv_chip_find_cpu(chip, pir);
2104     if (!cpu) {
2105         xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
2106         return NULL;
2107     }
2108 
2109     if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
2110         xive2_error(xive, "IC: CPU %x is not enabled", pir);
2111     }
2112 
2113     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2114 }
2115 
/*
 * Indirect TIMA load: the target thread is selected by the page index
 * of the access, then the load is serviced like a direct TIMA access.
 */
static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hwaddr page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);

    if (!tctx) {
        return -1;
    }

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, page_offset, size);
}
2135 
/*
 * Indirect TIMA store: the target thread is selected by the page index
 * of the access, then the store is serviced like a direct TIMA access.
 */
static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hwaddr page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);

    if (!tctx) {
        return;
    }

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, page_offset, val, size);
}
2152 
/*
 * MMIO ops for the indirect TIMA pages of the IC BAR.
 * TIMA registers have mixed widths, so 1 to 8 byte accesses are valid.
 */
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
2166 
2167 /*
2168  * TIMA ops
2169  */
/*
 * Direct TIMA store: the thread context is the one of the CPU
 * performing the access (current_cpu).
 */
static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);

    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}
2180 
/*
 * Direct TIMA load: the thread context is the one of the CPU
 * performing the access (current_cpu).
 */
static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);

    return xive_tctx_tm_read(xptr, tctx, offset, size);
}
2190 
/*
 * MMIO ops for the direct TIMA.
 * TIMA registers have mixed widths, so 1 to 8 byte accesses are valid.
 */
static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
2204 
/*
 * NVC BAR load handler.
 *
 * The page index selects the NVC entry and the low 12 bits of the
 * address encode the backlog operation. Only 2-byte loads are valid.
 *
 * Fix: the page index must be derived with nvc_shift, as done in
 * pnv_xive2_nvc_write(); the previous code wrongly used nvpg_shift,
 * which belongs to the NVPG BAR. Both shifts default to 16 but can
 * be programmed independently by FW.
 */
static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    uint32_t page = addr >> xive->nvc_shift;
    uint16_t op = addr & 0xFFF;
    uint8_t blk = pnv_xive2_block_id(xive);

    if (size != 2) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc load size %d\n",
                      size);
        return -1;
    }

    return xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, 1);
}
2222 
/*
 * NVC BAR store handler: the page index selects the NVC entry, the
 * low 12 bits of the address encode the backlog operation and 'val'
 * its argument. Only 1-byte stores are valid.
 */
static void pnv_xive2_nvc_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    uint32_t page = addr >> xive->nvc_shift;
    uint16_t op = addr & 0xFFF;
    uint8_t blk = pnv_xive2_block_id(xive);

    if (size != 1) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc write size %d\n",
                      size);
        return;
    }

    (void)xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, val);
}
2240 
/*
 * MMIO ops for the NVC BAR. Access sizes are validated by the
 * handlers themselves (2-byte loads, 1-byte stores).
 */
static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
2254 
/*
 * NVPG BAR load handler.
 *
 * Pages alternate between NVP (even) and NVG (odd) entries, both
 * indexed by page / 2. The low 12 bits of the address encode the
 * backlog operation. Only 2-byte loads are valid.
 */
static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    uint32_t page = addr >> xive->nvpg_shift;
    uint16_t op = addr & 0xFFF;
    uint32_t index = page >> 1;
    uint8_t blk = pnv_xive2_block_id(xive);

    if (size != 2) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg load size %d\n",
                      size);
        return -1;
    }

    if (page & 1) {
        /* odd page - NVG */
        return xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, 1);
    }

    /* even page - NVP */
    return xive2_presenter_nvp_backlog_op(xptr, blk, index, op);
}
2279 
/*
 * NVPG BAR store handler: same page layout as the load handler
 * (even pages NVP, odd pages NVG), but only 1-byte stores are valid
 * and 'val' is the backlog operation argument.
 */
static void pnv_xive2_nvpg_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    uint32_t page = addr >> xive->nvpg_shift;
    uint16_t op = addr & 0xFFF;
    uint32_t index = page >> 1;
    uint8_t blk = pnv_xive2_block_id(xive);

    if (size != 1) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg write size %d\n",
                      size);
        return;
    }

    if (page & 1) {
        /* odd page - NVG */
        (void)xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, val);
    } else {
        /* even page - NVP */
        (void)xive2_presenter_nvp_backlog_op(xptr, blk, index, op);
    }
}
2304 
/*
 * MMIO ops for the NVPG BAR. Access sizes are validated by the
 * handlers themselves (2-byte loads, 1-byte stores).
 */
static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
2318 
2319 /*
2320  * POWER10 default capabilities: 0x2000120076f000FC
2321  */
2322 #define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC
2323 
2324 /*
2325  * POWER10 default configuration: 0x0030000033000000
2326  *
2327  * 8bits thread id was dropped for P10
2328  */
2329 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
2330 
/*
 * Machine reset handler (registered with qemu_register_reset()).
 *
 * Restores the capability and configuration registers, the cache
 * watch assignment defaults, and the default 64K page shifts, then
 * unmaps the IPI and END ESB subregions until FW reconfigures the
 * BARs.
 */
static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* VC and PC cache watch assign mechanism */
    xive->vc_regs[VC_ENDC_CFG >> 3] =
        SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
    xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
        SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}
2363 
2364 /*
2365  *  Maximum number of IRQs and ENDs supported by HW. Will be tuned by
2366  *  software.
2367  */
2368 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2369 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2370 
/*
 * Device realize: realize the IPI and END sources, then create the
 * XSCOM window and all MMIO regions (IC, ESB, END, NVC, NVPG, TIMA).
 * The regions are mapped later when FW programs the BARs.
 */
static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* The "chip" link property must have been set by the chip model */
    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_fatal);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    /* One subregion per entry of the pnv_xive2_ic_regions[] table */
    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                         pnv_xive2_ic_regions[i].ops, xive,
                         pnv_xive2_ic_regions[i].name,
                         pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /*
     * VC MMIO regions.
     */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}
2461 
/* BAR addresses and capability/configuration defaults, set by the chip */
static const Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
};
2475 
/* Instance init: create the IPI and END source children (realized later) */
static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}
2485 
/*
 * Add the XIVE node under the XSCOM node of the device tree.
 * Returns 0; FDT errors abort via the _FDT() macro.
 */
static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}
2507 
/*
 * Class initializer: hook the XSCOM device tree helper, the device
 * properties/realize, and the Xive2Router, XiveNotifier and
 * XivePresenter operations implemented by this model.
 *
 * Fix: dropped the stray ';' that followed the function body (an
 * empty file-scope declaration, not valid ISO C before C23).
 */
static void pnv_xive2_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom  = pnv_xive2_dt_xscom;

    dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    /* Virtualization structure accessors used by the core router */
    xrc->get_eas   = pnv_xive2_get_eas;
    xrc->get_pq    = pnv_xive2_get_pq;
    xrc->set_pq    = pnv_xive2_set_pq;
    xrc->get_end   = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp   = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_nvgc   = pnv_xive2_get_nvgc;
    xrc->write_nvgc = pnv_xive2_write_nvgc;
    xrc->get_config  = pnv_xive2_get_config;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify    = pnv_xive2_notify;

    xpc->match_nvt  = pnv_xive2_match_nvt;
    xpc->get_config = pnv_xive2_presenter_get_config;
    xpc->broadcast  = pnv_xive2_broadcast;
}
2542 
/* QOM type description: PnvXive2 derives from Xive2Router */
static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (const InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
2555 
/* Register the PnvXive2 QOM type at QEMU startup */
static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)
2562 
2563 /*
2564  * If the table is direct, we can compute the number of PQ entries
2565  * provisioned by FW.
2566  */
2567 static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
2568 {
2569     uint8_t blk = pnv_xive2_block_id(xive);
2570     uint64_t vsd = xive->vsds[VST_ESB][blk];
2571     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
2572 
2573     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
2574 }
2575 
2576 /*
2577  * Compute the number of entries per indirect subpage.
2578  */
/*
 * Compute the number of VST entries per indirect subpage.
 *
 * Direct tables return 1. For indirect tables, the first VSD is read
 * from guest memory to find the subpage size, and the entry count is
 * derived from it. Returns 0 on an invalid descriptor or unsupported
 * page shift.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    /* NOTE(review): the ldq_be_dma() result is not checked; confirm a
     * DMA error leaving vsd stale is acceptable here. */
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}
2613 
/*
 * Dump a human-readable summary of the XIVE2 controller state into
 * @buf: IPI sources, EAT, END escalation entries, ENDT, and the NVP,
 * NVG and NVC tables. Used by the HMP "info pic" command.
 *
 * For the indirect NVP/NVG/NVC tables, entries are scanned one
 * provisioned subpage at a time: the inner while advances through the
 * valid entries of a subpage, and the outer for jumps to the next
 * subpage boundary to skip unprovisioned holes.
 */
void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    Xive2Nvgc nvgc;
    int i;
    uint64_t entries_per_subpage;

    g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
                           blk, srcno0, srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);

    g_string_append_printf(buf, "XIVE[%x] EAT %08x .. %08x\n",
                           blk, srcno0, srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
                           chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
                           chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    /*
     * pnv_xive2_vst_per_subpage() returns 0 on an invalid table
     * descriptor; a zero stride would spin forever, so bail out of the
     * scan in that case (same guard on the NVG and NVC loops below).
     */
    for (i = 0; entries_per_subpage && i < XIVE2_NVP_COUNT;
         i += entries_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d NVGT %08x .. %08x\n",
                           chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVG);
    for (i = 0; entries_per_subpage && i < XIVE2_NVP_COUNT;
         i += entries_per_subpage) {
        while (!xive2_router_get_nvgc(xrtr, false, blk, i, &nvgc)) {
            xive2_nvgc_pic_print_info(&nvgc, i++, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d NVCT %08x .. %08x\n",
                          chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVC);
    for (i = 0; entries_per_subpage && i < XIVE2_NVP_COUNT;
         i += entries_per_subpage) {
        while (!xive2_router_get_nvgc(xrtr, true, blk, i, &nvgc)) {
            xive2_nvgc_pic_print_info(&nvgc, i++, buf);
        }
    }
}
2683