xref: /qemu/hw/s390x/s390-pci-inst.c (revision fc524567087c2537b5103cdfc1d41e4f442892b6)
1 /*
2  * s390 PCI instructions
3  *
4  * Copyright 2014 IBM Corp.
5  * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
6  *            Hong Bo Li <lihbbj@cn.ibm.com>
7  *            Yi Min Zhao <zyimin@cn.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at
10  * your option) any later version. See the COPYING file in the top-level
11  * directory.
12  */
13 
14 #include "qemu/osdep.h"
15 #include "exec/memop.h"
16 #include "exec/target_page.h"
17 #include "system/memory.h"
18 #include "qemu/error-report.h"
19 #include "system/hw_accel.h"
20 #include "hw/boards.h"
21 #include "hw/pci/pci_device.h"
22 #include "hw/s390x/s390-pci-inst.h"
23 #include "hw/s390x/s390-pci-bus.h"
24 #include "hw/s390x/s390-pci-kvm.h"
25 #include "hw/s390x/s390-pci-vfio.h"
26 #include "hw/s390x/tod.h"
27 
28 #include "trace.h"
29 
/* Credit one entry back to the IOMMU's DMA usage limit, if one is tracked */
static inline void inc_dma_avail(S390PCIIOMMU *iommu)
{
    if (!iommu->dma_limit) {
        return;
    }
    iommu->dma_limit->avail++;
}
36 
/* Consume one entry from the IOMMU's DMA usage limit, if one is tracked */
static inline void dec_dma_avail(S390PCIIOMMU *iommu)
{
    if (!iommu->dma_limit) {
        return;
    }
    iommu->dma_limit->avail--;
}
43 
/*
 * Store the low byte of @status_code into bits 32-39 (i.e. byte 3 of the
 * low word) of general register @r, leaving all other bits untouched.
 */
static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    uint64_t reg = env->regs[r];

    reg &= ~0xff000000ULL;
    reg |= (status_code & 0xff) << 24;
    env->regs[r] = reg;
}
50 
/*
 * CLP List PCI: fill the response portion of @rrb with one ClpFhListEntry
 * per available zPCI function, optionally resuming a previous enumeration
 * at the function index carried in the request's resume token.
 *
 * Returns 0 on success or -EINVAL for a malformed request; on failure the
 * CLP response code describing the problem is stored into the response
 * header.  *cc is only raised (to 3) for the response-length-mismatch case.
 */
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    S390pciState *s = s390_get_phb();
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    /* The List PCI request header must be exactly 32 bytes long */
    if (lduw_be_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    /* Only request format 0 is supported */
    if ((ldl_be_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    /* All reserved fields must contain zeros */
    if ((ldl_be_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_be_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_be_p(&rrb->request.resume_token);

    if (resume_token) {
        /* Continue a previous enumeration from the indexed device */
        pbdev = s390_pci_find_dev_by_idx(s, resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(s, NULL);
    }

    /* The response area must at least hold the header plus one entry */
    if (lduw_be_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_be_p(&rrb->response.hdr.len);
    /* The area past the header must hold a whole number of entries */
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    /* Fixed portion of the response */
    stl_be_p(&rrb->response.fmt, 0);
    stq_be_p(&rrb->response.reserved1, 0);
    stl_be_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_be_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    /* Emit one entry per device until we run out of devices or of space */
    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_be_p(&rrb->response.fh_list[i].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_be_p(&rrb->response.fh_list[i].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_be_p(&rrb->response.fh_list[i].config,
            pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_be_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_be_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        trace_s390_pci_list_entry(g_l2,
                lduw_be_p(&rrb->response.fh_list[i].vendor_id),
                lduw_be_p(&rrb->response.fh_list[i].device_id),
                ldl_be_p(&rrb->response.fh_list[i].fid),
                ldl_be_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(s, pbdev);
        i++;
    }

    /* A non-zero resume token tells the guest more devices remain */
    if (!pbdev) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_be_p(&rrb->response.resume_token, resume_token);
    stw_be_p(&rrb->response.hdr.len, g_l2);
    stw_be_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        trace_s390_pci_list(rc);
        stw_be_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}
153 
/*
 * Handle the CLP (Call Logical Processor) instruction for zPCI.
 *
 * The guest passes the address of a request/response block in regs[r2].
 * The block is read from guest memory in stages (request header first,
 * then both headers, then the whole request + response area), validated,
 * dispatched on the CLP command code, and the updated buffer is written
 * back to the guest.  The function always returns 0; errors are reported
 * to the guest via program interrupts, CLP response codes or the
 * condition code.
 */
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    S390pciState *s = s390_get_phb();
    int i;

    /* CLP is privileged; reject problem-state execution */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* Stage 1: fetch just the request header to learn the request length */
    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_be_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    /* Stage 2: fetch up to and including the response header */
    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_be_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }
    /* Combined request + response must fit in one 8K block */
    if ((req_len + res_len) > 8192) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    /* Stage 3: fetch the complete request + response area */
    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    /* All commands handled here use a 32-byte request */
    if (req_len != 32) {
        stw_be_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_be_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqsetpci->fh));
        if (!pbdev) {
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
                goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            /* Exactly one DMA address space must be requested */
            switch (reqsetpci->ndas) {
            case 0:
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            /* Enabling an already-enabled function is an error */
            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            /*
             * Take this opportunity to make sure we still have an accurate
             * host fh.  It's possible part of the handle changed while the
             * device was disabled to the guest (e.g. vfio hot reset for
             * ISM during plug)
             */
            if (pbdev->interp) {
                /* Take this opportunity to make sure we are sync'd with host */
                if (!s390_pci_get_host_fh(pbdev, &pbdev->fh) ||
                    !(pbdev->fh & FH_MASK_ENABLE)) {
                    stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
                    goto out;
                }
            }
            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_be_p(&ressetpci->fh, pbdev->fh);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            /* Disabling a function that is not enabled is an error */
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_cold_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_be_p(&ressetpci->fh, pbdev->fh);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            trace_s390_pci_unknown("set-pci", reqsetpci->oc);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqquery->fh));
        if (!pbdev) {
            trace_s390_pci_nodev("query", ldl_be_p(&reqquery->fh));
            stw_be_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        /* Copy the cached function attributes into the response */
        stq_be_p(&resquery->sdma, pbdev->zpci_fn.sdma);
        stq_be_p(&resquery->edma, pbdev->zpci_fn.edma);
        stw_be_p(&resquery->pchid, pbdev->zpci_fn.pchid);
        stw_be_p(&resquery->vfn, pbdev->zpci_fn.vfn);
        resquery->flags = pbdev->zpci_fn.flags;
        resquery->pfgid = pbdev->zpci_fn.pfgid;
        resquery->pft = pbdev->zpci_fn.pft;
        resquery->fmbl = pbdev->zpci_fn.fmbl;
        stl_be_p(&resquery->fid, pbdev->zpci_fn.fid);
        stl_be_p(&resquery->uid, pbdev->zpci_fn.uid);
        memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS);
        memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN);

        /* Report each BAR's base and size (as a power-of-two exponent) */
        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_be_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            trace_s390_pci_bar(i,
                    ldl_be_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stw_be_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;

        ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
        S390PCIGroup *group;

        group = s390_group_find(reqgrp->g);
        if (!group) {
            /* We do not allow access to unknown groups */
            /* The group must have been obtained with a vfio device */
            stw_be_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
            goto out;
        }
        resgrp->fr = group->zpci_group.fr;
        stq_be_p(&resgrp->dasm, group->zpci_group.dasm);
        stq_be_p(&resgrp->msia, group->zpci_group.msia);
        stw_be_p(&resgrp->mui, group->zpci_group.mui);
        stw_be_p(&resgrp->i, group->zpci_group.i);
        stw_be_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
        resgrp->version = group->zpci_group.version;
        resgrp->dtsm = group->zpci_group.dtsm;
        stw_be_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        trace_s390_pci_unknown("clp", lduw_be_p(&reqh->cmd));
        stw_be_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    /* Write the (possibly updated) request + response back to the guest */
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}
360 
/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1,2,4 or 8
 *
 * Returns 0 on success, -EINVAL for any other length.
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t v = *ptr;

    if (len == 2) {
        v = bswap16(v);
    } else if (len == 4) {
        v = bswap32(v);
    } else if (len == 8) {
        v = bswap64(v);
    } else if (len != 1) {
        /* Lengths 0, 3, 5, 6, 7 and >8 are invalid */
        return -EINVAL;
    }
    *ptr = v;
    return 0;
}
390 
/*
 * Find the subregion of @mr that fully contains the access of @len bytes
 * at @offset.  Returns the first matching subregion, or @mr itself when
 * no subregion covers the access.
 */
static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
                                        uint8_t len)
{
    MemoryRegion *child;

    QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
        uint64_t child_size = int128_get64(child->size);

        if (offset >= child->addr &&
            (offset + len) <= (child->addr + child_size)) {
            return child;
        }
    }
    return mr;
}
407 
zpci_read_bar(S390PCIBusDevice * pbdev,uint8_t pcias,uint64_t offset,uint64_t * data,uint8_t len)408 static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
409                                  uint64_t offset, uint64_t *data, uint8_t len)
410 {
411     MemoryRegion *mr;
412 
413     mr = pbdev->pdev->io_regions[pcias].memory;
414     mr = s390_get_subregion(mr, offset, len);
415     offset -= mr->addr;
416     return memory_region_dispatch_read(mr, offset, data,
417                                        size_memop(len) | MO_BE,
418                                        MEMTXATTRS_UNSPECIFIED);
419 }
420 
/*
 * Handle the PCILG (PCI Load) instruction: read 1-8 bytes from a PCI BAR,
 * or from config space via the pseudo BAR ZPCI_CONFIG_BAR, into regs[r1].
 *
 * regs[r2] carries the function handle (bits 32-63), the address space
 * (bits 44-47 of the low word) and the length (low nibble); regs[r2+1]
 * carries the offset.  Always returns 0; errors are signalled via program
 * interrupts or condition code + status code.
 */
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    /* Privileged instruction */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* r2 designates an even/odd register pair */
    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    /* Decode handle, address space, length and offset from the pair */
    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcilg", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /* Non-zero length that must not cross a doubleword boundary */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        result = zpci_read_bar(pbdev, pcias, offset, &data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* Config space: lengths 1, 2, 4 only, not crossing a word */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        data =  pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        if (zpci_endian_swap(&data, len)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    default:
        trace_s390_pci_invalid("pcilg", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Account the load in the function measurement block counters */
    pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++;

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
507 
zpci_write_bar(S390PCIBusDevice * pbdev,uint8_t pcias,uint64_t offset,uint64_t data,uint8_t len)508 static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
509                                   uint64_t offset, uint64_t data, uint8_t len)
510 {
511     MemoryRegion *mr;
512 
513     mr = pbdev->pdev->io_regions[pcias].memory;
514     mr = s390_get_subregion(mr, offset, len);
515     offset -= mr->addr;
516     return memory_region_dispatch_write(mr, offset, data,
517                                         size_memop(len) | MO_BE,
518                                         MEMTXATTRS_UNSPECIFIED);
519 }
520 
/*
 * Handle the PCISTG (PCI Store) instruction: write 1-8 bytes from regs[r1]
 * to a PCI BAR, or to config space via the pseudo BAR ZPCI_CONFIG_BAR.
 *
 * regs[r2] carries the function handle, address space and length; regs[r2+1]
 * carries the offset.  Always returns 0; errors are signalled via program
 * interrupts or condition code + status code.
 */
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    /* Privileged instruction */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* r2 designates an even/odd register pair */
    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];
    data = env->regs[r1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcistg", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
     * are already covered by the FH_MASK_ENABLE check above
     */
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
        /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /* Check length:
         * A length of 0 is invalid and length should not cross a double word
         */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }

        result = zpci_write_bar(pbdev, pcias, offset, data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* ZPCI uses the pseudo BAR number 15 as configuration space */
        /* possible access lengths are 1,2,4 and must not cross a word */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        /* len = 1,2,4 so we do not need to test */
        zpci_endian_swap(&data, len);
        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
        break;
    default:
        trace_s390_pci_invalid("pcistg", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Account the store in the function measurement block counters */
    pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
614 
/*
 * Apply a single translation @entry to the IOMMU's shadow IOTLB and notify
 * the IOMMU memory region listeners (e.g. vfio) about the change.
 *
 * Unmaps only drop the cache entry here; the notifier is deliberately
 * deferred so contiguous unmaps can be batched by the caller.  A remap with
 * changed permissions or target is notified as unmap-then-map.
 *
 * Returns the remaining DMA-limit headroom, or 1 when no limit is tracked.
 */
static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu,
                                      S390IOTLBEntry *entry)
{
    S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
    IOMMUTLBEvent event = {
        .type = entry->perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .iova = entry->iova,
            .translated_addr = entry->translated_addr,
            .perm = entry->perm,
            .addr_mask = ~TARGET_PAGE_MASK,
        },
    };

    if (event.type == IOMMU_NOTIFIER_UNMAP) {
        if (!cache) {
            /* Nothing cached for this iova; nothing to tear down */
            goto out;
        }
        g_hash_table_remove(iommu->iotlb, &entry->iova);
        inc_dma_avail(iommu);
        /* Don't notify the iommu yet, maybe we can bundle contiguous unmaps */
        goto out;
    } else {
        if (cache) {
            if (cache->perm == entry->perm &&
                cache->translated_addr == entry->translated_addr) {
                /* Identical mapping already shadowed; nothing to do */
                goto out;
            }

            /* Mapping changed: notify an unmap before the new map below */
            event.type = IOMMU_NOTIFIER_UNMAP;
            event.entry.perm = IOMMU_NONE;
            memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
            event.type = IOMMU_NOTIFIER_MAP;
            event.entry.perm = entry->perm;
        }

        /* Shadow the new page-sized mapping */
        cache = g_new(S390IOTLBEntry, 1);
        cache->iova = entry->iova;
        cache->translated_addr = entry->translated_addr;
        cache->len = TARGET_PAGE_SIZE;
        cache->perm = entry->perm;
        g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
        dec_dma_avail(iommu);
    }

    /*
     * All associated iotlb entries have already been cleared, trigger the
     * unmaps.
     */
    memory_region_notify_iommu(&iommu->iommu_mr, 0, event);

out:
    return iommu->dma_limit ? iommu->dma_limit->avail : 1;
}
670 
/*
 * Notify the IOMMU listeners of the unmap of [iova, iova + len), split into
 * the largest naturally-aligned power-of-two chunks the range allows, so a
 * run of page unmaps becomes as few notifier events as possible.
 */
static void s390_pci_batch_unmap(S390PCIIOMMU *iommu, uint64_t iova,
                                 uint64_t len)
{
    uint64_t cur = iova;
    uint64_t last = iova + len - 1;
    uint64_t left = len;
    IOMMUTLBEvent event = {
        .type = IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .translated_addr = 0,
            .perm = IOMMU_NONE,
        },
    };

    while (left >= TARGET_PAGE_SIZE) {
        uint64_t mask = dma_aligned_pow2_mask(cur, last, 64);
        uint64_t chunk = mask + 1;

        event.entry.iova = cur;
        event.entry.addr_mask = mask;
        memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
        cur += chunk;
        left -= chunk;
    }
}
694 
/*
 * Handle the RPCIT (Refresh PCI Translations) instruction.
 *
 * Walks the guest's I/O translation tables for the address range given by
 * regs[r2] (start) and regs[r2+1] (length) of the function whose handle is
 * in the high word of regs[r1], and updates the shadow IOTLB accordingly.
 * Contiguous page unmaps are coalesced into batched notifier events.  When
 * the host DMA-mapping limit is exhausted mid-refresh, unmaps are still
 * processed and the whole range is retried once headroom is regained;
 * if none is regained, ZPCI_RPCIT_ST_INSUFF_RES is reported to the guest.
 *
 * Always returns 0; errors are reported via program interrupts, condition
 * codes/status codes, and error events.
 *
 * Fix over previous revision: the retry condition was an unbraced single-
 * statement `if`, and the coalesce-flush block was mis-indented; both now
 * follow QEMU coding style (mandatory braces, 4-space indent).  Behavior
 * is unchanged.
 */
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t iova, coalesce = 0;
    uint32_t fh;
    uint16_t error = 0;
    S390PCIBusDevice *pbdev;
    S390PCIIOMMU *iommu;
    S390IOTLBEntry entry;
    hwaddr start, end, sstart;
    uint32_t dma_avail;
    bool again;

    /* Privileged instruction */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* r2 designates an even/odd register pair */
    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    sstart = start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("rpcit", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    iommu = pbdev->iommu;
    /* dma_avail of 1 is a stand-in for "unlimited" when no limit exists */
    if (iommu->dma_limit) {
        dma_avail = iommu->dma_limit->avail;
    } else {
        dma_avail = 1;
    }
    if (!iommu->g_iota) {
        error = ERR_EVENT_INVALAS;
        goto err;
    }

    /* The requested range must intersect the function's DMA window */
    if (end < iommu->pba || start > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

 retry:
    start = sstart;
    again = false;
    while (start < end) {
        error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
        if (error) {
            break;
        }

        /*
         * If this is an unmap of a PTE, let's try to coalesce multiple unmaps
         * into as few notifier events as possible.
         */
        if (entry.perm == IOMMU_NONE && entry.len == TARGET_PAGE_SIZE) {
            if (coalesce == 0) {
                iova = entry.iova;
            }
            coalesce += entry.len;
        } else if (coalesce > 0) {
            /* Unleash the coalesced unmap before processing a new map */
            s390_pci_batch_unmap(iommu, iova, coalesce);
            coalesce = 0;
        }

        start += entry.len;
        /* Shadow the walked entry page by page */
        while (entry.iova < start && entry.iova < end) {
            if (dma_avail > 0 || entry.perm == IOMMU_NONE) {
                dma_avail = s390_pci_update_iotlb(iommu, &entry);
                entry.iova += TARGET_PAGE_SIZE;
                entry.translated_addr += TARGET_PAGE_SIZE;
            } else {
                /*
                 * We are unable to make a new mapping at this time, continue
                 * on and hopefully free up more space.  Then attempt another
                 * pass.
                 */
                again = true;
                break;
            }
        }
    }
    if (coalesce) {
        /* Unleash the coalesced unmap before finishing rpcit */
        s390_pci_batch_unmap(iommu, iova, coalesce);
        coalesce = 0;
    }
    if (again && dma_avail > 0) {
        goto retry;
    }
err:
    if (error) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
        s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
    } else {
        pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
        if (dma_avail > 0) {
            setcc(cpu, ZPCI_PCI_LS_OK);
        } else {
            /* vfio DMA mappings are exhausted, trigger a RPCIT */
            setcc(cpu, ZPCI_PCI_LS_ERR);
            s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES);
        }
    }
    return 0;
}
826 
/*
 * Handle the PCISTB (PCI Store Block) instruction: copy a block of up to
 * the group's maxstbl bytes from guest memory at @gaddr to BAR @pcias of
 * the function identified by the handle in regs[r1], at the offset in
 * regs[r3].  The block is written in 8-byte big-endian chunks.
 *
 * Always returns 0; errors are signalled via program interrupts or
 * condition code + status code.
 */
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    MemTxResult result;
    uint64_t offset;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint16_t len;
    uint8_t buffer[128];

    /* Privileged instruction */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* Decode handle, address space and length from regs[r1] */
    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0x1fff;
    offset = env->regs[r3];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcistb", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    /* Only real I/O BARs are valid targets for a block store */
    if (pcias > ZPCI_IO_BAR_MAX) {
        trace_s390_pci_invalid("pcistb", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Verify the address, offset and length */
    /* offset must be a multiple of 8 */
    if (offset % 8) {
        goto specification_error;
    }
    /* Length must be greater than 8, a multiple of 8 */
    /* and not greater than maxstbl */
    if ((len <= 8) || (len % 8) ||
        (len > pbdev->pci_group->zpci_group.maxstbl)) {
        goto specification_error;
    }
    /* Do not cross a 4K-byte boundary */
    if (((offset & 0xfff) + len) > 0x1000) {
        goto specification_error;
    }
    /* Guest address must be double word aligned */
    if (gaddr & 0x07UL) {
        goto specification_error;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;

    /* Validate every doubleword of the target before touching the device */
    for (i = 0; i < len; i += 8) {
        if (!memory_region_access_valid(mr, offset + i, 8, true,
                                        MEMTXATTRS_UNSPECIFIED)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    /* Fetch the whole source block from guest memory */
    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    /* Write the block to the device in 8-byte big-endian chunks */
    for (i = 0; i < len / 8; i++) {
        result = memory_region_dispatch_write(mr, offset + i * 8,
                                              ldq_be_p(buffer + i * 8),
                                              MO_64, MEMTXATTRS_UNSPECIFIED);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    /* Account the block store in the measurement block counters */
    pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;

specification_error:
    s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    return 0;
}
936 
/*
 * Register adapter interrupts for a zPCI function from the guest FIB.
 * Sets up the summary and per-vector indicators and the adapter routing
 * information. Returns 0 on success; on a mapping failure the indicators
 * are released again and the map_indicator() error is returned.
 */
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint32_t data = ldl_be_p(&fib.data);
    uint64_t aisb = ldq_be_p(&fib.aisb);
    uint64_t aibv = ldq_be_p(&fib.aibv);
    uint8_t isc = FIB_DATA_ISC(data);
    int len, rc;

    pbdev->routes.adapter.adapter_id = css_get_adapter_id(CSS_IO_ADAPTER_PCI,
                                                          isc);
    pbdev->summary_ind = get_indicator(aisb, sizeof(uint64_t));
    /* Indicator area is sized by the number of interrupts (NOI) */
    len = BITS_TO_LONGS(FIB_DATA_NOI(data)) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(aibv, len);

    rc = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (!rc) {
        rc = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    }
    if (rc) {
        goto fail;
    }

    pbdev->routes.adapter.summary_addr = aisb;
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(data);
    pbdev->routes.adapter.ind_addr = aibv;
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(data);
    pbdev->isc = isc;
    pbdev->noi = FIB_DATA_NOI(data);
    pbdev->sum = FIB_DATA_SUM(data);

    trace_s390_pci_irqs("register", pbdev->routes.adapter.adapter_id);
    return 0;

fail:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return rc;
}
975 
/*
 * Unregister adapter interrupts for a zPCI function: release both
 * indicator areas and clear all interrupt routing state that was
 * established by reg_irqs(). Always returns 0.
 */
int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;

    /* Forget the adapter routing info */
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;

    /* Reset the cached FIB-derived fields */
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    trace_s390_pci_irqs("unregister", pbdev->routes.adapter.adapter_id);
    return 0;
}
994 
/*
 * Register I/O address translation parameters taken from the guest FIB.
 * Validates the PCI base/limit aperture against the function's DMA range
 * and the IOTA designation, then enables either translated or direct-map
 * IOMMU operation. On invalid input a PGM_OPERAND exception is injected
 * and -EINVAL is returned; returns 0 on success.
 */
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib,
                    uintptr_t ra)
{
    S390PCIIOMMU *iommu = pbdev->iommu;
    uint64_t pba = ldq_be_p(&fib.pba) & ~0xfffULL;   /* round down to 4K */
    uint64_t pal = ldq_be_p(&fib.pal) | 0xfff;       /* round up to 4K - 1 */
    uint64_t g_iota = ldq_be_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    /* Aperture must be well-formed and inside the function's DMA window */
    if (pba > pal || pba < pbdev->zpci_fn.sdma || pal > pbdev->zpci_fn.edma) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (t && dt != ZPCI_IOTA_RTTO) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }
    if (!t && !pbdev->rtr_avail) {
        error_report("relaxed translation not allowed");
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    iommu->pba = pba;
    iommu->pal = pal;
    iommu->g_iota = g_iota;

    if (t) {
        s390_pci_iommu_enable(iommu);
    } else {
        s390_pci_iommu_direct_map_enable(iommu);
    }

    return 0;
}
1035 
/*
 * Tear down I/O address translation: disable the IOMMU region and
 * forget the registered aperture and IOTA.
 */
void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
    s390_pci_iommu_disable(iommu);
    iommu->g_iota = 0;
    iommu->pal = 0;
    iommu->pba = 0;
}
1043 
/*
 * Stop FMB (function measurement block) updating for the device:
 * free the update timer (if any) and reset all FMB state, including
 * the in-memory counters.
 */
void fmb_timer_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->fmb_timer != NULL) {
        timer_free(pbdev->fmb_timer);
        pbdev->fmb_timer = NULL;
    }
    pbdev->fmb_addr = 0;
    memset(&pbdev->fmb, 0, sizeof(pbdev->fmb));
}
1053 
/*
 * Store a single FMB field of the given width (1, 2, 4 or 8 bytes) at
 * fmb_addr + offset in guest memory, big-endian for multi-byte widths.
 * On any store failure an FMBA error event is generated and FMB
 * updating is stopped via fmb_timer_free().
 * Returns MEMTX_OK (0) on success, a non-zero MemTxResult otherwise.
 */
static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val,
                         int len)
{
    uint64_t dst = pbdev->fmb_addr + offset;
    MemTxResult res;

    switch (len) {
    case 1:
        address_space_stb(&address_space_memory, dst, val,
                          MEMTXATTRS_UNSPECIFIED, &res);
        break;
    case 2:
        address_space_stw_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED, &res);
        break;
    case 4:
        address_space_stl_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED, &res);
        break;
    case 8:
        address_space_stq_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED, &res);
        break;
    default:
        /* Unsupported access width */
        res = MEMTX_ERROR;
        break;
    }

    if (res != MEMTX_OK) {
        s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid,
                                      pbdev->fmb_addr, 0);
        fmb_timer_free(pbdev);
    }

    return res;
}
1093 
/*
 * Timer callback: flush the accumulated FMB (function measurement block)
 * contents to the guest address registered via ZPCI_MOD_FC_SET_MEASURE.
 * Re-arms itself every "mui" milliseconds. Any failing store (reported
 * by fmb_do_update(), which also frees the timer) aborts the update.
 */
static void fmb_update(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;
    int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    int i;

    /* Update U bit */
    pbdev->fmb.last_update *= 2;
    pbdev->fmb.last_update |= UPDATE_U_BIT;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }

    /* Update FMB sample count */
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample),
                      pbdev->fmb.sample++,
                      sizeof(pbdev->fmb.sample))) {
        return;
    }

    /* Update FMB counters */
    for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) {
        if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]),
                          pbdev->fmb.counter[i],
                          sizeof(pbdev->fmb.counter[0]))) {
            return;
        }
    }

    /* Clear U bit and update the time */
    pbdev->fmb.last_update = time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    pbdev->fmb.last_update *= 2;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }
    /* Schedule the next update one measurement interval from now */
    timer_mod(pbdev->fmb_timer, t + pbdev->pci_group->zpci_group.mui);
}
1135 
/*
 * Enable adapter interruption forwarding (interpreted mode) for the
 * device based on the FIB. Returns the s390_pci_kvm_aif_enable()
 * result; the trace point fires only on failure.
 */
static int mpcifc_reg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc = s390_pci_kvm_aif_enable(pbdev, fib, pbdev->forwarding_assist);

    if (rc != 0) {
        trace_s390_pci_kvm_aif("enable");
    }
    return rc;
}
1148 
/*
 * Disable adapter interruption forwarding (interpreted mode) for the
 * device. Returns the s390_pci_kvm_aif_disable() result; the trace
 * point fires only on failure.
 */
static int mpcifc_dereg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc = s390_pci_kvm_aif_disable(pbdev);

    if (rc != 0) {
        trace_s390_pci_kvm_aif("disable");
    }
    return rc;
}
1161 
/*
 * MPCIFC - Modify PCI Function Controls. Dispatches on the operation
 * control field taken from r1 to (de)register adapter interrupts,
 * (de/re)register I/O address translation, reset error/blocked state,
 * or configure FMB measurement for the addressed function.
 *
 * @cpu:  calling CPU
 * @r1:   even register holding function handle, DMA address space and oc
 * @fiba: guest logical address of the Function Information Block (FIB)
 * @ar:   access register used for the FIB access
 * @ra:   instruction return address for program-interrupt injection
 *
 * Always returns 0; the outcome is reported through the condition code,
 * a status code stored into r1, or an injected program interrupt.
 */
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                        uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    /* Privileged operation: reject when in problem state */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* Split r1: operation control, DMA address space, function handle */
    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    /* FIB address must be doubleword aligned */
    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("mpcifc", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    /* Function must not be reserved/standby/disabled or permanently broken */
    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    /* Fetch the FIB from guest memory */
    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    /* Only FIB format 0 is supported */
    if (fib.fmt != 0) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        /* With interpretation, delivery is set up via KVM instead */
        if (pbdev->interp) {
            if (mpcifc_reg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (pbdev->summary_ind) {
            /* Interrupts are already registered */
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (pbdev->interp) {
            if (mpcifc_dereg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (!pbdev->summary_ind) {
            /* Nothing is registered to remove */
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        /* Only DMA address space 0 is supported */
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu->enabled) {
            /* Translation is already registered */
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev, fib, ra)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        /* Re-registration: tear down, then register from the new FIB */
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
            if (reg_ioat(env, pbdev, fib, ra)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        /* Recover a blocked or error function to the enabled state */
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        /* Move a function in the error state to the blocked state */
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE: {
        uint64_t fmb_addr = ldq_be_p(&fib.fmb_addr);

        /* Reserved key bits in the FMB address must be zero */
        if (fmb_addr & FMBK_MASK) {
            cc = ZPCI_PCI_LS_ERR;
            s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh,
                                          pbdev->fid, fmb_addr, 0);
            fmb_timer_free(pbdev);
            break;
        }

        if (!fmb_addr) {
            /* Stop updating FMB. */
            fmb_timer_free(pbdev);
            break;
        }

        if (!pbdev->fmb_timer) {
            pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                            fmb_update, pbdev);
        } else if (timer_pending(pbdev->fmb_timer)) {
            /* Remove pending timer to update FMB address. */
            timer_del(pbdev->fmb_timer);
        }
        pbdev->fmb_addr = fmb_addr;
        /* First update one measurement interval (mui ms) from now */
        timer_mod(pbdev->fmb_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                                    pbdev->pci_group->zpci_group.mui);
        break;
    }
    default:
        /* Unknown operation control */
        s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}
1339 
/*
 * STPCIFC - Store PCI Function Controls. Builds a FIB describing the
 * current controls of the function selected by the handle in r1 and
 * stores it at the guest address fiba.
 *
 * @cpu:  calling CPU
 * @r1:   even register holding the function handle and DMA address space
 * @fiba: guest logical address to store the FIB at (doubleword aligned)
 * @ar:   access register used for the FIB store
 * @ra:   instruction return address for program-interrupt injection
 *
 * Always returns 0; the outcome is reported through the condition code,
 * a status code stored into r1, or an injected program interrupt.
 */
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                         uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    /* Privileged operation: reject when in problem state */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    /* Only DMA address space 0 is supported */
    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    /* FIB address must be doubleword aligned */
    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    /* Lookup by index: STPCIFC also works on disabled functions */
    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        /* Disabled function: store an all-zero FIB */
        goto out;
    /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
        /* fallthrough */
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
        /* fallthrough */
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu->enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            /* Report the enabled state in bit 0 of r1 */
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    /* Fill in translation and interrupt registration state, big-endian */
    stq_be_p(&fib.pba, pbdev->iommu->pba);
    stq_be_p(&fib.pal, pbdev->iommu->pal);
    stq_be_p(&fib.iota, pbdev->iommu->g_iota);
    stq_be_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_be_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_be_p(&fib.fmb_addr, pbdev->fmb_addr);

    /* Pack ISC, NOI, indicator offsets and summary bit into fib.data */
    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_be_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}
1433