// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>

#include "dma-iommu.h"

static const struct iommu_ops s390_iommu_ops, s390_iommu_rtr_ops;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

struct s390_domain {
	struct iommu_domain domain;
	struct list_head devices;
	struct zpci_iommu_ctrs ctrs;
	unsigned long *dma_table;
	spinlock_t list_lock;
	struct rcu_head rcu;
	u8 origin_type;
};

static struct iommu_domain blocking_domain;

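/*
 * Helpers to extract the region-first/-second/-third, segment and page
 * table indexes from an I/O virtual address, mirroring the layout of the
 * s390 I/O translation tables walked by the hardware.
 */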
static inline unsigned int calc_rfx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_rsx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_rtx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_sx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_px(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}

static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
{
	*entry &= ZPCI_PTE_FLAG_MASK;
	*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}

static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (rso & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RFX;
}

static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (rto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RSX;
}

static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (sto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RTX;
}

static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
{
	*entry &= ZPCI_STE_FLAG_MASK;
	*entry |= (pto & ZPCI_STE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_SX;
}

static inline void validate_rf_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RFX;
}

static inline void validate_rs_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RSX;
}

static inline void validate_rt_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RTX;
}

static inline void validate_st_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry |= ZPCI_TABLE_VALID;
}

static inline void invalidate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_INVALID;
}

static inline void validate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_VALID;
}

static inline void entry_set_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_PROTECTED;
}

static inline void entry_clr_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_UNPROTECTED;
}

static inline int reg_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static inline int pt_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static inline unsigned long *get_rf_rso(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_rs_rto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_rt_sto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_st_pto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
		return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
	else
		return NULL;
}

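/*
 * Region tables and page tables come from dedicated slab caches so that
 * each allocation has the size and alignment the hardware requires
 * (ZPCI_TABLE_SIZE/ZPCI_TABLE_ALIGN and ZPCI_PT_SIZE/ZPCI_PT_ALIGN).
 */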
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
						   ZPCI_TABLE_SIZE,
						   ZPCI_TABLE_ALIGN,
						   0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
						 ZPCI_PT_SIZE,
						 ZPCI_PT_ALIGN,
						 0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_free_rt_table(unsigned long entry)
{
	unsigned long *rto = get_rs_rto(entry);
	int rtx;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(rto[rtx]))
			dma_free_seg_table(rto[rtx]);

	dma_free_cpu_table(rto);
}

static void dma_free_rs_table(unsigned long entry)
{
	unsigned long *rso = get_rf_rso(entry);
	int rsx;

	for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
		if (reg_entry_isvalid(rso[rsx]))
			dma_free_rt_table(rso[rsx]);

	dma_free_cpu_table(rso);
}

static void dma_cleanup_tables(struct s390_domain *domain)
{
	int rtx, rsx, rfx;

	if (!domain->dma_table)
		return;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
			if (reg_entry_isvalid(domain->dma_table[rfx]))
				dma_free_rs_table(domain->dma_table[rfx]);
		break;
	case ZPCI_TABLE_TYPE_RSX:
		for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
			if (reg_entry_isvalid(domain->dma_table[rsx]))
				dma_free_rt_table(domain->dma_table[rsx]);
		break;
	case ZPCI_TABLE_TYPE_RTX:
		for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
			if (reg_entry_isvalid(domain->dma_table[rtx]))
				dma_free_seg_table(domain->dma_table[rtx]);
		break;
	default:
		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
		return;
	}

	dma_free_cpu_table(domain->dma_table);
}

static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

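/*
 * The walk helpers below look up the next lower level table for a DMA
 * address and allocate it on demand. A cmpxchg() against
 * ZPCI_TABLE_INVALID publishes a newly allocated table; if another CPU
 * won the race, the local allocation is freed and the winner's table is
 * used instead.
 */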
static unsigned long *dma_walk_rs_table(unsigned long *rso,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rsx = calc_rsx(dma_addr);
	unsigned long old_rse, rse;
	unsigned long *rsep, *rto;

	rsep = &rso[rsx];
	rse = READ_ONCE(*rsep);
	if (reg_entry_isvalid(rse)) {
		rto = get_rs_rto(rse);
	} else {
		rto = dma_alloc_cpu_table(gfp);
		if (!rto)
			return NULL;

		set_rs_rto(&rse, virt_to_phys(rto));
		validate_rs_entry(&rse);
		entry_clr_protected(&rse);

		old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
		if (old_rse != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rto);
			rto = get_rs_rto(old_rse);
		}
	}
	return rto;
}

static unsigned long *dma_walk_rf_table(unsigned long *rfo,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rfx = calc_rfx(dma_addr);
	unsigned long old_rfe, rfe;
	unsigned long *rfep, *rso;

	rfep = &rfo[rfx];
	rfe = READ_ONCE(*rfep);
	if (reg_entry_isvalid(rfe)) {
		rso = get_rf_rso(rfe);
	} else {
		rso = dma_alloc_cpu_table(gfp);
		if (!rso)
			return NULL;

		set_rf_rso(&rfe, virt_to_phys(rso));
		validate_rf_entry(&rfe);
		entry_clr_protected(&rfe);

		old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
		if (old_rfe != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rso);
			rso = get_rf_rso(old_rfe);
		}
	}

	if (!rso)
		return NULL;

	return dma_walk_rs_table(rso, dma_addr, gfp);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
	unsigned long old_rte, rte;
	unsigned long *sto;

	rte = READ_ONCE(*rtep);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
	} else {
		sto = dma_alloc_cpu_table(gfp);
		if (!sto)
			return NULL;

		set_rt_sto(&rte, virt_to_phys(sto));
		validate_rt_entry(&rte);
		entry_clr_protected(&rte);

		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
		if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(sto);
			sto = get_rt_sto(old_rte);
		}
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
	unsigned long old_ste, ste;
	unsigned long *pto;

	ste = READ_ONCE(*step);
	if (reg_entry_isvalid(ste)) {
		pto = get_st_pto(ste);
	} else {
		pto = dma_alloc_page_table(gfp);
		if (!pto)
			return NULL;
		set_st_pto(&ste, virt_to_phys(pto));
		validate_st_entry(&ste);
		entry_clr_protected(&ste);

		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
		if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_page_table(pto);
			pto = get_st_pto(old_ste);
		}
	}
	return pto;
}

static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
					     dma_addr_t dma_addr, gfp_t gfp)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
	case ZPCI_TABLE_TYPE_RSX:
		return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
	case ZPCI_TABLE_TYPE_RTX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

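/*
 * Walk, and if necessary populate, the translation tables for dma_addr
 * and return a pointer to the corresponding page table entry, or NULL if
 * a required table could not be allocated.
 */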
static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
					 dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned long *rto, *sto, *pto;
	unsigned int rtx, sx, px;

	rto = dma_walk_region_tables(domain, dma_addr, gfp);
	if (!rto)
		return NULL;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx], gfp);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

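/*
 * Build the new page table entry value locally and install it with a
 * single xchg() so the entry is updated atomically.
 */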
static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
	unsigned long pte;

	pte = READ_ONCE(*ptep);
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(&pte);
	} else {
		set_pt_pfaa(&pte, page_addr);
		validate_pt_entry(&pte);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(&pte);
	else
		entry_clr_protected(&pte);

	xchg(ptep, pte);
}

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return zdev->pft != PCI_FUNC_TYPE_ISM;
	default:
		return false;
	}
}

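/*
 * Largest IOVA covered by the domain's table format: ZPCI_TABLE_SIZE_RT
 * for a region-third table, ZPCI_TABLE_SIZE_RS for a region-second table,
 * and the full 64-bit space for a region-first table.
 */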
static inline u64 max_tbl_size(struct s390_domain *domain)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RTX:
		return ZPCI_TABLE_SIZE_RT - 1;
	case ZPCI_TABLE_TYPE_RSX:
		return ZPCI_TABLE_SIZE_RS - 1;
	case ZPCI_TABLE_TYPE_RFX:
		return U64_MAX;
	default:
		return 0;
	}
}

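/*
 * Pick the smallest table format whose reach covers the device's DMA
 * aperture and that the device supports (zdev->dtsm). If neither a
 * region-second nor a region-first table is usable, fall back to a
 * region-third table and clamp the aperture accordingly.
 */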
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	u64 aperture_size;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	aperture_size = min(s390_iommu_aperture,
			    zdev->end_dma - zdev->start_dma + 1);
	if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
	} else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
		   (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
	} else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
	} else {
		/* Assume RTX available */
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
		aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
	}
	zdev->end_dma = zdev->start_dma + aperture_size - 1;

	s390_domain->domain.pgsize_bitmap = SZ_4K;
	s390_domain->domain.geometry.force_aperture = true;
	s390_domain->domain.geometry.aperture_start = 0;
	s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);

	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD_RCU(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
	struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);

	dma_cleanup_tables(s390_domain);
	kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	rcu_read_lock();
	WARN_ON(!list_empty(&s390_domain->devices));
	rcu_read_unlock();

	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}

static void zdev_s390_domain_update(struct zpci_dev *zdev,
				    struct iommu_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->dom_lock, flags);
	zdev->s390_domain = domain;
	spin_unlock_irqrestore(&zdev->dom_lock, flags);
}

static u64 get_iota_region_flag(struct s390_domain *domain)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RTX:
		return ZPCI_IOTA_RTTO_FLAG;
	case ZPCI_TABLE_TYPE_RSX:
		return ZPCI_IOTA_RSTO_FLAG;
	case ZPCI_TABLE_TYPE_RFX:
		return ZPCI_IOTA_RFTO_FLAG;
	default:
		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
		return 0;
	}
}

static bool reg_ioat_propagate_error(int cc, u8 status)
{
	/*
	 * If the device is in the error state the reset routine
	 * will register the IOAT of the newly set domain on re-enable
	 */
	if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return false;
	/*
	 * If the device was removed treat registration as success
	 * and let the subsequent error event trigger tear down.
	 */
	if (cc == ZPCI_CC_INVAL_HANDLE)
		return false;
	return cc != ZPCI_CC_OK;
}

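/*
 * Register the I/O address translation parameters for the given domain:
 * identity domains register the aperture with an IOTA of zero, blocked
 * domains register nothing, and paging domains register their translation
 * table combined with the matching region format flag.
 */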
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
				      struct iommu_domain *domain, u8 *status)
{
	struct s390_domain *s390_domain;
	int rc = 0;
	u64 iota;

	switch (domain->type) {
	case IOMMU_DOMAIN_IDENTITY:
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
					zdev->end_dma, 0, status);
		break;
	case IOMMU_DOMAIN_BLOCKED:
		/* Nothing to do in this case */
		break;
	default:
		s390_domain = to_s390_domain(domain);
		iota = virt_to_phys(s390_domain->dma_table) |
		       get_iota_region_flag(s390_domain);
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
					zdev->end_dma, iota, status);
	}

	return rc;
}

int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&zdev->dom_lock, flags);

	rc = s390_iommu_domain_reg_ioat(zdev, zdev->s390_domain, status);

	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	return rc;
}

static int blocking_domain_attach_device(struct iommu_domain *domain,
					 struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	unsigned long flags;

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	s390_domain = to_s390_domain(zdev->s390_domain);
	if (zdev->dma_table) {
		spin_lock_irqsave(&s390_domain->list_lock, flags);
		list_del_rcu(&zdev->iommu_list);
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
	}

	zpci_unregister_ioat(zdev, 0);
	zdev->dma_table = NULL;
	zdev_s390_domain_update(zdev, domain);

	return 0;
}

static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	unsigned long flags;
	u8 status;
	int cc;

	if (!zdev)
		return -ENODEV;

	if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
		    domain->geometry.aperture_end < zdev->start_dma))
		return -EINVAL;

	blocking_domain_attach_device(&blocking_domain, dev);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
	if (reg_ioat_propagate_error(cc, status))
		return -EIO;
	zdev->dma_table = s390_domain->dma_table;
	zdev_s390_domain_update(zdev, domain);

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;
}

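/*
 * Report IOVA ranges the device cannot use as reserved regions: the range
 * below zdev->start_dma and, for paging domains, the range between
 * zdev->end_dma and the maximum IOVA the translation table could address.
 */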
static void s390_iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_resv_region *region;
	u64 max_size, end_resv;
	unsigned long flags;

	if (zdev->start_dma) {
		region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
						 IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}

	spin_lock_irqsave(&zdev->dom_lock, flags);
	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
		spin_unlock_irqrestore(&zdev->dom_lock, flags);
		return;
	}

	max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	if (zdev->end_dma < max_size) {
		end_resv = max_size - zdev->end_dma;
		region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
						 0, IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}
}

static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	zdev = to_zpci_dev(dev);

	if (zdev->start_dma > zdev->end_dma)
		return ERR_PTR(-EINVAL);

	if (zdev->tlb_refresh)
		dev->iommu->shadow_on_flush = 1;

	/* Start with DMA blocked */
	spin_lock_init(&zdev->dom_lock);
	zdev_s390_domain_update(zdev, &blocking_domain);

	return &zdev->iommu_dev;
}

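/*
 * IOTLB maintenance: zpci_refresh_all() refreshes a device's entire DMA
 * aperture, flush_iotlb_all() does so for every device attached to the
 * domain, and iotlb_sync() refreshes only the IOVA range collected in the
 * gather structure.
 */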
static int zpci_refresh_all(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
				  zdev->end_dma - zdev->start_dma + 1);
}

static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.global_rpcits);
		zpci_refresh_all(zdev);
	}
	rcu_read_unlock();
}

static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = gather->end - gather->start + 1;
	struct zpci_dev *zdev;

	/* If nothing was added to the gather there is nothing to flush */
	if (!gather->end)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.sync_rpcits);
		zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
				   size);
	}
	rcu_read_unlock();
}

static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
				     unsigned long iova, size_t size)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		if (!zdev->tlb_refresh)
			continue;
		atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
		ret = zpci_refresh_trans((u64)zdev->fh << 32,
					 iova, size);
		/*
		 * Let the hypervisor discover invalidated entries,
		 * allowing it to free IOVAs and unpin pages.
		 */
		if (ret == -ENOMEM) {
			ret = zpci_refresh_all(zdev);
			if (ret)
				break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
				     phys_addr_t pa, dma_addr_t dma_addr,
				     unsigned long nr_pages, int flags,
				     gfp_t gfp)
{
	phys_addr_t page_addr = pa & PAGE_MASK;
	unsigned long *entry;
	unsigned long i;
	int rc;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
		if (unlikely(!entry)) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	return 0;

undo_cpu_trans:
	while (i-- > 0) {
		dma_addr -= PAGE_SIZE;
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
		if (!entry)
			break;
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
	}

	return rc;
}

static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
				       dma_addr_t dma_addr, unsigned long nr_pages)
{
	unsigned long *entry;
	unsigned long i;
	int rc = 0;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
		if (unlikely(!entry)) {
			rc = -EINVAL;
			break;
		}
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
		dma_addr += PAGE_SIZE;
	}

	return rc;
}

static int s390_iommu_map_pages(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t pgsize, size_t pgcount,
				int prot, gfp_t gfp, size_t *mapped)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (pgsize != SZ_4K)
		return -EINVAL;

	if (iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
				       pgcount, flags, gfp);
	if (!rc) {
		*mapped = size;
		atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
	}

	return rc;
}

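/*
 * For iova_to_phys the lookup must first descend through any region-first
 * and region-second tables, depending on the domain's table format, before
 * the region-third -> segment -> page table walk below.
 */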
static unsigned long *get_rso_from_iova(struct s390_domain *domain,
					dma_addr_t iova)
{
	unsigned long *rfo;
	unsigned long rfe;
	unsigned int rfx;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		rfo = domain->dma_table;
		rfx = calc_rfx(iova);
		rfe = READ_ONCE(rfo[rfx]);
		if (!reg_entry_isvalid(rfe))
			return NULL;
		return get_rf_rso(rfe);
	case ZPCI_TABLE_TYPE_RSX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static unsigned long *get_rto_from_iova(struct s390_domain *domain,
					dma_addr_t iova)
{
	unsigned long *rso;
	unsigned long rse;
	unsigned int rsx;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
	case ZPCI_TABLE_TYPE_RSX:
		rso = get_rso_from_iova(domain, iova);
		rsx = calc_rsx(iova);
		rse = READ_ONCE(rso[rsx]);
		if (!reg_entry_isvalid(rse))
			return NULL;
		return get_rs_rto(rse);
	case ZPCI_TABLE_TYPE_RTX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *rto, *sto, *pto;
	unsigned long ste, pte, rte;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rto = get_rto_from_iova(s390_domain, iova);
	if (!rto)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);

	rte = READ_ONCE(rto[rtx]);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
		ste = READ_ONCE(sto[sx]);
		if (reg_entry_isvalid(ste)) {
			pto = get_st_pto(ste);
			pte = READ_ONCE(pto[px]);
			if (pt_entry_isvalid(pte))
				phys = pte & ZPCI_PTE_ADDR_MASK;
		}
	}

	return phys;
}

static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
				     unsigned long iova,
				     size_t pgsize, size_t pgcount,
				     struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int rc;

	if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
		    (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
		return 0;

	rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
	if (rc)
		return 0;

	iommu_iotlb_gather_add_range(gather, iova, size);
	atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);

	return size;
}

struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
{
	struct s390_domain *s390_domain;

	lockdep_assert_held(&zdev->dom_lock);

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
		return NULL;

	s390_domain = to_s390_domain(zdev->s390_domain);
	return &s390_domain->ctrs;
}

int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	if (zdev->rtr_avail) {
		rc = iommu_device_register(&zdev->iommu_dev,
					   &s390_iommu_rtr_ops, NULL);
	} else {
		rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops,
					   NULL);
	}
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict")) {
		pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
		iommu_set_dma_strict();
	}
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);

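/*
 * By default the aperture covers all of memory (high_memory) scaled by
 * s390_iommu_aperture_factor; a factor of 0 makes the aperture unlimited.
 */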
static int __init s390_iommu_init(void)
{
	int rc;

	iommu_dma_forcedac = true;
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	rc = dma_alloc_cpu_table_caches();
	if (rc)
		return rc;

	return rc;
}
subsys_initcall(s390_iommu_init);

static int s390_attach_dev_identity(struct iommu_domain *domain,
				    struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	u8 status;
	int cc;

	blocking_domain_attach_device(&blocking_domain, dev);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
	if (reg_ioat_propagate_error(cc, status))
		return -EIO;

	zdev_s390_domain_update(zdev, domain);

	return 0;
}

static const struct iommu_domain_ops s390_identity_ops = {
	.attach_dev = s390_attach_dev_identity,
};

static struct iommu_domain s390_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &s390_identity_ops,
};

static struct iommu_domain blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &(const struct iommu_domain_ops) {
		.attach_dev = blocking_domain_attach_device,
	}
};

#define S390_IOMMU_COMMON_OPS() \
	.blocked_domain = &blocking_domain, \
	.release_domain = &blocking_domain, \
	.capable = s390_iommu_capable, \
	.domain_alloc_paging = s390_domain_alloc_paging, \
	.probe_device = s390_iommu_probe_device, \
	.device_group = generic_device_group, \
	.get_resv_regions = s390_iommu_get_resv_regions, \
	.default_domain_ops = &(const struct iommu_domain_ops) { \
		.attach_dev = s390_iommu_attach_device, \
		.map_pages = s390_iommu_map_pages, \
		.unmap_pages = s390_iommu_unmap_pages, \
		.flush_iotlb_all = s390_iommu_flush_iotlb_all, \
		.iotlb_sync = s390_iommu_iotlb_sync, \
		.iotlb_sync_map = s390_iommu_iotlb_sync_map, \
		.iova_to_phys = s390_iommu_iova_to_phys, \
		.free = s390_domain_free, \
	}

static const struct iommu_ops s390_iommu_ops = {
	S390_IOMMU_COMMON_OPS()
};

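/*
 * Ops variant registered for devices with zdev->rtr_avail set; it
 * additionally offers the identity domain.
 */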
static const struct iommu_ops s390_iommu_rtr_ops = {
	.identity_domain = &s390_identity_domain,
	S390_IOMMU_COMMON_OPS()
};