Selected lines from the i915 GGTT management code (intel_ggtt.c), with the enclosing function noted on each line
1 // SPDX-License-Identifier: MIT
38 if (node->color != color) in i915_ggtt_color_adjust()
39 *end -= I915_GTT_PAGE_SIZE; in i915_ggtt_color_adjust()
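The two lines above are the tail of the GGTT colouring callback: drm_mm uses the "colour" to keep a one-page guard between neighbouring nodes with different cache attributes, so a candidate hole is shrunk by I915_GTT_PAGE_SIZE on any side that borders a differently coloured neighbour. Below is a minimal user-space model of that adjustment; the helper name and parameters are made up for illustration and this is not the drm_mm callback itself.

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096ULL	/* stand-in for I915_GTT_PAGE_SIZE */

/* Shrink a candidate hole [start, end) by one page on each side whose
 * neighbour carries a different colour, leaving a guard page there. */
static void color_adjust(uint64_t *start, uint64_t *end,
			 int prev_color, int next_color, int color)
{
	if (prev_color != color)
		*start += GTT_PAGE_SIZE;
	if (next_color != color)
		*end -= GTT_PAGE_SIZE;
}

int main(void)
{
	uint64_t start = 0x100000, end = 0x200000;

	color_adjust(&start, &end, 1 /* prev */, 0 /* next */, 0 /* colour */);
	printf("usable hole: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}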
44 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
46 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
48 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
50 /* Only VLV supports read-only GGTT mappings */ in ggtt_init_hw()
51 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
54 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
56 if (ggtt->mappable_end) { in ggtt_init_hw()
57 if (!io_mapping_init_wc(&ggtt->iomap, in ggtt_init_hw()
58 ggtt->gmadr.start, in ggtt_init_hw()
59 ggtt->mappable_end)) { in ggtt_init_hw()
60 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
61 return -EIO; in ggtt_init_hw()
64 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, in ggtt_init_hw()
65 ggtt->mappable_end); in ggtt_init_hw()
74 * i915_ggtt_init_hw - Initialize GGTT hardware
87 ret = ggtt_init_hw(&i915->ggtt); in i915_ggtt_init_hw()
96 * unmapping anything from the GTT when VT-d is enabled.
112 mutex_lock(&ggtt->vm.mutex); in i915_ggtt_suspend()
115 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_suspend()
117 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_suspend()
118 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_ggtt_suspend()
126 drm_mm_remove_node(&vma->node); in i915_ggtt_suspend()
130 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_suspend()
131 ggtt->invalidate(ggtt); in i915_ggtt_suspend()
132 atomic_set(&ggtt->vm.open, open); in i915_ggtt_suspend()
134 mutex_unlock(&ggtt->vm.mutex); in i915_ggtt_suspend()
136 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_suspend()
141 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen6_ggtt_invalidate()
143 spin_lock_irq(&uncore->lock); in gen6_ggtt_invalidate()
146 spin_unlock_irq(&uncore->lock); in gen6_ggtt_invalidate()
151 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen8_ggtt_invalidate()
162 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in guc_ggtt_invalidate()
163 struct drm_i915_private *i915 = ggtt->vm.i915; in guc_ggtt_invalidate()
199 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_page()
203 ggtt->invalidate(ggtt); in gen8_ggtt_insert_page()
223 gte = (gen8_pte_t __iomem *)ggtt->gsm; in gen8_ggtt_insert_entries()
224 gte += vma->node.start / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
225 end = gte + vma->node.size / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
227 for_each_sgt_daddr(addr, iter, vma->pages) in gen8_ggtt_insert_entries()
233 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
239 ggtt->invalidate(ggtt); in gen8_ggtt_insert_entries()
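gen8_ggtt_insert_entries() treats the GSM mapping (ggtt->gsm) as a flat array of 64-bit PTEs: the first slot of a binding is node.start / I915_GTT_PAGE_SIZE, one PTE is written per backing page, the tail of the node is padded with the scratch-page encoding, and the GGTT is then invalidated. A stand-alone sketch of that slot arithmetic follows; it uses a plain array and a made-up pte_encode() in place of the iomem accessors and the real encode helpers.

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096ULL
#define NUM_PTES      64	/* toy GSM: 64 slots cover 256 KiB of GGTT */

static uint64_t gsm[NUM_PTES];	/* stands in for the ioremapped ggtt->gsm */

/* Toy encode: address | present bit (the real encode also adds cache bits). */
static uint64_t pte_encode(uint64_t daddr)
{
	return daddr | 1;
}

/* Write one PTE per backing page from the node's first slot, then pad the
 * remainder of the node with the scratch encoding. */
static void insert_entries(uint64_t node_start, uint64_t node_size,
			   const uint64_t *daddrs, unsigned int npages,
			   uint64_t scratch_encode)
{
	uint64_t *gte = gsm + node_start / GTT_PAGE_SIZE;
	uint64_t *end = gte + node_size / GTT_PAGE_SIZE;

	for (unsigned int i = 0; i < npages; i++)
		*gte++ = pte_encode(daddrs[i]);

	while (gte < end)
		*gte++ = scratch_encode;
	/* the real driver now invalidates the GGTT TLBs via ggtt->invalidate */
}

int main(void)
{
	const uint64_t pages[] = { 0x10000, 0x20000, 0x30000 };

	/* bind three pages into a five-page node at GGTT offset 0x8000 (slot 8) */
	insert_entries(0x8000, 5 * GTT_PAGE_SIZE, pages, 3, pte_encode(0));

	for (unsigned int i = 8; i < 13; i++)
		printf("pte[%u] = %#llx\n", i, (unsigned long long)gsm[i]);
	return 0;
}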
250 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_page()
252 iowrite32(vm->pte_encode(addr, level, flags), pte); in gen6_ggtt_insert_page()
254 ggtt->invalidate(ggtt); in gen6_ggtt_insert_page()
261 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
274 gte = (gen6_pte_t __iomem *)ggtt->gsm; in gen6_ggtt_insert_entries()
275 gte += vma->node.start / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
276 end = gte + vma->node.size / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
278 for_each_sgt_daddr(addr, iter, vma->pages) in gen6_ggtt_insert_entries()
279 iowrite32(vm->pte_encode(addr, level, flags), gte++); in gen6_ggtt_insert_entries()
284 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
290 ggtt->invalidate(ggtt); in gen6_ggtt_insert_entries()
304 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_clear_range()
306 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; in gen8_ggtt_clear_range()
307 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen8_ggtt_clear_range()
328 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); in bxt_vtd_ggtt_wa()
342 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); in bxt_vtd_ggtt_insert_page__cb()
343 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_page__cb()
370 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); in bxt_vtd_ggtt_insert_entries__cb()
371 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_entries__cb()
393 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; in gen6_ggtt_clear_range()
394 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen6_ggtt_clear_range()
402 scratch_pte = vm->scratch[0]->encode; in gen6_ggtt_clear_range()
427 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, in i915_ggtt_insert_entries()
443 struct drm_i915_gem_object *obj = vma->obj; in ggtt_bind_vma()
454 vm->insert_entries(vm, vma, cache_level, pte_flags); in ggtt_bind_vma()
455 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; in ggtt_bind_vma()
460 vm->clear_range(vm, vma->node.start, vma->size); in ggtt_unbind_vma()
468 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) in ggtt_reserve_guc_top()
471 GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); in ggtt_reserve_guc_top()
472 size = ggtt->vm.total - GUC_GGTT_TOP; in ggtt_reserve_guc_top()
474 ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, in ggtt_reserve_guc_top()
478 drm_dbg(&ggtt->vm.i915->drm, in ggtt_reserve_guc_top()
486 if (drm_mm_node_allocated(&ggtt->uc_fw)) in ggtt_release_guc_top()
487 drm_mm_remove_node(&ggtt->uc_fw); in ggtt_release_guc_top()
493 if (drm_mm_node_allocated(&ggtt->error_capture)) in cleanup_init_ggtt()
494 drm_mm_remove_node(&ggtt->error_capture); in cleanup_init_ggtt()
495 mutex_destroy(&ggtt->error_mutex); in cleanup_init_ggtt()
516 * non-WOPCM memory. If GuC is not present or not in use we still need a in init_ggtt()
520 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, in init_ggtt()
521 intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); in init_ggtt()
527 mutex_init(&ggtt->error_mutex); in init_ggtt()
528 if (ggtt->mappable_end) { in init_ggtt()
530 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, in init_ggtt()
531 &ggtt->error_capture, in init_ggtt()
534 0, ggtt->mappable_end, in init_ggtt()
549 /* Clear any non-preallocated blocks */ in init_ggtt()
550 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in init_ggtt()
551 drm_dbg_kms(&ggtt->vm.i915->drm, in init_ggtt()
554 ggtt->vm.clear_range(&ggtt->vm, hole_start, in init_ggtt()
555 hole_end - hole_start); in init_ggtt()
559 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in init_ggtt()
578 if (i915_gem_object_is_readonly(vma->obj)) in aliasing_gtt_bind_vma()
582 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, in aliasing_gtt_bind_vma()
586 vm->insert_entries(vm, vma, cache_level, pte_flags); in aliasing_gtt_bind_vma()
593 vm->clear_range(vm, vma->node.start, vma->size); in aliasing_gtt_unbind_vma()
596 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma); in aliasing_gtt_unbind_vma()
605 ppgtt = i915_ppgtt_create(ggtt->vm.gt); in init_aliasing_ppgtt()
609 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in init_aliasing_ppgtt()
610 err = -ENODEV; in init_aliasing_ppgtt()
614 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); in init_aliasing_ppgtt()
618 err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
623 * Note we only pre-allocate as far as the end of the global in init_aliasing_ppgtt()
624 * GTT. On 48b / 4-level page-tables, the difference is very, in init_aliasing_ppgtt()
628 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); in init_aliasing_ppgtt()
630 ggtt->alias = ppgtt; in init_aliasing_ppgtt()
631 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; in init_aliasing_ppgtt()
633 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); in init_aliasing_ppgtt()
634 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in init_aliasing_ppgtt()
636 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); in init_aliasing_ppgtt()
637 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in init_aliasing_ppgtt()
639 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
643 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
645 i915_vm_put(&ppgtt->vm); in init_aliasing_ppgtt()
653 ppgtt = fetch_and_zero(&ggtt->alias); in fini_aliasing_ppgtt()
657 i915_vm_put(&ppgtt->vm); in fini_aliasing_ppgtt()
659 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in fini_aliasing_ppgtt()
660 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in fini_aliasing_ppgtt()
667 ret = init_ggtt(&i915->ggtt); in i915_init_ggtt()
672 ret = init_aliasing_ppgtt(&i915->ggtt); in i915_init_ggtt()
674 cleanup_init_ggtt(&i915->ggtt); in i915_init_ggtt()
684 atomic_set(&ggtt->vm.open, 0); in ggtt_cleanup_hw()
687 flush_workqueue(ggtt->vm.i915->wq); in ggtt_cleanup_hw()
689 mutex_lock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
691 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) in ggtt_cleanup_hw()
694 if (drm_mm_node_allocated(&ggtt->error_capture)) in ggtt_cleanup_hw()
695 drm_mm_remove_node(&ggtt->error_capture); in ggtt_cleanup_hw()
696 mutex_destroy(&ggtt->error_mutex); in ggtt_cleanup_hw()
701 ggtt->vm.cleanup(&ggtt->vm); in ggtt_cleanup_hw()
703 mutex_unlock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
704 i915_address_space_fini(&ggtt->vm); in ggtt_cleanup_hw()
706 arch_phys_wc_del(ggtt->mtrr); in ggtt_cleanup_hw()
708 if (ggtt->iomap.size) in ggtt_cleanup_hw()
709 io_mapping_fini(&ggtt->iomap); in ggtt_cleanup_hw()
713 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
718 struct i915_ggtt *ggtt = &i915->ggtt; in i915_ggtt_driver_release()
762 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_probe_common()
763 struct pci_dev *pdev = i915->drm.pdev; in ggtt_probe_common()
778 ggtt->gsm = ioremap(phys_addr, size); in ggtt_probe_common()
780 ggtt->gsm = ioremap_wc(phys_addr, size); in ggtt_probe_common()
781 if (!ggtt->gsm) { in ggtt_probe_common()
782 drm_err(&i915->drm, "Failed to map the ggtt page table\n"); in ggtt_probe_common()
783 return -ENOMEM; in ggtt_probe_common()
786 ret = setup_scratch_page(&ggtt->vm); in ggtt_probe_common()
788 drm_err(&i915->drm, "Scratch setup failed\n"); in ggtt_probe_common()
790 iounmap(ggtt->gsm); in ggtt_probe_common()
794 ggtt->vm.scratch[0]->encode = in ggtt_probe_common()
795 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), in ggtt_probe_common()
805 GEM_BUG_ON(vma->pages); in ggtt_set_pages()
811 vma->page_sizes = vma->obj->mm.page_sizes; in ggtt_set_pages()
820 iounmap(ggtt->gsm); in gen6_gmch_remove()
832 struct drm_i915_private *i915 = ggtt->vm.i915; in gen8_gmch_probe()
833 struct pci_dev *pdev = i915->drm.pdev; in gen8_gmch_probe()
839 ggtt->gmadr = pci_resource(pdev, 2); in gen8_gmch_probe()
840 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen8_gmch_probe()
849 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen8_gmch_probe()
851 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; in gen8_gmch_probe()
852 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
853 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
854 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
856 ggtt->vm.clear_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
858 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
860 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ in gen8_gmch_probe()
863 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
864 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
865 ggtt->vm.bind_async_flags = in gen8_gmch_probe()
869 ggtt->invalidate = gen8_ggtt_invalidate; in gen8_gmch_probe()
871 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen8_gmch_probe()
872 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen8_gmch_probe()
873 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen8_gmch_probe()
874 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen8_gmch_probe()
876 ggtt->vm.pte_encode = gen8_ggtt_pte_encode; in gen8_gmch_probe()
878 setup_private_pat(ggtt->vm.gt->uncore); in gen8_gmch_probe()
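The vm.total computed in gen8_gmch_probe() follows directly from the size of the PTE region: each 8-byte gen8 PTE maps one 4 KiB page. A quick check of the arithmetic, using 8 MiB of PTE space purely as an example value rather than a figure read from any particular SKU:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t gsm_size = 8ULL << 20;	/* example: 8 MiB of PTEs */
	const uint64_t pte_size = 8;		/* sizeof(gen8_pte_t) */
	const uint64_t page_size = 4096;	/* I915_GTT_PAGE_SIZE */

	/* 1 Mi entries, one page each: prints "GGTT total = 4096 MiB" */
	uint64_t total = (gsm_size / pte_size) * page_size;

	printf("GGTT total = %llu MiB\n", (unsigned long long)(total >> 20));
	return 0;
}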
976 struct drm_i915_private *i915 = ggtt->vm.i915; in gen6_gmch_probe()
977 struct pci_dev *pdev = i915->drm.pdev; in gen6_gmch_probe()
981 ggtt->gmadr = pci_resource(pdev, 2); in gen6_gmch_probe()
982 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen6_gmch_probe()
988 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { in gen6_gmch_probe()
989 drm_err(&i915->drm, "Unknown GMADR size (%pa)\n", in gen6_gmch_probe()
990 &ggtt->mappable_end); in gen6_gmch_probe()
991 return -ENXIO; in gen6_gmch_probe()
997 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; in gen6_gmch_probe()
999 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen6_gmch_probe()
1001 ggtt->vm.clear_range = nop_clear_range; in gen6_gmch_probe()
1003 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1004 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
1005 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
1006 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
1008 ggtt->invalidate = gen6_ggtt_invalidate; in gen6_gmch_probe()
1011 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
1013 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
1015 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
1017 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
1019 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
1021 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen6_gmch_probe()
1022 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen6_gmch_probe()
1023 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen6_gmch_probe()
1024 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen6_gmch_probe()
1036 struct drm_i915_private *i915 = ggtt->vm.i915; in i915_gmch_probe()
1040 ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL); in i915_gmch_probe()
1042 drm_err(&i915->drm, "failed to set up gmch\n"); in i915_gmch_probe()
1043 return -EIO; in i915_gmch_probe()
1046 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); in i915_gmch_probe()
1048 ggtt->gmadr = in i915_gmch_probe()
1049 (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); in i915_gmch_probe()
1051 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in i915_gmch_probe()
1053 ggtt->do_idle_maps = needs_idle_maps(i915); in i915_gmch_probe()
1054 ggtt->vm.insert_page = i915_ggtt_insert_page; in i915_gmch_probe()
1055 ggtt->vm.insert_entries = i915_ggtt_insert_entries; in i915_gmch_probe()
1056 ggtt->vm.clear_range = i915_ggtt_clear_range; in i915_gmch_probe()
1057 ggtt->vm.cleanup = i915_gmch_remove; in i915_gmch_probe()
1059 ggtt->invalidate = gmch_ggtt_invalidate; in i915_gmch_probe()
1061 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in i915_gmch_probe()
1062 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in i915_gmch_probe()
1063 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in i915_gmch_probe()
1064 ggtt->vm.vma_ops.clear_pages = clear_pages; in i915_gmch_probe()
1066 if (unlikely(ggtt->do_idle_maps)) in i915_gmch_probe()
1067 drm_notice(&i915->drm, in i915_gmch_probe()
1075 struct drm_i915_private *i915 = gt->i915; in ggtt_probe_hw()
1078 ggtt->vm.gt = gt; in ggtt_probe_hw()
1079 ggtt->vm.i915 = i915; in ggtt_probe_hw()
1080 ggtt->vm.dma = &i915->drm.pdev->dev; in ggtt_probe_hw()
1091 if ((ggtt->vm.total - 1) >> 32) { in ggtt_probe_hw()
1092 drm_err(&i915->drm, in ggtt_probe_hw()
1095 ggtt->vm.total >> 20); in ggtt_probe_hw()
1096 ggtt->vm.total = 1ULL << 32; in ggtt_probe_hw()
1097 ggtt->mappable_end = in ggtt_probe_hw()
1098 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1101 if (ggtt->mappable_end > ggtt->vm.total) { in ggtt_probe_hw()
1102 drm_err(&i915->drm, in ggtt_probe_hw()
1105 &ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1106 ggtt->mappable_end = ggtt->vm.total; in ggtt_probe_hw()
1110 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); in ggtt_probe_hw()
1111 drm_dbg(&i915->drm, "GMADR size = %lluM\n", in ggtt_probe_hw()
1112 (u64)ggtt->mappable_end >> 20); in ggtt_probe_hw()
1113 drm_dbg(&i915->drm, "DSM size = %lluM\n", in ggtt_probe_hw()
1120 * i915_ggtt_probe_hw - Probe GGTT hardware location
1127 ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); in i915_ggtt_probe_hw()
1132 drm_info(&i915->drm, "VT-d active for gfx access\n"); in i915_ggtt_probe_hw()
1140 return -EIO; in i915_ggtt_enable_hw()
1147 GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate); in i915_ggtt_enable_guc()
1149 ggtt->invalidate = guc_ggtt_invalidate; in i915_ggtt_enable_guc()
1151 ggtt->invalidate(ggtt); in i915_ggtt_enable_guc()
1157 if (ggtt->invalidate == gen8_ggtt_invalidate) in i915_ggtt_disable_guc()
1161 GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); in i915_ggtt_disable_guc()
1163 ggtt->invalidate = gen8_ggtt_invalidate; in i915_ggtt_disable_guc()
1165 ggtt->invalidate(ggtt); in i915_ggtt_disable_guc()
1174 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_resume()
1177 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_resume()
1180 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_resume()
1183 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_resume()
1184 struct drm_i915_gem_object *obj = vma->obj; in i915_ggtt_resume()
1186 atomic_read(&vma->flags) & I915_VMA_BIND_MASK; in i915_ggtt_resume()
1189 vma->ops->bind_vma(&ggtt->vm, NULL, vma, in i915_ggtt_resume()
1190 obj ? obj->cache_level : 0, in i915_ggtt_resume()
1193 flush |= fetch_and_zero(&obj->write_domain); in i915_ggtt_resume()
1194 obj->read_domains |= I915_GEM_DOMAIN_GTT; in i915_ggtt_resume()
1198 atomic_set(&ggtt->vm.open, open); in i915_ggtt_resume()
1199 ggtt->invalidate(ggtt); in i915_ggtt_resume()
1204 if (INTEL_GEN(ggtt->vm.i915) >= 8) in i915_ggtt_resume()
1205 setup_private_pat(ggtt->vm.gt->uncore); in i915_ggtt_resume()
1212 unsigned int width, unsigned int height, in rotate_pages() argument
1220 src_idx = stride * (height - 1) + column + offset; in rotate_pages()
1221 for (row = 0; row < height; row++) { in rotate_pages()
1222 st->nents++; in rotate_pages()
1233 src_idx -= stride; in rotate_pages()
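rotate_pages() produces a 90-degree-rotated GGTT view by visiting the source tile grid column by column: each column starts at the bottom row (stride * (height - 1) + column + offset) and steps upwards one row at a time (src_idx -= stride). The stand-alone model below just prints the visit order instead of filling scatterlist entries; the function and parameter names mirror the kernel's, but the program itself is illustrative only.

#include <stdio.h>

/* Print the source tile chosen for each destination slot of a rotated view. */
static void rotate_pages(unsigned int offset, unsigned int width,
			 unsigned int height, unsigned int stride)
{
	unsigned int dst = 0;

	for (unsigned int column = 0; column < width; column++) {
		unsigned int src_idx = stride * (height - 1) + column + offset;

		for (unsigned int row = 0; row < height; row++) {
			printf("dst %2u <- src %2u\n", dst++, src_idx);
			src_idx -= stride;
		}
	}
}

int main(void)
{
	/* a 3x2-tile plane inside a buffer whose rows are 4 tiles wide */
	rotate_pages(0, 3, 2, 4);
	return 0;
}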
1245 struct drm_i915_private *i915 = to_i915(obj->base.dev); in intel_rotate_pages()
1248 int ret = -ENOMEM; in intel_rotate_pages()
1260 st->nents = 0; in intel_rotate_pages()
1261 sg = st->sgl; in intel_rotate_pages()
1263 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { in intel_rotate_pages()
1264 sg = rotate_pages(obj, rot_info->plane[i].offset, in intel_rotate_pages()
1265 rot_info->plane[i].width, rot_info->plane[i].height, in intel_rotate_pages()
1266 rot_info->plane[i].stride, st, sg); in intel_rotate_pages()
1275 …drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)… in intel_rotate_pages()
1276 obj->base.size, rot_info->plane[0].width, in intel_rotate_pages()
1277 rot_info->plane[0].height, size); in intel_rotate_pages()
1284 unsigned int width, unsigned int height, in remap_pages() argument
1290 for (row = 0; row < height; row++) { in remap_pages()
1307 st->nents++; in remap_pages()
1315 left -= length; in remap_pages()
1318 offset += stride - width; in remap_pages()
1329 struct drm_i915_private *i915 = to_i915(obj->base.dev); in intel_remap_pages()
1332 int ret = -ENOMEM; in intel_remap_pages()
1344 st->nents = 0; in intel_remap_pages()
1345 sg = st->sgl; in intel_remap_pages()
1347 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { in intel_remap_pages()
1348 sg = remap_pages(obj, rem_info->plane[i].offset, in intel_remap_pages()
1349 rem_info->plane[i].width, rem_info->plane[i].height, in intel_remap_pages()
1350 rem_info->plane[i].stride, st, sg); in intel_remap_pages()
1361 …drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages… in intel_remap_pages()
1362 obj->base.size, rem_info->plane[0].width, in intel_remap_pages()
1363 rem_info->plane[0].height, size); in intel_remap_pages()
1374 unsigned int count = view->partial.size; in intel_partial_pages()
1376 int ret = -ENOMEM; in intel_partial_pages()
1386 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); in intel_partial_pages()
1389 sg = st->sgl; in intel_partial_pages()
1390 st->nents = 0; in intel_partial_pages()
1394 len = min(iter->length - (offset << PAGE_SHIFT), in intel_partial_pages()
1401 st->nents++; in intel_partial_pages()
1402 count -= len >> PAGE_SHIFT; in intel_partial_pages()
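intel_partial_pages() copies only view->partial.size pages, starting view->partial.offset pages into the object, out of the object's sg_table; each iteration is clamped to whatever remains in the current scatterlist entry. The sketch below models that walk with a hypothetical chunk struct in place of scatterlist entries.

#include <stdio.h>

struct chunk {
	unsigned int first_page;	/* first physical page of this chunk */
	unsigned int npages;		/* contiguous pages in this chunk */
};

/* Emit the pages [offset, offset + count) of a chunked page list, clamping
 * each copy to what is left in the current source chunk. */
static void partial_view(const struct chunk *chunks, unsigned int nchunks,
			 unsigned int offset, unsigned int count)
{
	unsigned int i = 0, skip = offset;

	/* locate the chunk containing 'offset' (i915_gem_object_get_sg) */
	while (i < nchunks && skip >= chunks[i].npages)
		skip -= chunks[i++].npages;

	while (count && i < nchunks) {
		unsigned int len = chunks[i].npages - skip;

		if (len > count)
			len = count;
		printf("copy %u pages starting at page %u\n",
		       len, chunks[i].first_page + skip);
		count -= len;
		skip = 0;
		i++;
	}
}

int main(void)
{
	const struct chunk obj[] = { { 0, 4 }, { 100, 8 }, { 300, 2 } };

	partial_view(obj, 3, 6, 5);	/* pages 6..10 of a 14-page object */
	return 0;
}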
1427 * The vma->pages are only valid within the lifespan of the borrowed in i915_get_ggtt_vma_pages()
1428 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so in i915_get_ggtt_vma_pages()
1429 * must be the vma->pages. A simple rule is that vma->pages must only in i915_get_ggtt_vma_pages()
1430 * be accessed when the obj->mm.pages are pinned. in i915_get_ggtt_vma_pages()
1432 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); in i915_get_ggtt_vma_pages()
1434 switch (vma->ggtt_view.type) { in i915_get_ggtt_vma_pages()
1436 GEM_BUG_ON(vma->ggtt_view.type); in i915_get_ggtt_vma_pages()
1439 vma->pages = vma->obj->mm.pages; in i915_get_ggtt_vma_pages()
1443 vma->pages = in i915_get_ggtt_vma_pages()
1444 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); in i915_get_ggtt_vma_pages()
1448 vma->pages = in i915_get_ggtt_vma_pages()
1449 intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); in i915_get_ggtt_vma_pages()
1453 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); in i915_get_ggtt_vma_pages()
1458 if (IS_ERR(vma->pages)) { in i915_get_ggtt_vma_pages()
1459 ret = PTR_ERR(vma->pages); in i915_get_ggtt_vma_pages()
1460 vma->pages = NULL; in i915_get_ggtt_vma_pages()
1461 drm_err(&vma->vm->i915->drm, in i915_get_ggtt_vma_pages()
1463 vma->ggtt_view.type, ret); in i915_get_ggtt_vma_pages()