Lines matching "use guard pages" in drivers/gpu/drm/i915/gt/intel_ggtt.c (each matched line is annotated with the function containing it)

1 // SPDX-License-Identifier: MIT
13 #include <drm/intel/intel-gtt.h>
46 * GTT and any objects within the GTT, i.e. we use the color adjustment in i915_ggtt_color_adjust()
47 * to insert a guard page to prevent prefetches crossing over the in i915_ggtt_color_adjust()
51 if (node->color != color) in i915_ggtt_color_adjust()
52 *end -= I915_GTT_PAGE_SIZE; in i915_ggtt_color_adjust()
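The hook above implements guard pages through drm_mm's colouring: when the colour (cache domain) of a neighbouring node differs from that of the hole being considered, the usable range is shrunk by one GTT page so a scratch PTE always separates the two. A minimal standalone model of that rule, with all names and values illustrative rather than the driver's:

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096ULL

/* Shrink [start, end) by one page on each side whose neighbour's
 * colour differs, mirroring i915_ggtt_color_adjust() above. */
static void color_adjust(unsigned long prev_color, unsigned long next_color,
			 unsigned long color, uint64_t *start, uint64_t *end)
{
	if (prev_color != color)
		*start += GTT_PAGE_SIZE;	/* guard page below the hole */
	if (next_color != color)
		*end -= GTT_PAGE_SIZE;		/* guard page above the hole */
}

int main(void)
{
	uint64_t start = 0x100000, end = 0x200000;

	color_adjust(0, 1, 1, &start, &end);	/* only the lower neighbour differs */
	printf("usable hole: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}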
57 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
59 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
61 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
63 /* Only VLV supports read-only GGTT mappings */ in ggtt_init_hw()
64 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
67 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
69 if (ggtt->mappable_end) { in ggtt_init_hw()
70 if (!io_mapping_init_wc(&ggtt->iomap, in ggtt_init_hw()
71 ggtt->gmadr.start, in ggtt_init_hw()
72 ggtt->mappable_end)) { in ggtt_init_hw()
73 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
74 return -EIO; in ggtt_init_hw()
77 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, in ggtt_init_hw()
78 ggtt->mappable_end); in ggtt_init_hw()
87 * i915_ggtt_init_hw - Initialize GGTT hardware
95 * Note that we use page colouring to enforce a guard page at the in i915_ggtt_init_hw()
98 * and beyond the end of the GTT if we do not provide a guard. in i915_ggtt_init_hw()
100 ret = ggtt_init_hw(to_gt(i915)->ggtt); in i915_ggtt_init_hw()
108 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
120 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_suspend_vm()
123 i915_gem_drain_freed_objects(vm->i915); in i915_ggtt_suspend_vm()
125 mutex_lock(&vm->mutex); in i915_ggtt_suspend_vm()
129 * FIXME: Use an argument to i915_vma_unbind() instead? in i915_ggtt_suspend_vm()
131 save_skip_rewrite = vm->skip_pte_rewrite; in i915_ggtt_suspend_vm()
132 vm->skip_pte_rewrite = true; in i915_ggtt_suspend_vm()
134 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { in i915_ggtt_suspend_vm()
135 struct drm_i915_gem_object *obj = vma->obj; in i915_ggtt_suspend_vm()
137 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_ggtt_suspend_vm()
150 mutex_unlock(&vm->mutex); in i915_ggtt_suspend_vm()
157 vm->skip_pte_rewrite = save_skip_rewrite; in i915_ggtt_suspend_vm()
165 drm_mm_remove_node(&vma->node); in i915_ggtt_suspend_vm()
171 vm->clear_range(vm, 0, vm->total); in i915_ggtt_suspend_vm()
173 vm->skip_pte_rewrite = save_skip_rewrite; in i915_ggtt_suspend_vm()
175 mutex_unlock(&vm->mutex); in i915_ggtt_suspend_vm()
177 drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list)); in i915_ggtt_suspend_vm()
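The mutex_unlock() followed by the skip_pte_rewrite restore above is the visible half of a retry loop: when a vma cannot be unbound while vm->mutex is held, the walk drops the lock, lets the object quiesce, and starts over from the top of bound_list. A toy standalone model of that control flow (all names assumed, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct vma { bool busy; };

static bool try_unbind(struct vma *v)
{
	if (v->busy) {
		v->busy = false;	/* pretend waiting made it idle */
		return false;		/* caller must unlock and retry */
	}
	return true;
}

static void suspend_vm(struct vma *vmas, int n)
{
	int i;
retry:
	/* mutex_lock(&vm->mutex); vm->skip_pte_rewrite = true; */
	for (i = 0; i < n; i++) {
		if (!try_unbind(&vmas[i]))
			goto retry;	/* drop the lock, restore the flag */
	}
	/* vm->clear_range(vm, 0, vm->total); mutex_unlock(&vm->mutex); */
}

int main(void)
{
	struct vma v[2] = { { .busy = false }, { .busy = true } };

	suspend_vm(v, 2);
	puts("all vmas unbound; every PTE now points at scratch");
	return 0;
}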
184 i915_ggtt_suspend_vm(&ggtt->vm, false); in i915_ggtt_suspend()
185 ggtt->invalidate(ggtt); in i915_ggtt_suspend()
187 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in i915_ggtt_suspend()
193 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen6_ggtt_invalidate()
195 spin_lock_irq(&uncore->lock); in gen6_ggtt_invalidate()
198 spin_unlock_irq(&uncore->lock); in gen6_ggtt_invalidate()
206 * writes when the WC buffer is flushed, so we can't use it, but have to in needs_wc_ggtt_mapping()
218 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen8_ggtt_invalidate()
226 if (needs_wc_ggtt_mapping(ggtt->vm.i915)) in gen8_ggtt_invalidate()
233 struct intel_uncore *uncore = gt->uncore; in guc_ggtt_ct_invalidate()
236 with_intel_runtime_pm_if_active(uncore->rpm, wakeref) in guc_ggtt_ct_invalidate()
242 struct drm_i915_private *i915 = ggtt->vm.i915; in guc_ggtt_invalidate()
247 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) { in guc_ggtt_invalidate()
251 intel_uncore_write_fw(gt->uncore, in guc_ggtt_invalidate()
255 intel_uncore_write_fw(gt->uncore, in guc_ggtt_invalidate()
294 struct intel_gt *gt = ggtt->vm.gt; in should_update_ggtt_with_bind()
302 struct intel_gt *gt = ggtt->vm.gt; in gen8_ggtt_bind_get_ce()
307 ce = gt->engine[BCS0]->bind_context; in gen8_ggtt_bind_get_ce()
320 intel_engine_pm_get(ce->engine); in gen8_ggtt_bind_get_ce()
327 intel_engine_pm_put(ce->engine); in gen8_ggtt_bind_put_ce()
328 intel_gt_pm_put(ce->engine->gt, wakeref); in gen8_ggtt_bind_put_ce()
332 struct sg_table *pages, u32 num_entries, in gen8_ggtt_bind_ptes() argument
336 struct intel_gt *gt = ggtt->vm.gt; in gen8_ggtt_bind_ptes()
337 const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode; in gen8_ggtt_bind_ptes()
351 if (pages) in gen8_ggtt_bind_ptes()
352 iter = __sgt_iter(pages->sgl, true); in gen8_ggtt_bind_ptes()
363 if (mutex_lock_interruptible(&ce->timeline->mutex)) in gen8_ggtt_bind_ptes()
371 mutex_unlock(&ce->timeline->mutex); in gen8_ggtt_bind_ptes()
386 if (pages) { in gen8_ggtt_bind_ptes()
397 n_ptes - count); in gen8_ggtt_bind_ptes()
398 cs += (n_ptes - count) * 2; in gen8_ggtt_bind_ptes()
411 mutex_unlock(&ce->timeline->mutex); in gen8_ggtt_bind_ptes()
414 if (rq->fence.error) in gen8_ggtt_bind_ptes()
419 num_entries -= n_ptes; in gen8_ggtt_bind_ptes()
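gen8_ggtt_bind_ptes() updates the GGTT by submitting requests on a blitter bind context (gt->engine[BCS0]->bind_context above) rather than writing through the CPU mapping: each pass covers up to a fixed batch of PTEs, slots for which no backing page remains are filled with the scratch encoding (the n_ptes - count arithmetic above), and the loop advances with num_entries -= n_ptes. A standalone model of that chunking, where MAX_PTES_PER_REQ is an assumed name and value:

#include <stdio.h>

#define MAX_PTES_PER_REQ 512	/* assumed per-request batch size */

int main(void)
{
	unsigned int num_entries = 1200;	/* PTE slots to rewrite */
	unsigned int pages_left = 1100;		/* backing pages available */
	unsigned int offset = 0;

	while (num_entries) {
		unsigned int n_ptes = num_entries < MAX_PTES_PER_REQ ?
				      num_entries : MAX_PTES_PER_REQ;
		unsigned int count = pages_left < n_ptes ? pages_left : n_ptes;

		printf("request @%u: %u PTEs from pages, %u scratch\n",
		       offset, count, n_ptes - count);
		pages_left -= count;
		offset += n_ptes;
		num_entries -= n_ptes;
	}
	return 0;
}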
446 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_page()
448 gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags)); in gen8_ggtt_insert_page()
450 ggtt->invalidate(ggtt); in gen8_ggtt_insert_page()
460 pte = ggtt->vm.pte_encode(addr, pat_index, flags); in gen8_ggtt_insert_page_bind()
463 return ggtt->invalidate(ggtt); in gen8_ggtt_insert_page_bind()
474 const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); in gen8_ggtt_insert_entries()
485 gte = (gen8_pte_t __iomem *)ggtt->gsm; in gen8_ggtt_insert_entries()
486 gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
487 end = gte + vma_res->guard / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
489 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
490 end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
492 for_each_sgt_daddr(addr, iter, vma_res->bi.pages) in gen8_ggtt_insert_entries()
498 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
504 ggtt->invalidate(ggtt); in gen8_ggtt_insert_entries()
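The pointer arithmetic above is where the guard actually lands in the page table: the PTE cursor starts at vma_res->start minus the guard, the leading guard pages get the scratch encoding, then the object's pages are written, then scratch again through the trailing guard. Worked numbers for a 16 KiB node with a one-page guard on each side (values purely illustrative):

#include <stdio.h>

#define GTT_PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long start = 0x10000;		/* vma_res->start */
	unsigned long long guard = GTT_PAGE_SIZE;	/* vma_res->guard */
	unsigned long long node_size = 4 * GTT_PAGE_SIZE;

	unsigned long long first = (start - guard) / GTT_PAGE_SIZE;
	unsigned long long total = (guard + node_size + guard) / GTT_PAGE_SIZE;

	/* PTEs 15..20: one scratch, four object pages, one scratch */
	printf("PTE window %llu..%llu (%llu entries)\n",
	       first, first + total - 1, total);
	return 0;
}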
512 gen8_pte_t scratch_pte = vm->scratch[0]->encode; in __gen8_ggtt_insert_entries_bind()
516 pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); in __gen8_ggtt_insert_entries_bind()
517 start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; in __gen8_ggtt_insert_entries_bind()
518 end = start + vma_res->guard / I915_GTT_PAGE_SIZE; in __gen8_ggtt_insert_entries_bind()
519 if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte)) in __gen8_ggtt_insert_entries_bind()
523 end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; in __gen8_ggtt_insert_entries_bind()
524 if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages, in __gen8_ggtt_insert_entries_bind()
525 vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode)) in __gen8_ggtt_insert_entries_bind()
528 start += vma_res->node_size / I915_GTT_PAGE_SIZE; in __gen8_ggtt_insert_entries_bind()
529 if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte)) in __gen8_ggtt_insert_entries_bind()
546 return ggtt->invalidate(ggtt); in gen8_ggtt_insert_entries_bind()
557 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_clear_range()
559 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; in gen8_ggtt_clear_range()
560 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen8_ggtt_clear_range()
578 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_scratch_range_bind()
579 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen8_ggtt_scratch_range_bind()
588 return ggtt->invalidate(ggtt); in gen8_ggtt_scratch_range_bind()
601 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_page()
603 iowrite32(vm->pte_encode(addr, pat_index, flags), pte); in gen6_ggtt_insert_page()
605 ggtt->invalidate(ggtt); in gen6_ggtt_insert_page()
612 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
625 gte = (gen6_pte_t __iomem *)ggtt->gsm; in gen6_ggtt_insert_entries()
626 gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
628 end = gte + vma_res->guard / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
630 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
631 end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
632 for_each_sgt_daddr(addr, iter, vma_res->bi.pages) in gen6_ggtt_insert_entries()
633 iowrite32(vm->pte_encode(addr, pat_index, flags), gte++); in gen6_ggtt_insert_entries()
638 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
644 ggtt->invalidate(ggtt); in gen6_ggtt_insert_entries()
661 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); in bxt_vtd_ggtt_wa()
675 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, in bxt_vtd_ggtt_insert_page__cb()
676 arg->pat_index, 0); in bxt_vtd_ggtt_insert_page__cb()
677 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_page__cb()
704 gen8_ggtt_insert_entries(arg->vm, arg->vma_res, in bxt_vtd_ggtt_insert_entries__cb()
705 arg->pat_index, arg->flags); in bxt_vtd_ggtt_insert_entries__cb()
706 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_entries__cb()
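Both __cb helpers unpack an argument struct, perform the ordinary gen8 insert, and then run the bxt_vtd_ggtt_wa() posting-read flush. In the full file they are driven under stop_machine(), which is what the __BKL suffix on the hooks installed later in gen8_gmch_probe() refers to; a sketch close to that shape, with the argument layout inferred from the fragments above:

/* Assumed wrapper shape: stall every CPU while the PTE is written so
 * nothing touches the aperture concurrently (BXT + VT-d workaround). */
static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr, u64 offset,
					  unsigned int pat_index, u32 unused)
{
	struct insert_page arg = { vm, addr, offset, pat_index };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}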
728 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; in gen6_ggtt_clear_range()
729 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen6_ggtt_clear_range()
737 scratch_pte = vm->scratch[0]->encode; in gen6_ggtt_clear_range()
750 if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK)) in intel_ggtt_bind_vma()
753 vma_res->bound_flags |= flags; in intel_ggtt_bind_vma()
757 if (vma_res->bi.readonly) in intel_ggtt_bind_vma()
759 if (vma_res->bi.lmem) in intel_ggtt_bind_vma()
762 vm->insert_entries(vm, vma_res, pat_index, pte_flags); in intel_ggtt_bind_vma()
763 vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE; in intel_ggtt_bind_vma()
769 vm->clear_range(vm, vma_res->start, vma_res->vma_size); in intel_ggtt_unbind_vma()
776 * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
781 #define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
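GUC_GGTT_TOP is the highest GGTT address the GuC can use, so everything from there up to 4 GiB must be kept away from objects the GuC might be handed. Assuming the driver's value of 0xFEE00000 for GUC_GGTT_TOP, the reservation works out to 18 MiB at the very top of the address space:

#include <stdio.h>

int main(void)
{
	unsigned long long sz_4g = 1ULL << 32;
	unsigned long long guc_ggtt_top = 0xFEE00000ULL;	/* assumed */

	/* GUC_TOP_RESERVE_SIZE = SZ_4G - GUC_GGTT_TOP */
	printf("GuC top reserve: %llu MiB at offset %#llx\n",
	       (sz_4g - guc_ggtt_top) >> 20, guc_ggtt_top);
	return 0;
}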
788 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) in ggtt_reserve_guc_top()
791 GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE); in ggtt_reserve_guc_top()
792 offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE; in ggtt_reserve_guc_top()
794 ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, in ggtt_reserve_guc_top()
798 drm_dbg(&ggtt->vm.i915->drm, in ggtt_reserve_guc_top()
806 if (drm_mm_node_allocated(&ggtt->uc_fw)) in ggtt_release_guc_top()
807 drm_mm_remove_node(&ggtt->uc_fw); in ggtt_release_guc_top()
813 if (drm_mm_node_allocated(&ggtt->error_capture)) in cleanup_init_ggtt()
814 drm_mm_remove_node(&ggtt->error_capture); in cleanup_init_ggtt()
815 mutex_destroy(&ggtt->error_mutex); in cleanup_init_ggtt()
836 * non-WOPCM memory. If GuC is not present or not in use we still need a in init_ggtt()
840 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, in init_ggtt()
841 intel_wopcm_guc_size(&ggtt->vm.gt->wopcm)); in init_ggtt()
847 mutex_init(&ggtt->error_mutex); in init_ggtt()
848 if (ggtt->mappable_end) { in init_ggtt()
856 * Since 0 may already be in use by the system (e.g. the BIOS in init_ggtt()
861 * for an error-capture, remain silent. We can afford not in init_ggtt()
868 * (write-combining allows it) add scratch page after error in init_ggtt()
871 ggtt->error_capture.size = 2 * I915_GTT_PAGE_SIZE; in init_ggtt()
872 ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; in init_ggtt()
873 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) in init_ggtt()
874 drm_mm_insert_node_in_range(&ggtt->vm.mm, in init_ggtt()
875 &ggtt->error_capture, in init_ggtt()
876 ggtt->error_capture.size, 0, in init_ggtt()
877 ggtt->error_capture.color, in init_ggtt()
878 0, ggtt->mappable_end, in init_ggtt()
881 if (drm_mm_node_allocated(&ggtt->error_capture)) { in init_ggtt()
882 u64 start = ggtt->error_capture.start; in init_ggtt()
883 u64 size = ggtt->error_capture.size; in init_ggtt()
885 ggtt->vm.scratch_range(&ggtt->vm, start, size); in init_ggtt()
886 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
887 "Reserved GGTT:[%llx, %llx] for use by error capture\n", in init_ggtt()
900 /* Clear any non-preallocated blocks */ in init_ggtt()
901 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in init_ggtt()
902 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
905 ggtt->vm.clear_range(&ggtt->vm, hole_start, in init_ggtt()
906 hole_end - hole_start); in init_ggtt()
909 /* And finally clear the reserved guard page */ in init_ggtt()
910 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in init_ggtt()
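The error-capture setup above prefers a fixed reservation via drm_mm_reserve_node(), taking a known low offset so stray writes hit an unevictable page (per the comment about 0 possibly being claimed by the BIOS), and only falls back to drm_mm_insert_node_in_range() somewhere in the mappable aperture if that offset is taken. A toy standalone model of the try-fixed-then-fallback pattern (the allocator and the busy range are stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Fixed reservation succeeds only if [start, start + size) does not
 * overlap an already-occupied range, like drm_mm_reserve_node(). */
static bool reserve_fixed(unsigned long long start, unsigned long long size,
			  unsigned long long busy_start,
			  unsigned long long busy_end)
{
	return start + size <= busy_start || start >= busy_end;
}

int main(void)
{
	unsigned long long size = 2 * 4096ULL;	/* capture page + scratch page */

	if (reserve_fixed(0, size, 0, 0x2000))	/* BIOS framebuffer holds page 0 */
		puts("error capture reserved at 0x0");
	else
		puts("0x0 busy; inserting anywhere in the mappable range");
	return 0;
}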
929 if (vma_res->bi.readonly) in aliasing_gtt_bind_vma()
933 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, in aliasing_gtt_bind_vma()
937 vm->insert_entries(vm, vma_res, pat_index, pte_flags); in aliasing_gtt_bind_vma()
939 vma_res->bound_flags |= flags; in aliasing_gtt_bind_vma()
945 if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND) in aliasing_gtt_unbind_vma()
946 vm->clear_range(vm, vma_res->start, vma_res->vma_size); in aliasing_gtt_unbind_vma()
948 if (vma_res->bound_flags & I915_VMA_LOCAL_BIND) in aliasing_gtt_unbind_vma()
949 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res); in aliasing_gtt_unbind_vma()
958 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0); in init_aliasing_ppgtt()
962 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in init_aliasing_ppgtt()
963 err = -ENODEV; in init_aliasing_ppgtt()
967 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); in init_aliasing_ppgtt()
971 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL); in init_aliasing_ppgtt()
972 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
973 i915_gem_object_unlock(ppgtt->vm.scratch[0]); in init_aliasing_ppgtt()
978 * Note we only pre-allocate as far as the end of the global in init_aliasing_ppgtt()
979 * GTT. On 48b / 4-level page-tables, the difference is very, in init_aliasing_ppgtt()
983 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); in init_aliasing_ppgtt()
985 ggtt->alias = ppgtt; in init_aliasing_ppgtt()
986 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; in init_aliasing_ppgtt()
988 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma); in init_aliasing_ppgtt()
989 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in init_aliasing_ppgtt()
991 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma); in init_aliasing_ppgtt()
992 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in init_aliasing_ppgtt()
994 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
998 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
1000 i915_vm_put(&ppgtt->vm); in init_aliasing_ppgtt()
1008 ppgtt = fetch_and_zero(&ggtt->alias); in fini_aliasing_ppgtt()
1012 i915_vm_put(&ppgtt->vm); in fini_aliasing_ppgtt()
1014 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in fini_aliasing_ppgtt()
1015 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in fini_aliasing_ppgtt()
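An aliasing PPGTT shadows the whole GGTT with a single shared page-table tree: aliasing_gtt_bind_vma() writes the alias ppGTT for a local bind and the GGTT itself for a global bind, and the unbind path above mirrors both cases. A compact standalone model of that flag dispatch (flag values made up):

#include <stdio.h>

#define BIND_GLOBAL (1u << 0)	/* stand-in for I915_VMA_GLOBAL_BIND */
#define BIND_LOCAL  (1u << 1)	/* stand-in for I915_VMA_LOCAL_BIND */

static void bind(unsigned int flags)
{
	if (flags & BIND_LOCAL)
		puts("write PTEs into the aliasing ppGTT");
	if (flags & BIND_GLOBAL)
		puts("write PTEs into the GGTT");
}

int main(void)
{
	bind(BIND_GLOBAL | BIND_LOCAL);	/* one vma, both translations */
	return 0;
}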
1022 ret = init_ggtt(to_gt(i915)->ggtt); in i915_init_ggtt()
1027 ret = init_aliasing_ppgtt(to_gt(i915)->ggtt); in i915_init_ggtt()
1029 cleanup_init_ggtt(to_gt(i915)->ggtt); in i915_init_ggtt()
1039 flush_workqueue(ggtt->vm.i915->wq); in ggtt_cleanup_hw()
1040 i915_gem_drain_freed_objects(ggtt->vm.i915); in ggtt_cleanup_hw()
1042 mutex_lock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
1044 ggtt->vm.skip_pte_rewrite = true; in ggtt_cleanup_hw()
1046 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in ggtt_cleanup_hw()
1047 struct drm_i915_gem_object *obj = vma->obj; in ggtt_cleanup_hw()
1058 if (drm_mm_node_allocated(&ggtt->error_capture)) in ggtt_cleanup_hw()
1059 drm_mm_remove_node(&ggtt->error_capture); in ggtt_cleanup_hw()
1060 mutex_destroy(&ggtt->error_mutex); in ggtt_cleanup_hw()
1065 ggtt->vm.cleanup(&ggtt->vm); in ggtt_cleanup_hw()
1067 mutex_unlock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
1068 i915_address_space_fini(&ggtt->vm); in ggtt_cleanup_hw()
1070 arch_phys_wc_del(ggtt->mtrr); in ggtt_cleanup_hw()
1072 if (ggtt->iomap.size) in ggtt_cleanup_hw()
1073 io_mapping_fini(&ggtt->iomap); in ggtt_cleanup_hw()
1077 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
1082 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; in i915_ggtt_driver_release()
1091 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
1097 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; in i915_ggtt_driver_late_release()
1099 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1); in i915_ggtt_driver_late_release()
1100 dma_resv_fini(&ggtt->vm._resv); in i915_ggtt_driver_late_release()
1154 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_probe_common()
1155 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in ggtt_probe_common()
1156 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in ggtt_probe_common()
1164 drm_dbg(&i915->drm, "Using direct GSM access\n"); in ggtt_probe_common()
1171 ggtt->gsm = ioremap_wc(phys_addr, size); in ggtt_probe_common()
1173 ggtt->gsm = ioremap(phys_addr, size); in ggtt_probe_common()
1175 if (!ggtt->gsm) { in ggtt_probe_common()
1176 drm_err(&i915->drm, "Failed to map the ggtt page table\n"); in ggtt_probe_common()
1177 return -ENOMEM; in ggtt_probe_common()
1180 kref_init(&ggtt->vm.resv_ref); in ggtt_probe_common()
1181 ret = setup_scratch_page(&ggtt->vm); in ggtt_probe_common()
1183 drm_err(&i915->drm, "Scratch setup failed\n"); in ggtt_probe_common()
1185 iounmap(ggtt->gsm); in ggtt_probe_common()
1190 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) in ggtt_probe_common()
1193 ggtt->vm.scratch[0]->encode = in ggtt_probe_common()
1194 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), in ggtt_probe_common()
1206 iounmap(ggtt->gsm); in gen6_gmch_remove()
1218 struct drm_i915_private *i915 = ggtt->vm.i915; in gen8_gmch_probe()
1219 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in gen8_gmch_probe()
1225 return -ENXIO; in gen8_gmch_probe()
1227 ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR); in gen8_gmch_probe()
1228 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen8_gmch_probe()
1237 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen8_gmch_probe()
1238 ggtt->vm.alloc_scratch_dma = alloc_pt_dma; in gen8_gmch_probe()
1239 ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; in gen8_gmch_probe()
1241 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; in gen8_gmch_probe()
1242 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
1243 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
1244 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
1245 ggtt->vm.scratch_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
1247 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
1250 * Serialize GTT updates with aperture access on BXT if VT-d is on, in gen8_gmch_probe()
1254 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
1255 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
1263 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
1264 ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
1266 ggtt->vm.bind_async_flags = in gen8_gmch_probe()
1271 ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind; in gen8_gmch_probe()
1272 ggtt->vm.insert_page = gen8_ggtt_insert_page_bind; in gen8_gmch_probe()
1273 ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind; in gen8_gmch_probe()
1278 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
1281 if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc)) in gen8_gmch_probe()
1282 ggtt->invalidate = guc_ggtt_invalidate; in gen8_gmch_probe()
1284 ggtt->invalidate = gen8_ggtt_invalidate; in gen8_gmch_probe()
1286 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in gen8_gmch_probe()
1287 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in gen8_gmch_probe()
1290 ggtt->vm.pte_encode = mtl_ggtt_pte_encode; in gen8_gmch_probe()
1292 ggtt->vm.pte_encode = gen8_ggtt_pte_encode; in gen8_gmch_probe()
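The vm.total assignment earlier in this probe converts the size of the PTE array (the GSM) into an amount of mappable address space: each gen8_pte_t is 8 bytes and maps one 4 KiB page. For example, an 8 MiB GSM yields a 4 GiB GGTT:

#include <stdio.h>

int main(void)
{
	unsigned long long gsm_size = 8ULL << 20;	/* PTE array in the BAR */
	unsigned long long pte_size = 8;		/* sizeof(gen8_pte_t) */
	unsigned long long page = 4096;

	/* ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE */
	printf("GGTT total = %llu GiB\n", (gsm_size / pte_size) * page >> 30);
	return 0;
}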
1298 * For pre-gen8 platforms pat_index is the same as enum i915_cache_level,
1299 * so the switch-case statements in these PTE encode functions are still valid.
1395 struct drm_i915_private *i915 = ggtt->vm.i915; in gen6_gmch_probe()
1396 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in gen6_gmch_probe()
1401 return -ENXIO; in gen6_gmch_probe()
1403 ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR); in gen6_gmch_probe()
1404 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen6_gmch_probe()
1410 if (ggtt->mappable_end < (64 << 20) || in gen6_gmch_probe()
1411 ggtt->mappable_end > (512 << 20)) { in gen6_gmch_probe()
1412 drm_err(&i915->drm, "Unknown GMADR size (%pa)\n", in gen6_gmch_probe()
1413 &ggtt->mappable_end); in gen6_gmch_probe()
1414 return -ENXIO; in gen6_gmch_probe()
1420 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; in gen6_gmch_probe()
1422 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen6_gmch_probe()
1423 ggtt->vm.alloc_scratch_dma = alloc_pt_dma; in gen6_gmch_probe()
1425 ggtt->vm.clear_range = nop_clear_range; in gen6_gmch_probe()
1427 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1428 ggtt->vm.scratch_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1429 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
1430 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
1431 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
1433 ggtt->invalidate = gen6_ggtt_invalidate; in gen6_gmch_probe()
1436 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
1438 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
1440 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
1442 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
1444 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
1446 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in gen6_gmch_probe()
1447 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in gen6_gmch_probe()
1454 struct drm_i915_private *i915 = gt->i915; in ggtt_probe_hw()
1457 ggtt->vm.gt = gt; in ggtt_probe_hw()
1458 ggtt->vm.i915 = i915; in ggtt_probe_hw()
1459 ggtt->vm.dma = i915->drm.dev; in ggtt_probe_hw()
1460 dma_resv_init(&ggtt->vm._resv); in ggtt_probe_hw()
1470 dma_resv_fini(&ggtt->vm._resv); in ggtt_probe_hw()
1474 if ((ggtt->vm.total - 1) >> 32) { in ggtt_probe_hw()
1475 drm_err(&i915->drm, in ggtt_probe_hw()
1478 ggtt->vm.total >> 20); in ggtt_probe_hw()
1479 ggtt->vm.total = 1ULL << 32; in ggtt_probe_hw()
1480 ggtt->mappable_end = in ggtt_probe_hw()
1481 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1484 if (ggtt->mappable_end > ggtt->vm.total) { in ggtt_probe_hw()
1485 drm_err(&i915->drm, in ggtt_probe_hw()
1488 &ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1489 ggtt->mappable_end = ggtt->vm.total; in ggtt_probe_hw()
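The (ggtt->vm.total - 1) >> 32 test above is a compact "larger than 4 GiB?" check: subtracting one first means a total of exactly 1ULL << 32 passes, while anything bigger leaves bits above 31 set and triggers the clamp. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long long exactly_4g = 1ULL << 32;
	unsigned long long too_big = (1ULL << 32) + 4096;

	printf("%d %d\n", (int)((exactly_4g - 1) >> 32),
	       (int)((too_big - 1) >> 32));	/* prints "0 1" */
	return 0;
}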
1493 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); in ggtt_probe_hw()
1494 drm_dbg(&i915->drm, "GMADR size = %lluM\n", in ggtt_probe_hw()
1495 (u64)ggtt->mappable_end >> 20); in ggtt_probe_hw()
1496 drm_dbg(&i915->drm, "DSM size = %lluM\n", in ggtt_probe_hw()
1503 * i915_ggtt_probe_hw - Probe GGTT hardware location
1517 ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915)); in i915_ggtt_probe_hw()
1522 drm_info(&i915->drm, "VT-d active for gfx access\n"); in i915_ggtt_probe_hw()
1531 ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL); in i915_ggtt_create()
1533 return ERR_PTR(-ENOMEM); in i915_ggtt_create()
1535 INIT_LIST_HEAD(&ggtt->gt_list); in i915_ggtt_create()
1549 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
1564 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_resume_vm()
1567 drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list)); in i915_ggtt_resume_vm()
1571 /* First fill our portion of the GTT with scratch pages */ in i915_ggtt_resume_vm()
1572 vm->clear_range(vm, 0, vm->total); in i915_ggtt_resume_vm()
1575 list_for_each_entry(vma, &vm->bound_list, vm_link) { in i915_ggtt_resume_vm()
1576 struct drm_i915_gem_object *obj = vma->obj; in i915_ggtt_resume_vm()
1578 atomic_read(&vma->flags) & I915_VMA_BIND_MASK; in i915_ggtt_resume_vm()
1586 vma->resource->bound_flags = 0; in i915_ggtt_resume_vm()
1587 vma->ops->bind_vma(vm, NULL, vma->resource, in i915_ggtt_resume_vm()
1588 obj ? obj->pat_index : in i915_ggtt_resume_vm()
1589 i915_gem_get_pat_index(vm->i915, in i915_ggtt_resume_vm()
1594 write_domain_objs |= fetch_and_zero(&obj->write_domain); in i915_ggtt_resume_vm()
1595 obj->read_domains |= I915_GEM_DOMAIN_GTT; in i915_ggtt_resume_vm()
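The resume path replays every bind: vma->resource->bound_flags is zeroed first so the bind callback cannot short-circuit on "already bound" and genuinely rewrites the PTEs that suspend (or power loss) destroyed. A standalone model of why the reset matters (flag handling simplified, names assumed):

#include <stdio.h>

struct vma_resource { unsigned int bound_flags; };

static void bind_vma(struct vma_resource *res, unsigned int flags)
{
	if (res->bound_flags & flags)
		return;				/* looks bound: skip */
	printf("rewriting PTEs for flags %#x\n", flags);
	res->bound_flags |= flags;
}

int main(void)
{
	struct vma_resource res = { .bound_flags = 0x1 };

	bind_vma(&res, 0x1);	/* no-op: PTEs assumed to be present */
	res.bound_flags = 0;	/* resume: force the rewrite */
	bind_vma(&res, 0x1);	/* actually writes the PTEs again */
	return 0;
}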
1607 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in i915_ggtt_resume()
1610 flush = i915_ggtt_resume_vm(&ggtt->vm, false); in i915_ggtt_resume()
1612 if (drm_mm_node_allocated(&ggtt->error_capture)) in i915_ggtt_resume()
1613 ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start, in i915_ggtt_resume()
1614 ggtt->error_capture.size); in i915_ggtt_resume()
1616 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in i915_ggtt_resume()
1617 intel_uc_resume_mappings(&gt->uc); in i915_ggtt_resume()
1619 ggtt->invalidate(ggtt); in i915_ggtt_resume()