xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_stolen.c (revision 03c7918d0d52378d215712ff66c06a980a2119ab)
19797fbfbSChris Wilson /*
210be98a7SChris Wilson  * SPDX-License-Identifier: MIT
310be98a7SChris Wilson  *
49797fbfbSChris Wilson  * Copyright © 2008-2012 Intel Corporation
59797fbfbSChris Wilson  */
69797fbfbSChris Wilson 
710be98a7SChris Wilson #include <linux/errno.h>
810be98a7SChris Wilson #include <linux/mutex.h>
910be98a7SChris Wilson 
1010be98a7SChris Wilson #include <drm/drm_mm.h>
1103c7918dSJani Nikula #include <drm/intel/i915_drm.h>
1210be98a7SChris Wilson 
13d57d4a1dSCQ Tang #include "gem/i915_gem_lmem.h"
1472405c3dSMatthew Auld #include "gem/i915_gem_region.h"
15b8ca8fefSAkeem G Abodunrin #include "gt/intel_gt.h"
16e7858254SMatt Roper #include "gt/intel_gt_mcr.h"
178524bb67SMatt Roper #include "gt/intel_gt_regs.h"
18b8ca8fefSAkeem G Abodunrin #include "gt/intel_region_lmem.h"
199797fbfbSChris Wilson #include "i915_drv.h"
206401fafbSJani Nikula #include "i915_gem_stolen.h"
211bba7323SPiotr Piórkowski #include "i915_pci.h"
22ce2fce25SMatt Roper #include "i915_reg.h"
23a7f46d5bSTvrtko Ursulin #include "i915_utils.h"
249e859eb9SJani Nikula #include "i915_vgpu.h"
25e30e6c7bSMatt Roper #include "intel_mchbar_regs.h"
266bba2b30SPiotr Piórkowski #include "intel_pci_config.h"
279797fbfbSChris Wilson 
289797fbfbSChris Wilson /*
299797fbfbSChris Wilson  * The BIOS typically reserves some of the system's memory for the exclusive
309797fbfbSChris Wilson  * use of the integrated graphics. This memory is no longer available for
319797fbfbSChris Wilson  * use by the OS and so the user finds that his system has less memory
329797fbfbSChris Wilson  * available than he put in. We refer to this memory as stolen.
339797fbfbSChris Wilson  *
349797fbfbSChris Wilson  * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon which must always
369797fbfbSChris Wilson  * be available for panics. Anything else we can reuse the stolen memory
379797fbfbSChris Wilson  * for is a boon.
389797fbfbSChris Wilson  */
399797fbfbSChris Wilson 
40bdce2beaSChris Wilson int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
41d713fd49SPaulo Zanoni 					 struct drm_mm_node *node, u64 size,
42a9da512bSPaulo Zanoni 					 unsigned alignment, u64 start, u64 end)
43d713fd49SPaulo Zanoni {
4492e97d2fSPaulo Zanoni 	int ret;
4592e97d2fSPaulo Zanoni 
46bdce2beaSChris Wilson 	if (!drm_mm_initialized(&i915->mm.stolen))
47d713fd49SPaulo Zanoni 		return -ENODEV;
48d713fd49SPaulo Zanoni 
49011f22ebSHans de Goede 	/* WaSkipStolenMemoryFirstPage:bdw+ */
5040e1956eSLucas De Marchi 	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
51011f22ebSHans de Goede 		start = 4096;
52011f22ebSHans de Goede 
53bdce2beaSChris Wilson 	mutex_lock(&i915->mm.stolen_lock);
54bdce2beaSChris Wilson 	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
554e64e553SChris Wilson 					  size, alignment, 0,
564e64e553SChris Wilson 					  start, end, DRM_MM_INSERT_BEST);
57bdce2beaSChris Wilson 	mutex_unlock(&i915->mm.stolen_lock);
5892e97d2fSPaulo Zanoni 
5992e97d2fSPaulo Zanoni 	return ret;
60d713fd49SPaulo Zanoni }
61d713fd49SPaulo Zanoni 
62bdce2beaSChris Wilson int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
63a9da512bSPaulo Zanoni 				struct drm_mm_node *node, u64 size,
64a9da512bSPaulo Zanoni 				unsigned alignment)
65a9da512bSPaulo Zanoni {
663da3c5c1SChris Wilson 	return i915_gem_stolen_insert_node_in_range(i915, node,
673da3c5c1SChris Wilson 						    size, alignment,
683da3c5c1SChris Wilson 						    I915_GEM_STOLEN_BIAS,
693da3c5c1SChris Wilson 						    U64_MAX);
70a9da512bSPaulo Zanoni }
71a9da512bSPaulo Zanoni 
/*
 * Return a node previously reserved via i915_gem_stolen_insert_node*()
 * to the stolen allocator. Takes the same lock as the insert paths.
 */
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
79d713fd49SPaulo Zanoni 
80dbb2ffbfSAravind Iddamsetty static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
813d99597cSLucas De Marchi {
8203eababbSVille Syrjälä 	return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
833d99597cSLucas De Marchi }
843d99597cSLucas De Marchi 
/*
 * Trim the stolen range so it cannot overlap the GGTT page tables on
 * old (gen <= 4, non-G33/PNV/G4X) platforms where those tables live
 * inside stolen memory.
 *
 * Returns 0 on success, or -EINVAL if no usable stolen range remains.
 */
static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		/* PGTBL_CTL holds the physical base address of the GGTT */
		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			/* gen4 splits the address into low and high fields */
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		/* GGTT size in bytes: one 4-byte PTE per entry */
		ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);

		/*
		 * Carve the GGTT out of stolen: stolen[0] keeps everything
		 * below the GGTT, stolen[1] everything above it.
		 */
		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		/* Chunks differ only if the GGTT actually overlapped stolen */
		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}
1413d99597cSLucas De Marchi 
/*
 * Claim the stolen physical address range in the iomem resource tree so
 * nothing else can map it. Skipped entirely when stolen lives in local
 * memory (or is exposed through LMEMBAR), since it is then not part of
 * system memory.
 *
 * Returns 0 on success, -EBUSY on an unresolvable region conflict.
 */
static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting MTL, in IGFX devices the stolen memory is exposed via
	 * LMEMBAR and shall be considered similar to stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}
1949797fbfbSChris Wilson 
195bdce2beaSChris Wilson static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
1969797fbfbSChris Wilson {
197bdce2beaSChris Wilson 	if (!drm_mm_initialized(&i915->mm.stolen))
198446f8d81SDaniel Vetter 		return;
199446f8d81SDaniel Vetter 
200bdce2beaSChris Wilson 	drm_mm_takedown(&i915->mm.stolen);
2019797fbfbSChris Wilson }
2029797fbfbSChris Wilson 
/*
 * Decode the BIOS-reserved portion of stolen memory on G4X-class
 * hardware (CTG/ELK register variants). On success, *base and *size
 * describe the reserved range; both are left untouched when no
 * reservation is enabled or the register contents look bogus.
 */
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	/* ADDR1 lying below the decoded base would be inconsistent */
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* The reserved range extends from base to the top of stolen */
	*size = stolen_top - *base;
}
2377d316aecSVille Syrjälä 
/*
 * Decode GEN6_STOLEN_RESERVED into a base/size pair. Outputs are left
 * untouched when the reservation enable bit is clear.
 */
static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* Size is encoded as a 2-bit field; fall back to 1M if unknown */
	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}
2703774eb50SPaulo Zanoni 
/*
 * Valleyview variant of the reserved-stolen decode. VLV leaves the
 * address field zero; the hardware places the reservation at the top
 * of stolen, so the base is computed from the size.
 */
static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
299957d32feSChris Wilson 
/*
 * Gen7 (ivb/hsw) reserved-stolen decode: explicit base address plus a
 * 1M/256K size field. Outputs untouched when the enable bit is clear.
 */
static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	/* Unknown encodings default to the largest (1M) to stay safe */
	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}
3263774eb50SPaulo Zanoni 
/*
 * Cherryview reserved-stolen decode: gen6-style address mask with the
 * gen8 1M/2M/4M/8M size encoding. Outputs untouched when disabled.
 */
static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* Unknown encodings default to the largest (8M) to stay safe */
	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
3593774eb50SPaulo Zanoni 
/*
 * Broadwell reserved-stolen decode: only a base address is encoded;
 * the reserved range implicitly extends to the top of stolen memory.
 * Outputs untouched when disabled or the address field is zero.
 */
static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	/* A zero base would make the whole of stolen "reserved"; bail */
	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}
3793774eb50SPaulo Zanoni 
/*
 * Gen11+ reserved-stolen decode. The register is 64 bits here. Also
 * handles Wa_14019821291 (media IP 13.0), where a BIOS-programmed
 * GSC/PSMI range inside DSM must be treated as reserved.
 */
static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	/* Wa_14019821291 */
	if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
		/*
		 * This workaround is primarily implemented by the BIOS.  We
		 * just need to figure out whether the BIOS has applied the
		 * workaround (meaning the programmed address falls within
		 * the DSM) and, if so, reserve that part of the DSM to
		 * prevent accidental reuse.  The DSM location should be just
		 * below the WOPCM.
		 */
		u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
							    MTL_GSCPSMI_BASEADDR_LSB,
							    MTL_GSCPSMI_BASEADDR_MSB);
		if (gscpsmi_base >= i915->dsm.stolen.start &&
		    gscpsmi_base < i915->dsm.stolen.end) {
			*base = gscpsmi_base;
			*size = i915->dsm.stolen.end - gscpsmi_base;
			return;
		}
	}

	/* Unknown encodings default to the largest (8M) to stay safe */
	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915))
		/* the base is initialized to stolen top so subtract size to get base */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}
434185441e0SPaulo Zanoni 
4353d99597cSLucas De Marchi /*
4361eca0778SJani Nikula  * Initialize i915->dsm.reserved to contain the reserved space within the Data
4373d99597cSLucas De Marchi  * Stolen Memory. This is a range on the top of DSM that is reserved, not to
4383d99597cSLucas De Marchi  * be used by driver, so must be excluded from the region passed to the
4393d99597cSLucas De Marchi  * allocator later. In the spec this is also called as WOPCM.
4403d99597cSLucas De Marchi  *
4413d99597cSLucas De Marchi  * Our expectation is that the reserved space is at the top of the stolen
4423d99597cSLucas De Marchi  * region, as it has been the case for every platform, and *never* at the
4433d99597cSLucas De Marchi  * bottom, so the calculation here can be simplified.
4443d99597cSLucas De Marchi  */
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.stolen.end + 1;
	/*
	 * Sentinel values: base == stolen_top with zero size means "no
	 * reservation". The per-platform helpers below only overwrite
	 * these when they find an enabled, sane-looking reservation.
	 */
	reserved_base = stolen_top;
	reserved_size = 0;

	/* Dispatch on graphics version, newest first */
	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	/* A zero base with a non-trivial reservation cannot be trusted */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);

	/* The reserved range must lie wholly inside stolen memory */
	if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm.reserved, &i915->dsm.stolen);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	/* Record an empty reservation so later consumers see size 0 */
	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}
5103d99597cSLucas De Marchi 
/*
 * Set up the stolen memory region: validate and trim the range, claim
 * it from the resource tree, account for the BIOS-reserved (WOPCM)
 * portion, and initialize the drm_mm allocator over what remains.
 *
 * Returns 0 on success, -ENOSPC when stolen cannot be used.
 */
static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	/* Stolen is not usable when running under a GVT-g hypervisor */
	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	/* Pre-gen8 stolen is not usable with the IOMMU (DMAR) enabled */
	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	/* Must be set before init_reserved_stolen(), which reads it */
	i915->dsm.stolen = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm.reserved.start - 1;
	mem->io = DEFINE_RES_MEM(mem->io.start,
				 min(resource_size(&mem->io),
				     resource_size(&mem->region)));

	i915->dsm.usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm.stolen) >> 10,
		(u64)i915->dsm.usable_size >> 10);

	if (i915->dsm.usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);

	/*
	 * Access to stolen lmem beyond certain size for MTL A0 stepping
	 * would crash the machine. Disable stolen lmem for userspace access
	 * by setting usable_size to zero.
	 */
	if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0)
		i915->dsm.usable_size = 0;

	return 0;
}
5710104fdbbSChris Wilson 
/*
 * Debug-only helper: fill @size bytes of stolen at DMA address @addr
 * with poison byte @x, one page at a time, by temporarily mapping each
 * page through the GGTT error-capture slot. Compiled out unless
 * CONFIG_DRM_I915_DEBUG_GEM is enabled.
 */
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		/* Point the error-capture PTE at the next stolen page */
		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(ggtt->vm.i915,
							    I915_CACHE_NONE),
				     0);
		/* Ensure the PTE write lands before we touch the mapping */
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	/* Flush the final writes before tearing down the mapping */
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}
610d7085b0fSChris Wilson 
6110104fdbbSChris Wilson static struct sg_table *
6120104fdbbSChris Wilson i915_pages_create_for_stolen(struct drm_device *dev,
613b7128ef1SMatthew Auld 			     resource_size_t offset, resource_size_t size)
6140104fdbbSChris Wilson {
615bdce2beaSChris Wilson 	struct drm_i915_private *i915 = to_i915(dev);
6160104fdbbSChris Wilson 	struct sg_table *st;
6170104fdbbSChris Wilson 	struct scatterlist *sg;
6180104fdbbSChris Wilson 
6191eca0778SJani Nikula 	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));
6200104fdbbSChris Wilson 
6210104fdbbSChris Wilson 	/* We hide that we have no struct page backing our stolen object
6220104fdbbSChris Wilson 	 * by wrapping the contiguous physical allocation with a fake
6230104fdbbSChris Wilson 	 * dma mapping in a single scatterlist.
6240104fdbbSChris Wilson 	 */
6250104fdbbSChris Wilson 
6260104fdbbSChris Wilson 	st = kmalloc(sizeof(*st), GFP_KERNEL);
6270104fdbbSChris Wilson 	if (st == NULL)
62843e157faSMatthew Auld 		return ERR_PTR(-ENOMEM);
6290104fdbbSChris Wilson 
6300104fdbbSChris Wilson 	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
6310104fdbbSChris Wilson 		kfree(st);
63243e157faSMatthew Auld 		return ERR_PTR(-ENOMEM);
6330104fdbbSChris Wilson 	}
6340104fdbbSChris Wilson 
6350104fdbbSChris Wilson 	sg = st->sgl;
636ec14ba47SAkash Goel 	sg->offset = 0;
637ed23abddSImre Deak 	sg->length = size;
6380104fdbbSChris Wilson 
6391eca0778SJani Nikula 	sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
6400104fdbbSChris Wilson 	sg_dma_len(sg) = size;
6410104fdbbSChris Wilson 
6420104fdbbSChris Wilson 	return st;
6430104fdbbSChris Wilson }
6440104fdbbSChris Wilson 
645b91b09eeSMatthew Auld static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
6460104fdbbSChris Wilson {
6475c24c9d2SMichał Winiarski 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
648b91b09eeSMatthew Auld 	struct sg_table *pages =
649b91b09eeSMatthew Auld 		i915_pages_create_for_stolen(obj->base.dev,
65003ac84f1SChris Wilson 					     obj->stolen->start,
65103ac84f1SChris Wilson 					     obj->stolen->size);
652b91b09eeSMatthew Auld 	if (IS_ERR(pages))
653b91b09eeSMatthew Auld 		return PTR_ERR(pages);
654b91b09eeSMatthew Auld 
6555c24c9d2SMichał Winiarski 	dbg_poison(to_gt(i915)->ggtt,
656d7085b0fSChris Wilson 		   sg_dma_address(pages->sgl),
657d7085b0fSChris Wilson 		   sg_dma_len(pages->sgl),
658d7085b0fSChris Wilson 		   POISON_INUSE);
659d7085b0fSChris Wilson 
6608c949515SMatthew Auld 	__i915_gem_object_set_pages(obj, pages);
661b91b09eeSMatthew Auld 
662b91b09eeSMatthew Auld 	return 0;
6630104fdbbSChris Wilson }
6640104fdbbSChris Wilson 
66503ac84f1SChris Wilson static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
66603ac84f1SChris Wilson 					     struct sg_table *pages)
6670104fdbbSChris Wilson {
6685c24c9d2SMichał Winiarski 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
6696288c79eSChris Wilson 	/* Should only be called from i915_gem_object_release_stolen() */
670d7085b0fSChris Wilson 
6715c24c9d2SMichał Winiarski 	dbg_poison(to_gt(i915)->ggtt,
672d7085b0fSChris Wilson 		   sg_dma_address(pages->sgl),
673d7085b0fSChris Wilson 		   sg_dma_len(pages->sgl),
674d7085b0fSChris Wilson 		   POISON_FREE);
675d7085b0fSChris Wilson 
67603ac84f1SChris Wilson 	sg_free_table(pages);
67703ac84f1SChris Wilson 	kfree(pages);
6780104fdbbSChris Wilson }
6790104fdbbSChris Wilson 
/*
 * .release hook for stolen objects: give the reserved range back to the
 * stolen drm_mm and detach the object from its memory region.
 */
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* fetch_and_zero guards against a double release of the node. */
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	/* Remove from the allocator first, then free our bookkeeping. */
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}
6926288c79eSChris Wilson 
/* Backing-store hooks for GEM objects carved out of stolen memory. */
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
6990104fdbbSChris Wilson 
/*
 * Initialise @obj as a GEM object backed by the already-reserved stolen
 * range @stolen, attach it to @mem and eagerly pin its backing store.
 *
 * On success the object owns @stolen (released via the .release hook);
 * on failure the caller must unwind the node itself. Returns 0 or a
 * negative errno from the pin.
 */
static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	/* Freshly created object: the trylock is not expected to contend. */
	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	/* Pin immediately so the fake sg_table exists for the object's life. */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}
7350104fdbbSChris Wilson 
/*
 * Memory-region .init_object hook for stolen: reserve a range of stolen
 * memory for @obj, either at a fixed @offset (preallocated BIOS objects)
 * or wherever the allocator finds room, then finish object setup.
 *
 * @offset: fixed start, or I915_BO_INVALID_OFFSET for "anywhere".
 * @size: requested size in bytes; must be non-zero.
 * @page_size: unused here; stolen granularity comes from mem->min_page_size.
 * @flags: I915_BO_ALLOC_* flags.
 *
 * Returns 0 or a negative errno (-ENODEV if stolen was never set up,
 * -ENOSPC for CPU-visible requests on io-less local stolen, ...).
 */
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture there is no
	 * possible way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		/* Fixed placement: claim the exact range the caller asked for. */
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		/* Floating placement: let the allocator pick a spot. */
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}
7940104fdbbSChris Wilson 
795866d12b4SChris Wilson struct drm_i915_gem_object *
796bdce2beaSChris Wilson i915_gem_object_create_stolen(struct drm_i915_private *i915,
79772405c3dSMatthew Auld 			      resource_size_t size)
79872405c3dSMatthew Auld {
799d22632c8SMatthew Auld 	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
80072405c3dSMatthew Auld }
80172405c3dSMatthew Auld 
802d57d4a1dSCQ Tang static int init_stolen_smem(struct intel_memory_region *mem)
80372405c3dSMatthew Auld {
804c40bd3b1SLucas De Marchi 	int err;
805c40bd3b1SLucas De Marchi 
80672405c3dSMatthew Auld 	/*
80772405c3dSMatthew Auld 	 * Initialise stolen early so that we may reserve preallocated
80872405c3dSMatthew Auld 	 * objects for the BIOS to KMS transition.
80972405c3dSMatthew Auld 	 */
810c40bd3b1SLucas De Marchi 	err = i915_gem_init_stolen(mem);
811c40bd3b1SLucas De Marchi 	if (err)
812c40bd3b1SLucas De Marchi 		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
813c40bd3b1SLucas De Marchi 
814c40bd3b1SLucas De Marchi 	return 0;
81572405c3dSMatthew Auld }
81672405c3dSMatthew Auld 
/* Memory-region .release hook: undo init_stolen_smem(). Always succeeds. */
static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}
82272405c3dSMatthew Auld 
/* Region ops for stolen memory carved out of system RAM. */
static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};
82872405c3dSMatthew Auld 
829d57d4a1dSCQ Tang static int init_stolen_lmem(struct intel_memory_region *mem)
83072405c3dSMatthew Auld {
831d57d4a1dSCQ Tang 	int err;
832d57d4a1dSCQ Tang 
833d57d4a1dSCQ Tang 	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
834c40bd3b1SLucas De Marchi 		return 0;
835d57d4a1dSCQ Tang 
836d57d4a1dSCQ Tang 	err = i915_gem_init_stolen(mem);
837c40bd3b1SLucas De Marchi 	if (err) {
838c40bd3b1SLucas De Marchi 		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
839c40bd3b1SLucas De Marchi 		return 0;
840b8ca8fefSAkeem G Abodunrin 	}
841d57d4a1dSCQ Tang 
8423c0fa9f4SVille Syrjälä 	if (resource_size(&mem->io) &&
8433c0fa9f4SVille Syrjälä 	    !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
844c40bd3b1SLucas De Marchi 		goto err_cleanup;
845c40bd3b1SLucas De Marchi 
846d57d4a1dSCQ Tang 	return 0;
847d57d4a1dSCQ Tang 
848b8ca8fefSAkeem G Abodunrin err_cleanup:
849b8ca8fefSAkeem G Abodunrin 	i915_gem_cleanup_stolen(mem->i915);
850d57d4a1dSCQ Tang 	return err;
851d57d4a1dSCQ Tang }
852d57d4a1dSCQ Tang 
/*
 * Memory-region .release hook: undo init_stolen_lmem() — drop the WC
 * mapping (if one was created) and then the stolen drm_mm.
 */
static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (resource_size(&mem->io))
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}
860d57d4a1dSCQ Tang 
/* Region ops for stolen memory carved out of device-local memory. */
static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};
866d57d4a1dSCQ Tang 
867dbb2ffbfSAravind Iddamsetty static int mtl_get_gms_size(struct intel_uncore *uncore)
868dbb2ffbfSAravind Iddamsetty {
869dbb2ffbfSAravind Iddamsetty 	u16 ggc, gms;
870dbb2ffbfSAravind Iddamsetty 
871dbb2ffbfSAravind Iddamsetty 	ggc = intel_uncore_read16(uncore, GGC);
872dbb2ffbfSAravind Iddamsetty 
873dbb2ffbfSAravind Iddamsetty 	/* check GGMS, should be fixed 0x3 (8MB) */
874dbb2ffbfSAravind Iddamsetty 	if ((ggc & GGMS_MASK) != GGMS_MASK)
875dbb2ffbfSAravind Iddamsetty 		return -EIO;
876dbb2ffbfSAravind Iddamsetty 
877dbb2ffbfSAravind Iddamsetty 	/* return valid GMS value, -EIO if invalid */
878dbb2ffbfSAravind Iddamsetty 	gms = REG_FIELD_GET(GMS_MASK, ggc);
879dbb2ffbfSAravind Iddamsetty 	switch (gms) {
880dbb2ffbfSAravind Iddamsetty 	case 0x0 ... 0x04:
881dbb2ffbfSAravind Iddamsetty 		return gms * 32;
882dbb2ffbfSAravind Iddamsetty 	case 0xf0 ... 0xfe:
883dbb2ffbfSAravind Iddamsetty 		return (gms - 0xf0 + 1) * 4;
884dbb2ffbfSAravind Iddamsetty 	default:
885dbb2ffbfSAravind Iddamsetty 		MISSING_CASE(gms);
886dbb2ffbfSAravind Iddamsetty 		return -EIO;
887dbb2ffbfSAravind Iddamsetty 	}
888dbb2ffbfSAravind Iddamsetty }
889dbb2ffbfSAravind Iddamsetty 
/*
 * Probe and create the stolen local-memory region.
 *
 * Determines the device-local memory span (from the LMEM BAR or the tile
 * address-range MCR register), locates the stolen carve-out within it
 * (GGC-derived on LMEMBAR_SMEM_STOLEN platforms, DSMBASE otherwise) and
 * picks the matching CPU-visible io window, then registers the region.
 *
 * Returns the new region or an ERR_PTR. Only instance 0 is supported.
 */
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	/* Total local-memory size: BAR length, or tile range register. */
	if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

		lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		/*
		 * MTL dsm size is in GGC register.
		 * Also MTL uses offset to GSMBASE in ptes, so i915
		 * uses dsm_base = 8MBs to setup stolen region, since
		 * DSMBASE = GSMBASE + 8MB.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = SZ_8M;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
	} else {
		/* Use DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
	}

	/*
	 * CPU-visible window: direct DSM access when supported; nothing if
	 * the BAR is smaller than lmem (no aperture); BAR + offset otherwise.
	 */
	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct DSM access\n");
		io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		io_size = dsm_size;
	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
		io_size = dsm_size;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
						I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	/* Not exposed to userspace as a general-purpose region. */
	mem->private = true;

	return mem;
}
973d57d4a1dSCQ Tang 
974d57d4a1dSCQ Tang struct intel_memory_region*
975d1487389SThomas Hellström i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
976d1487389SThomas Hellström 			   u16 instance)
977d57d4a1dSCQ Tang {
978d57d4a1dSCQ Tang 	struct intel_memory_region *mem;
979d57d4a1dSCQ Tang 
980d57d4a1dSCQ Tang 	mem = intel_memory_region_create(i915,
98172405c3dSMatthew Auld 					 intel_graphics_stolen_res.start,
98272405c3dSMatthew Auld 					 resource_size(&intel_graphics_stolen_res),
983235582caSMatthew Auld 					 PAGE_SIZE, 0, 0, type, instance,
984d57d4a1dSCQ Tang 					 &i915_region_stolen_smem_ops);
985d57d4a1dSCQ Tang 	if (IS_ERR(mem))
986d57d4a1dSCQ Tang 		return mem;
987d57d4a1dSCQ Tang 
988d57d4a1dSCQ Tang 	intel_memory_region_set_name(mem, "stolen-system");
989d57d4a1dSCQ Tang 
99036150bbaSMatthew Auld 	mem->private = true;
991c40bd3b1SLucas De Marchi 
992d57d4a1dSCQ Tang 	return mem;
99372405c3dSMatthew Auld }
99472405c3dSMatthew Auld 
/* True iff @obj is backed by stolen memory (uses the stolen object ops). */
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}
9991d47074cSJouni Högander 
/* True iff the stolen allocator was successfully set up for @i915. */
bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
{
	return drm_mm_initialized(&i915->mm.stolen);
}
10041d47074cSJouni Högander 
/* Start address of the stolen memory area. */
u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
{
	return i915->dsm.stolen.start;
}
10091d47074cSJouni Högander 
/* Total size of the stolen memory area in bytes. */
u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
{
	return resource_size(&i915->dsm.stolen);
}
10141d47074cSJouni Högander 
/* Absolute address of @node: stolen area base plus the node's offset. */
u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
				 const struct drm_mm_node *node)
{
	return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}
10201d47074cSJouni Högander 
/* True iff @node currently holds an allocation in the stolen drm_mm. */
bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
{
	return drm_mm_node_allocated(node);
}
10251d47074cSJouni Högander 
/* Offset of @node from the start of the stolen area. */
u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
{
	return node->start;
}
10301d47074cSJouni Högander 
/* Size of the allocation held by @node, in bytes. */
u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
{
	return node->size;
}
1035