1*4e22d5b8SAndi Shyti // SPDX-License-Identifier: MIT
29797fbfbSChris Wilson /*
39797fbfbSChris Wilson * Copyright © 2008-2012 Intel Corporation
49797fbfbSChris Wilson */
59797fbfbSChris Wilson
610be98a7SChris Wilson #include <linux/errno.h>
710be98a7SChris Wilson #include <linux/mutex.h>
810be98a7SChris Wilson
910be98a7SChris Wilson #include <drm/drm_mm.h>
1003c7918dSJani Nikula #include <drm/intel/i915_drm.h>
1110be98a7SChris Wilson
12d57d4a1dSCQ Tang #include "gem/i915_gem_lmem.h"
1372405c3dSMatthew Auld #include "gem/i915_gem_region.h"
14b8ca8fefSAkeem G Abodunrin #include "gt/intel_gt.h"
15e7858254SMatt Roper #include "gt/intel_gt_mcr.h"
168524bb67SMatt Roper #include "gt/intel_gt_regs.h"
17b8ca8fefSAkeem G Abodunrin #include "gt/intel_region_lmem.h"
189797fbfbSChris Wilson #include "i915_drv.h"
196401fafbSJani Nikula #include "i915_gem_stolen.h"
201bba7323SPiotr Piórkowski #include "i915_pci.h"
21ce2fce25SMatt Roper #include "i915_reg.h"
22a7f46d5bSTvrtko Ursulin #include "i915_utils.h"
239e859eb9SJani Nikula #include "i915_vgpu.h"
24e30e6c7bSMatt Roper #include "intel_mchbar_regs.h"
256bba2b30SPiotr Piórkowski #include "intel_pci_config.h"
269797fbfbSChris Wilson
279797fbfbSChris Wilson /*
289797fbfbSChris Wilson * The BIOS typically reserves some of the system's memory for the exclusive
299797fbfbSChris Wilson * use of the integrated graphics. This memory is no longer available for
309797fbfbSChris Wilson * use by the OS and so the user finds that his system has less memory
319797fbfbSChris Wilson * available than he put in. We refer to this memory as stolen.
329797fbfbSChris Wilson *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
389797fbfbSChris Wilson
/*
 * Reserve a chunk of stolen memory within [start, end) for @node.
 *
 * Returns 0 on success, -ENODEV when the stolen allocator was never set
 * up, or the error from the range allocator when no suitable hole exists.
 */
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int err;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	err = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return err;
}
60d713fd49SPaulo Zanoni
/*
 * Reserve a chunk of stolen memory anywhere above the stolen bias.
 * Thin wrapper around i915_gem_stolen_insert_node_in_range().
 */
int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node, size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}
70a9da512bSPaulo Zanoni
/* Release a previously reserved stolen-memory node back to the allocator. */
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
78d713fd49SPaulo Zanoni
valid_stolen_size(struct drm_i915_private * i915,struct resource * dsm)79dbb2ffbfSAravind Iddamsetty static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
803d99597cSLucas De Marchi {
8103eababbSVille Syrjälä return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
823d99597cSLucas De Marchi }
833d99597cSLucas De Marchi
/*
 * Validate the stolen range and, on early platforms (graphics version <= 4,
 * excluding G33/PNV/G4X), carve the GTT out of it if the two overlap.
 * Returns 0 on success, -EINVAL when the (possibly trimmed) range is
 * unusable.
 */
static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		/* Locate the GTT from the hardware page-table control reg. */
		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		/* 4 bytes per GTT entry. */
		ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);

		/* Trim each candidate on the side the GTT intrudes from. */
		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		/* Only log when trimming actually changed something. */
		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}
1403d99597cSLucas De Marchi
/*
 * Claim the stolen range in the iomem resource tree so nothing else in the
 * kernel can use it.  Skipped entirely when stolen is device-local (LMEM or
 * behind LMEMBAR).  Returns 0 on success, -EBUSY on an unresolvable conflict.
 */
static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting MTL, in IGFX devices the stolen memory is exposed via
	 * LMEMBAR and shall be considered similar to stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}
1939797fbfbSChris Wilson
/* Tear down the stolen-memory allocator, if it was ever initialized. */
static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}
2019797fbfbSChris Wilson
/*
 * Decode the reserved portion of stolen memory on G4X-era hardware
 * (CTG/ELK register variants).  *base/*size are left untouched when no
 * reservation is enabled or the programmed address is zero.
 */
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	/* A zero programmed address means no usable reservation. */
	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	/* The two address fields are expected to be consistent. */
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* Reservation extends from the programmed base to the top of DSM. */
	*size = stolen_top - *base;
}
2367d316aecSVille Syrjälä
/*
 * Decode the reserved stolen range on gen6.  Leaves *base/*size untouched
 * when the reservation is not enabled.
 */
static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reserved = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reserved);

	if (!(reserved & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reserved & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* Unknown encodings warn and fall back to the 1M default. */
	switch (reserved & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reserved & GEN6_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	}
}
2693774eb50SPaulo Zanoni
/*
 * Decode the reserved stolen range on VLV.  The hardware does not report
 * a base address here, so it is derived from the top of stolen memory.
 */
static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reserved = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reserved);

	if (!(reserved & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	/* 1M is the only size encoding handled here; warn on anything else. */
	if ((reserved & GEN7_STOLEN_RESERVED_SIZE_MASK) != GEN7_STOLEN_RESERVED_1M)
		MISSING_CASE(reserved & GEN7_STOLEN_RESERVED_SIZE_MASK);
	*size = 1024 * 1024;

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
298957d32feSChris Wilson
/*
 * Decode the reserved stolen range on gen7.  Leaves *base/*size untouched
 * when the reservation is not enabled.
 */
static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reserved = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reserved);

	if (!(reserved & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reserved & GEN7_STOLEN_RESERVED_ADDR_MASK;

	/* Unknown encodings warn and fall back to the 1M default. */
	switch (reserved & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reserved & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	}
}
3253774eb50SPaulo Zanoni
/*
 * Decode the reserved stolen range on CHV/BXT-class hardware.  Leaves
 * *base/*size untouched when the reservation is not enabled.
 */
static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reserved = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reserved);

	if (!(reserved & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reserved & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* Unknown encodings warn and fall back to the 8M default. */
	switch (reserved & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reserved & GEN8_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}
}
3583774eb50SPaulo Zanoni
/*
 * Decode the reserved stolen range on BDW+.  The reservation runs from
 * the programmed base to the top of stolen memory; *base/*size are left
 * untouched when nothing valid is programmed.
 */
static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reserved = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reserved);

	if (!(reserved & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	/* A zero programmed address means no usable reservation. */
	if (!(reserved & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reserved & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}
3783774eb50SPaulo Zanoni
/*
 * Decode the reserved stolen range on gen11+.  On media IP 13.0 parts the
 * BIOS may have placed the GSC/PSMI region inside the DSM (Wa_14019821291);
 * if so, that region becomes the reservation instead of the register-encoded
 * one.
 */
static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	/* Wa_14019821291 */
	if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
		/*
		 * This workaround is primarily implemented by the BIOS. We
		 * just need to figure out whether the BIOS has applied the
		 * workaround (meaning the programmed address falls within
		 * the DSM) and, if so, reserve that part of the DSM to
		 * prevent accidental reuse. The DSM location should be just
		 * below the WOPCM.
		 */
		u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
							    MTL_GSCPSMI_BASEADDR_LSB,
							    MTL_GSCPSMI_BASEADDR_MSB);
		if (gscpsmi_base >= i915->dsm.stolen.start &&
		    gscpsmi_base < i915->dsm.stolen.end) {
			*base = gscpsmi_base;
			*size = i915->dsm.stolen.end - gscpsmi_base;
			return;
		}
	}

	/* Unknown size encodings warn and fall back to the 8M default. */
	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915))
		/* the base is initialized to stolen top so subtract size to get base */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}
433185441e0SPaulo Zanoni
/*
 * Initialize i915->dsm.reserved to contain the reserved space within the Data
 * Stolen Memory. This is a range on the top of DSM that is reserved, not to
 * be used by the driver, so it must be excluded from the region passed to the
 * allocator later. In the spec this is also called WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as it has been the case for every platform, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.stolen.end + 1;
	/* Default to "no reservation": base at the top, zero size. */
	reserved_base = stolen_top;
	reserved_size = 0;

	/* Dispatch to the platform-specific decoder of the reserved range. */
	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915) || IS_GEMINILAKE(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	/* A reservation with no base cannot be trusted. */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);

	/* The reserved range must lie entirely within stolen memory. */
	if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm.reserved, &i915->dsm.stolen);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	/* Record an empty reservation so later range math stays consistent. */
	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}
5093d99597cSLucas De Marchi
/*
 * Set up the stolen-memory region: validate and trim the range, claim it
 * from the OS, carve out the reserved (WOPCM) area, and initialize the
 * drm_mm allocator over what remains.  Returns 0 on success or -ENOSPC
 * when stolen memory is unusable on this system.
 */
static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	/* Stolen is not usable under a virtualized GPU. */
	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	/* Pre-gen8, stolen is not usable with an active IOMMU. */
	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	i915->dsm.stolen = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm.reserved.start - 1;
	/* Clamp the CPU-visible window to the (possibly shrunken) region. */
	mem->io = DEFINE_RES_MEM(mem->io.start,
				 min(resource_size(&mem->io),
				     resource_size(&mem->region)));

	i915->dsm.usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm.stolen) >> 10,
		(u64)i915->dsm.usable_size >> 10);

	if (i915->dsm.usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);

	/*
	 * Access to stolen lmem beyond certain size for MTL A0 stepping
	 * would crash the machine. Disable stolen lmem for userspace access
	 * by setting usable_size to zero.
	 */
	if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0)
		i915->dsm.usable_size = 0;

	return 0;
}
5700104fdbbSChris Wilson
/*
 * Poison a physical range with byte @x by mapping it, one page at a time,
 * through the GGTT error-capture page.  Debug (CONFIG_DRM_I915_DEBUG_GEM)
 * builds only; a no-op otherwise.
 */
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	/* Nothing to do without an error-capture page to borrow. */
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		/* Point the error-capture PTE at the next page of the range. */
		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(ggtt->vm.i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	/* Unmap the borrowed PTE again before releasing the lock. */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}
609d7085b0fSChris Wilson
6100104fdbbSChris Wilson static struct sg_table *
i915_pages_create_for_stolen(struct drm_device * dev,resource_size_t offset,resource_size_t size)6110104fdbbSChris Wilson i915_pages_create_for_stolen(struct drm_device *dev,
612b7128ef1SMatthew Auld resource_size_t offset, resource_size_t size)
6130104fdbbSChris Wilson {
614bdce2beaSChris Wilson struct drm_i915_private *i915 = to_i915(dev);
6150104fdbbSChris Wilson struct sg_table *st;
6160104fdbbSChris Wilson struct scatterlist *sg;
6170104fdbbSChris Wilson
6181eca0778SJani Nikula GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));
6190104fdbbSChris Wilson
6200104fdbbSChris Wilson /* We hide that we have no struct page backing our stolen object
6210104fdbbSChris Wilson * by wrapping the contiguous physical allocation with a fake
6220104fdbbSChris Wilson * dma mapping in a single scatterlist.
6230104fdbbSChris Wilson */
6240104fdbbSChris Wilson
6250104fdbbSChris Wilson st = kmalloc(sizeof(*st), GFP_KERNEL);
6260104fdbbSChris Wilson if (st == NULL)
62743e157faSMatthew Auld return ERR_PTR(-ENOMEM);
6280104fdbbSChris Wilson
6290104fdbbSChris Wilson if (sg_alloc_table(st, 1, GFP_KERNEL)) {
6300104fdbbSChris Wilson kfree(st);
63143e157faSMatthew Auld return ERR_PTR(-ENOMEM);
6320104fdbbSChris Wilson }
6330104fdbbSChris Wilson
6340104fdbbSChris Wilson sg = st->sgl;
635ec14ba47SAkash Goel sg->offset = 0;
636ed23abddSImre Deak sg->length = size;
6370104fdbbSChris Wilson
6381eca0778SJani Nikula sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
6390104fdbbSChris Wilson sg_dma_len(sg) = size;
6400104fdbbSChris Wilson
6410104fdbbSChris Wilson return st;
6420104fdbbSChris Wilson }
6430104fdbbSChris Wilson
i915_gem_object_get_pages_stolen(struct drm_i915_gem_object * obj)644b91b09eeSMatthew Auld static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
6450104fdbbSChris Wilson {
6465c24c9d2SMichał Winiarski struct drm_i915_private *i915 = to_i915(obj->base.dev);
647b91b09eeSMatthew Auld struct sg_table *pages =
648b91b09eeSMatthew Auld i915_pages_create_for_stolen(obj->base.dev,
64903ac84f1SChris Wilson obj->stolen->start,
65003ac84f1SChris Wilson obj->stolen->size);
651b91b09eeSMatthew Auld if (IS_ERR(pages))
652b91b09eeSMatthew Auld return PTR_ERR(pages);
653b91b09eeSMatthew Auld
6545c24c9d2SMichał Winiarski dbg_poison(to_gt(i915)->ggtt,
655d7085b0fSChris Wilson sg_dma_address(pages->sgl),
656d7085b0fSChris Wilson sg_dma_len(pages->sgl),
657d7085b0fSChris Wilson POISON_INUSE);
658d7085b0fSChris Wilson
6598c949515SMatthew Auld __i915_gem_object_set_pages(obj, pages);
660b91b09eeSMatthew Auld
661b91b09eeSMatthew Auld return 0;
6620104fdbbSChris Wilson }
6630104fdbbSChris Wilson
/*
 * put_pages vfunc for stolen objects: release the fake scatterlist.
 * The stolen range itself is returned to the allocator later, in
 * i915_gem_object_release_stolen().
 */
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	/* Debug builds: poison the range so stale use is obvious */
	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}
6780104fdbbSChris Wilson
/*
 * release vfunc for stolen objects: return the drm_mm node to the stolen
 * allocator and detach the object from its memory region. fetch_and_zero()
 * clears obj->stolen so any later access trips the GEM_BUG_ON instead of
 * double-freeing.
 */
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}
6916288c79eSChris Wilson
/* GEM object vfuncs for objects backed by stolen memory */
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
6980104fdbbSChris Wilson
/*
 * Initialise a GEM object on top of an already-reserved stolen range and
 * immediately pin its backing pages.
 *
 * On success the object owns @stolen (released via the object's release
 * vfunc). On failure the caller still owns @stolen and must remove/free
 * the node itself (see _i915_gem_object_stolen_init()).
 */
static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	/* Cache level simply mirrors LLC presence on this platform */
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	/* Object is freshly created, so the trylock should never contend */
	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	/* Pin pages up front; on failure undo the region attachment */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}
7340104fdbbSChris Wilson
/*
 * Memory-region init_object hook: back a new GEM object with stolen memory.
 *
 * @offset: fixed placement within stolen, or I915_BO_INVALID_OFFSET to let
 *          the allocator choose (fixed placement is used for ranges
 *          preallocated by the BIOS, e.g. the boot framebuffer).
 * @page_size: unused here; stolen uses the region's min_page_size.
 *
 * Returns 0 on success or a negative errno.
 */
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture there is no
	 * possible way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		/* Caller demands an exact placement: reserve that range */
		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}
7930104fdbbSChris Wilson
/*
 * Allocate a GEM object of @size from the stolen region, placed anywhere
 * within stolen. Returns the new object or an ERR_PTR().
 */
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}
80072405c3dSMatthew Auld
/*
 * Region init hook for system-memory stolen.
 *
 * Note the deliberate error-swallowing: a setup failure returns 0 so that
 * driver load continues without a usable stolen region instead of failing
 * device probe.
 */
static int init_stolen_smem(struct intel_memory_region *mem)
{
	int err;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

	return 0;
}
81572405c3dSMatthew Auld
/* Region release hook: tear down the stolen drm_mm allocator */
static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}
82172405c3dSMatthew Auld
/* Region ops for stolen memory carved out of system RAM */
static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};
82772405c3dSMatthew Auld
init_stolen_lmem(struct intel_memory_region * mem)828d57d4a1dSCQ Tang static int init_stolen_lmem(struct intel_memory_region *mem)
82972405c3dSMatthew Auld {
830d57d4a1dSCQ Tang int err;
831d57d4a1dSCQ Tang
832d57d4a1dSCQ Tang if (GEM_WARN_ON(resource_size(&mem->region) == 0))
833c40bd3b1SLucas De Marchi return 0;
834d57d4a1dSCQ Tang
835d57d4a1dSCQ Tang err = i915_gem_init_stolen(mem);
836c40bd3b1SLucas De Marchi if (err) {
837c40bd3b1SLucas De Marchi drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
838c40bd3b1SLucas De Marchi return 0;
839b8ca8fefSAkeem G Abodunrin }
840d57d4a1dSCQ Tang
8413c0fa9f4SVille Syrjälä if (resource_size(&mem->io) &&
8423c0fa9f4SVille Syrjälä !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
843c40bd3b1SLucas De Marchi goto err_cleanup;
844c40bd3b1SLucas De Marchi
845d57d4a1dSCQ Tang return 0;
846d57d4a1dSCQ Tang
847b8ca8fefSAkeem G Abodunrin err_cleanup:
848b8ca8fefSAkeem G Abodunrin i915_gem_cleanup_stolen(mem->i915);
849d57d4a1dSCQ Tang return err;
850d57d4a1dSCQ Tang }
851d57d4a1dSCQ Tang
/* Region release hook: drop the CPU iomapping (if one was created) and
 * tear down the stolen allocator.
 */
static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (resource_size(&mem->io))
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}
859d57d4a1dSCQ Tang
/* Region ops for stolen memory carved out of device-local memory */
static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};
865d57d4a1dSCQ Tang
mtl_get_gms_size(struct intel_uncore * uncore)866dbb2ffbfSAravind Iddamsetty static int mtl_get_gms_size(struct intel_uncore *uncore)
867dbb2ffbfSAravind Iddamsetty {
868dbb2ffbfSAravind Iddamsetty u16 ggc, gms;
869dbb2ffbfSAravind Iddamsetty
870dbb2ffbfSAravind Iddamsetty ggc = intel_uncore_read16(uncore, GGC);
871dbb2ffbfSAravind Iddamsetty
872dbb2ffbfSAravind Iddamsetty /* check GGMS, should be fixed 0x3 (8MB) */
873dbb2ffbfSAravind Iddamsetty if ((ggc & GGMS_MASK) != GGMS_MASK)
874dbb2ffbfSAravind Iddamsetty return -EIO;
875dbb2ffbfSAravind Iddamsetty
876dbb2ffbfSAravind Iddamsetty /* return valid GMS value, -EIO if invalid */
877dbb2ffbfSAravind Iddamsetty gms = REG_FIELD_GET(GMS_MASK, ggc);
878dbb2ffbfSAravind Iddamsetty switch (gms) {
879dbb2ffbfSAravind Iddamsetty case 0x0 ... 0x04:
880dbb2ffbfSAravind Iddamsetty return gms * 32;
881dbb2ffbfSAravind Iddamsetty case 0xf0 ... 0xfe:
882dbb2ffbfSAravind Iddamsetty return (gms - 0xf0 + 1) * 4;
883dbb2ffbfSAravind Iddamsetty default:
884dbb2ffbfSAravind Iddamsetty MISSING_CASE(gms);
885dbb2ffbfSAravind Iddamsetty return -EIO;
886dbb2ffbfSAravind Iddamsetty }
887dbb2ffbfSAravind Iddamsetty }
888dbb2ffbfSAravind Iddamsetty
/*
 * Probe and create the local-memory stolen region.
 *
 * Determines the DSM base/size (from the MTL GGC register, or from
 * GEN6_DSMBASE for other platforms), works out whether and how the CPU
 * can reach the range (direct access, through the LMEM BAR, or not at
 * all), and creates the intel_memory_region describing it.
 *
 * Returns the new region, NULL when stolen support must be disabled
 * (out-of-bounds DSM placement), or an ERR_PTR() on probe failure.
 */
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	/* Only a single stolen-lmem instance is supported */
	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	/* Determine total local-memory size, from the BAR or the tile range */
	if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

		lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		/*
		 * MTL dsm size is in GGC register.
		 * Also MTL uses offset to GSMBASE in ptes, so i915
		 * uses dsm_base = 8MBs to setup stolen region, since
		 * DSMBASE = GSMBASE + 8MB.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = SZ_8M;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
	} else {
		/* Use DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		if (lmem_size < dsm_base) {
			drm_dbg(&i915->drm,
				"Disabling stolen memory support due to OOB placement: lmem_size = %pa vs dsm_base = %pa\n",
				&lmem_size, &dsm_base);
			return NULL;
		}
		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
	}

	/* Decide how (and whether) the CPU can reach the stolen range */
	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct DSM access\n");
		io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		io_size = dsm_size;
	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		/* BAR does not cover all of lmem: no CPU access to stolen */
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
		io_size = dsm_size;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
		I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	/* NOTE(review): private appears to keep the region out of generic
	 * allocation paths — confirm against intel_memory_region users.
	 */
	mem->private = true;

	return mem;
}
976d57d4a1dSCQ Tang
/*
 * Create the system-memory stolen region ("stolen-system") covering the
 * range the BIOS reserved, as reported by intel_graphics_stolen_res.
 * Returns the new region or an ERR_PTR().
 */
struct intel_memory_region*
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	/* NOTE(review): private appears to keep the region out of generic
	 * allocation paths — confirm against intel_memory_region users.
	 */
	mem->private = true;

	return mem;
}
99772405c3dSMatthew Auld
/* True if @obj is backed by stolen memory (uses the stolen object vfuncs) */
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}
10021d47074cSJouni Högander
/* True once the stolen drm_mm allocator has been set up */
bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
{
	return drm_mm_initialized(&i915->mm.stolen);
}
10071d47074cSJouni Högander
/* Physical start address of the stolen memory area */
u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
{
	return i915->dsm.stolen.start;
}
10121d47074cSJouni Högander
/* Total size of the stolen memory area */
u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
{
	return resource_size(&i915->dsm.stolen);
}
10171d47074cSJouni Högander
/* Absolute address of @node: stolen area base plus the node's offset */
u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
				 const struct drm_mm_node *node)
{
	return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}
10231d47074cSJouni Högander
/* True if @node currently holds an allocation in the stolen drm_mm */
bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
{
	return drm_mm_node_allocated(node);
}
10281d47074cSJouni Högander
/* Offset of @node from the start of the stolen area */
u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
{
	return node->start;
}
10331d47074cSJouni Högander
/* Size in bytes of the allocation held by @node */
u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
{
	return node->size;
}
1038