/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022-2023 Intel Corporation
 */

#ifndef _XE_DEVICE_TYPES_H_
#define _XE_DEVICE_TYPES_H_

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_device.h>

#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
#include "xe_gt_types.h"
#include "xe_lmtt_types.h"
#include "xe_platform_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
#include "xe_step_types.h"

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
#include "soc/intel_pch.h"
#include "intel_display_core.h"
#include "intel_display_device.h"
#endif

struct xe_ggtt;
struct xe_pat_ops;

#define XE_BO_INVALID_OFFSET	LONG_MAX

#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
#define MEDIA_VER(xe) ((xe)->info.media_verx100 / 100)
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
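/*
 * Illustrative usage of the version helpers above (not part of the original
 * header): GRAPHICS_VER()/MEDIA_VER() return the major IP version, while the
 * VERx100 variants keep the minor version for finer-grained checks. The
 * version numbers and helper functions in this sketch are placeholders.
 *
 *	if (GRAPHICS_VER(xe) >= 20)
 *		use_hypothetical_new_path(xe);
 *	else if (GRAPHICS_VERx100(xe) >= 1270)
 *		use_hypothetical_intermediate_path(xe);
 */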

#define XE_VRAM_FLAGS_NEED64K		BIT(0)

#define XE_GT0		0
#define XE_GT1		1
#define XE_MAX_TILES_PER_DEVICE	(XE_GT1 + 1)

#define XE_MAX_ASID	(BIT(20))

#define IS_PLATFORM_STEP(_xe, _platform, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&			\
	 (_xe)->info.step.graphics >= (min_step) &&		\
	 (_xe)->info.step.graphics < (max_step))
#define IS_SUBPLATFORM_STEP(_xe, _platform, sub, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&				\
	 (_xe)->info.subplatform == (sub) &&				\
	 (_xe)->info.step.graphics >= (min_step) &&			\
	 (_xe)->info.step.graphics < (max_step))
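/*
 * Illustrative usage of the stepping helpers above (not part of the original
 * header); note that max_step is an exclusive bound. XE_DG2 and STEP_A0/B0
 * are assumed to come from xe_platform_types.h and xe_step_types.h, and
 * apply_hypothetical_workaround() is a placeholder.
 *
 *	if (IS_PLATFORM_STEP(xe, XE_DG2, STEP_A0, STEP_B0))
 *		apply_hypothetical_workaround(xe);
 */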

#define tile_to_xe(tile__)								\
	_Generic(tile__,								\
		 const struct xe_tile * : (const struct xe_device *)((tile__)->xe),	\
		 struct xe_tile * : (tile__)->xe)
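/*
 * Illustrative note (not part of the original header): tile_to_xe() uses
 * _Generic so that const-ness of the tile pointer propagates to the returned
 * device pointer; example() is a placeholder.
 *
 *	static void example(const struct xe_tile *tile)
 *	{
 *		const struct xe_device *xe = tile_to_xe(tile);	// stays const
 *	}
 */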
64 
65 /**
66  * struct xe_mem_region - memory region structure
67  * This is used to describe a memory region in xe
68  * device, such as HBM memory or CXL extension memory.
69  */
70 struct xe_mem_region {
71 	/** @io_start: IO start address of this VRAM instance */
72 	resource_size_t io_start;
73 	/**
74 	 * @io_size: IO size of this VRAM instance
75 	 *
76 	 * This represents how much of this VRAM we can access
77 	 * via the CPU through the VRAM BAR. This can be smaller
78 	 * than @usable_size, in which case only part of VRAM is CPU
79 	 * accessible (typically the first 256M). This
80 	 * configuration is known as small-bar.
81 	 */
82 	resource_size_t io_size;
	/** @dpa_base: This memory region's DPA (device physical address) base */
	resource_size_t dpa_base;
	/**
	 * @usable_size: usable size of VRAM
	 *
	 * Usable size of VRAM excluding reserved portions
	 * (e.g. stolen memory)
	 */
	resource_size_t usable_size;
	/**
	 * @actual_physical_size: Actual VRAM size
	 *
	 * Actual VRAM size including reserved portions
	 * (e.g. stolen memory)
	 */
	resource_size_t actual_physical_size;
	/** @mapping: pointer to VRAM mappable space */
	void __iomem *mapping;
};
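
/*
 * Illustrative sketch (not part of the original header): a region is in the
 * small-bar configuration described for @io_size above when its CPU-visible
 * window is smaller than the usable VRAM size; example_is_small_bar() is a
 * placeholder name.
 *
 *	static bool example_is_small_bar(const struct xe_mem_region *vram)
 *	{
 *		return vram->io_size < vram->usable_size;
 *	}
 */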

/**
 * struct xe_tile - hardware tile structure
 *
 * From a driver perspective, a "tile" is effectively a complete GPU, containing
 * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
 *
 * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
 * device and designate one "root" tile as being responsible for external PCI
 * communication.  PCI BAR0 exposes the GGTT and MMIO register space for each
 * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
 * with each tile similarly.  Device-wide interrupts can be enabled/disabled
 * at the root tile, and the MSTR_TILE_INTR register will report which tiles
 * have interrupts that need servicing.
 */
struct xe_tile {
	/** @xe: Backpointer to tile's PCI device */
	struct xe_device *xe;

	/** @id: ID of the tile */
	u8 id;

	/**
	 * @primary_gt: Primary GT
	 */
	struct xe_gt *primary_gt;

	/**
	 * @media_gt: Media GT
	 *
	 * Only present on devices with media version >= 13.
	 */
	struct xe_gt *media_gt;

	/**
	 * @mmio: MMIO info for a tile.
	 *
	 * Each tile has its own 16MB space in BAR0, laid out as:
	 * * 0-4MB: registers
	 * * 4MB-8MB: reserved
	 * * 8MB-16MB: global GTT
	 */
	struct {
		/** @size: size of tile's MMIO space */
		size_t size;

		/** @regs: pointer to tile's MMIO space (starting with registers) */
		void __iomem *regs;
	} mmio;
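
	/*
	 * Illustrative sketch (not part of the original header): with the
	 * stacked per-tile 16MB BAR0 layout described above, a non-root
	 * tile's register space would be expected at a 16MB-aligned offset
	 * from the root tile's mapping; this is an assumption for the
	 * example, not a statement of how the driver actually sets @regs.
	 *
	 *	tile->mmio.regs = root_tile->mmio.regs + (size_t)tile->id * SZ_16M;
	 */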

	/**
	 * @mmio_ext: MMIO-extension info for a tile.
	 *
	 * Each tile has its own additional 256MB (28-bit) MMIO-extension space.
	 */
	struct {
		/** @size: size of tile's additional MMIO-extension space */
		size_t size;

		/** @regs: pointer to tile's additional MMIO-extension space */
		void __iomem *regs;
	} mmio_ext;

	/** @mem: memory management info for tile */
	struct {
		/**
		 * @vram: VRAM info for tile.
		 *
		 * Although VRAM is associated with a specific tile, it can
		 * still be accessed by all tiles' GTs.
		 */
		struct xe_mem_region vram;

		/** @vram_mgr: VRAM TTM manager */
		struct xe_ttm_vram_mgr *vram_mgr;

		/** @ggtt: Global graphics translation table */
		struct xe_ggtt *ggtt;

		/**
		 * @kernel_bb_pool: Pool from which batchbuffers are allocated.
		 *
		 * Media GT shares a pool with its primary GT.
		 */
		struct xe_sa_manager *kernel_bb_pool;
	} mem;

	/** @sriov: tile level virtualization data */
	union {
		struct {
			/** @sriov.pf.lmtt: Local Memory Translation Table. */
			struct xe_lmtt lmtt;
		} pf;
	} sriov;

	/** @migrate: Migration helper for vram blits and clearing */
	struct xe_migrate *migrate;

	/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
	struct kobject *sysfs;
};
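
/*
 * Illustrative sketch (not part of the original header): tiles are stored in
 * the fixed-size xe_device.tiles[] array with xe_device.info.tile_count valid
 * entries, so a plain index loop is enough to visit each one;
 * do_something_per_tile() is a placeholder.
 *
 *	for (u8 id = 0; id < xe->info.tile_count; id++)
 *		do_something_per_tile(&xe->tiles[id]);
 */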

/**
 * struct xe_device - Top level struct of XE device
 */
struct xe_device {
	/** @drm: drm device */
	struct drm_device drm;

	/** @devcoredump: device coredump */
	struct xe_devcoredump devcoredump;

	/** @info: device info */
	struct intel_device_info {
		/** @graphics_name: graphics IP name */
		const char *graphics_name;
		/** @media_name: media IP name */
		const char *media_name;
		/** @tile_mmio_ext_size: size of MMIO extension space, per-tile */
		u32 tile_mmio_ext_size;
		/** @graphics_verx100: graphics IP version */
		u32 graphics_verx100;
		/** @media_verx100: media IP version */
		u32 media_verx100;
		/** @mem_region_mask: mask of valid memory regions */
		u32 mem_region_mask;
		/** @platform: XE platform enum */
		enum xe_platform platform;
		/** @subplatform: XE subplatform enum */
		enum xe_subplatform subplatform;
		/** @devid: device ID */
		u16 devid;
		/** @revid: device revision */
		u8 revid;
		/** @step: stepping information for each IP */
		struct xe_step_info step;
		/** @dma_mask_size: DMA address bits */
		u8 dma_mask_size;
		/** @vram_flags: Vram flags */
		u8 vram_flags;
		/** @tile_count: Number of tiles */
		u8 tile_count;
		/** @gt_count: Total number of GTs for entire device */
		u8 gt_count;
		/** @vm_max_level: Max VM level */
		u8 vm_max_level;
		/** @va_bits: Maximum bits of a virtual address */
		u8 va_bits;

		/** @is_dgfx: is discrete device */
		u8 is_dgfx:1;
		/** @has_asid: Has address space ID */
		u8 has_asid:1;
		/** @force_execlist: Forced execlist submission */
		u8 force_execlist:1;
		/** @has_flat_ccs: Whether flat CCS metadata is used */
		u8 has_flat_ccs:1;
		/** @has_llc: Device has a shared CPU+GPU last level cache */
		u8 has_llc:1;
		/** @has_mmio_ext: Device has extra MMIO address range */
		u8 has_mmio_ext:1;
		/** @has_range_tlb_invalidation: Has range based TLB invalidations */
		u8 has_range_tlb_invalidation:1;
		/** @has_sriov: Supports SR-IOV */
		u8 has_sriov:1;
		/** @has_usm: Device has unified shared memory support */
		u8 has_usm:1;
		/** @enable_display: display enabled */
		u8 enable_display:1;
		/** @skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
		u8 skip_mtcfg:1;
		/** @skip_pcode: skip access to PCODE uC */
		u8 skip_pcode:1;
		/** @has_heci_gscfi: device has heci gscfi */
		u8 has_heci_gscfi:1;
		/** @skip_guc_pc: Skip GuC based PM feature init */
		u8 skip_guc_pc:1;

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
		struct {
			u32 rawclk_freq;
		} i915_runtime;
#endif
	} info;

	/** @irq: device interrupt state */
	struct {
		/** @lock: lock for processing irq's on this device */
		spinlock_t lock;

		/** @enabled: interrupts enabled on this device */
		bool enabled;
	} irq;

	/** @ttm: ttm device */
	struct ttm_device ttm;

	/** @mmio: mmio info for device */
	struct {
		/** @size: size of MMIO space for device */
		size_t size;
		/** @regs: pointer to MMIO space for device */
		void __iomem *regs;
	} mmio;

	/** @mem: memory info for device */
	struct {
		/** @vram: VRAM info for device */
		struct xe_mem_region vram;
		/** @sys_mgr: system TTM manager */
		struct ttm_resource_manager sys_mgr;
	} mem;

	/** @sriov: device level virtualization data */
	struct {
		/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
		enum xe_sriov_mode __mode;
	} sriov;

	/** @clients: drm clients info */
	struct {
		/** @lock: Protects drm clients info */
		spinlock_t lock;

		/** @count: number of drm clients */
		u64 count;
	} clients;

	/** @usm: unified memory state */
	struct {
		/** @asid_to_vm: xarray mapping an ASID to its VM */
		struct xarray asid_to_vm;
		/** @next_asid: next ASID, used to cyclically allocate ASIDs */
		u32 next_asid;
		/** @num_vm_in_fault_mode: number of VMs in fault mode */
		u32 num_vm_in_fault_mode;
		/** @num_vm_in_non_fault_mode: number of VMs in non-fault mode */
		u32 num_vm_in_non_fault_mode;
		/** @lock: protects USM state */
		struct mutex lock;
	} usm;

	/** @pinned: pinned BO state */
	struct {
		/** @lock: protects pinned BO list state */
		spinlock_t lock;
		/** @kernel_bo_present: pinned kernel BOs that are present */
		struct list_head kernel_bo_present;
		/** @evicted: pinned BOs that have been evicted */
		struct list_head evicted;
		/** @external_vram: pinned external BOs in VRAM */
		struct list_head external_vram;
	} pinned;

	/** @ufence_wq: user fence wait queue */
	wait_queue_head_t ufence_wq;

	/** @ordered_wq: used to serialize compute mode resume */
	struct workqueue_struct *ordered_wq;

	/** @unordered_wq: used to serialize unordered work, mostly display */
	struct workqueue_struct *unordered_wq;

	/** @tiles: device tiles */
	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];

	/**
	 * @mem_access: keep track of memory access in the device, possibly
	 * triggering additional actions when they occur.
	 */
	struct {
		/** @ref: ref count of memory accesses */
		atomic_t ref;
	} mem_access;
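
	/*
	 * Illustrative note (not part of the original header): @mem_access.ref
	 * is a plain reference count, so the "additional actions" mentioned
	 * above would be keyed off its 0 <-> 1 transitions; the helper names
	 * below are an assumption for the example, not a statement of the
	 * actual API.
	 *
	 *	xe_device_mem_access_get(xe);
	 *	...access VRAM/GGTT...
	 *	xe_device_mem_access_put(xe);
	 */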

	/**
	 * @pat: Encapsulate PAT related stuff
	 */
	struct {
		/** @ops: Internal operations to abstract platforms */
		const struct xe_pat_ops *ops;
		/** @table: PAT table to program in the HW */
		const struct xe_pat_table_entry *table;
		/** @n_entries: Number of PAT entries */
		int n_entries;
		/** @idx: PAT index to use for each xe_cache_level */
		u32 idx[__XE_CACHE_LEVEL_COUNT];
	} pat;

	/** @d3cold: Encapsulate d3cold related stuff */
	struct {
		/** @capable: Indicates if root port is d3cold capable */
		bool capable;

		/** @allowed: Indicates if d3cold is a valid device state */
		bool allowed;

		/** @power_lost: Indicates if card has really lost power. */
		bool power_lost;

		/**
		 * @vram_threshold:
		 *
		 * This represents the permissible threshold (in megabytes)
		 * for VRAM save/restore. d3cold will be disallowed when VRAM
		 * usage is at or above this threshold, to avoid the VRAM
		 * save/restore latency. The default threshold value is 300 MB.
		 */
		u32 vram_threshold;
		/** @lock: protect vram_threshold */
		struct mutex lock;
	} d3cold;
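
	/*
	 * Illustrative sketch (not part of the original header) of only the
	 * @d3cold.vram_threshold policy described above; how VRAM usage is
	 * obtained, and any other conditions on d3cold, are out of scope here.
	 *
	 *	xe->d3cold.allowed = vram_used_mb < xe->d3cold.vram_threshold;
	 */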

	/**
	 * @pm_callback_task: Track the active task that is running in either
	 * the runtime_suspend or runtime_resume callbacks.
	 */
	struct task_struct *pm_callback_task;

	/** @hwmon: hwmon subsystem integration */
	struct xe_hwmon *hwmon;

	/** @heci_gsc: graphics security controller */
	struct xe_heci_gsc heci_gsc;

	/** @needs_flr_on_fini: requests function-reset on fini */
	bool needs_flr_on_fini;

	/* private: */

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/*
	 * Any fields below this point are the ones used by display.
	 * They are temporarily added here so xe_device can be disguised as
	 * drm_i915_private during build. After cleanup these should go away,
	 * migrating to the right sub-structs.
	 */
	struct intel_display display;
	enum intel_pch pch_type;
	u16 pch_id;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	/* To shut up runtime pm macros.. */
	struct xe_runtime_pm {} runtime_pm;

	/* For pcode */
	struct mutex sb_lock;

	/* Should be in struct intel_display */
	u32 skl_preferred_vco_freq, max_dotclk_freq, hti_state;
	u8 snps_phy_failed_calibration;
	struct drm_atomic_state *modeset_restore_state;
	struct list_head global_obj_list;

	union {
		/* only to allow build, not used functionally */
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	bool display_irqs_enabled;
	u32 enabled_irq_mask;

	struct intel_uncore {
		spinlock_t lock;
	} uncore;

	/* only to allow build, not used functionally */
	struct {
		unsigned int hpll_freq;
		unsigned int czclk_freq;
		unsigned int fsb_freq, mem_freq, is_ddr3;
		u8 vblank_enabled;
	};
	struct {
		const char *dmc_firmware_path;
	} params;

	void *pxp;
#endif
};

/**
 * struct xe_file - file handle for XE driver
 */
struct xe_file {
	/** @xe: xe device */
	struct xe_device *xe;

	/** @drm: base DRM file */
	struct drm_file *drm;

	/** @vm: VM state for file */
	struct {
		/** @xa: xarray to store VMs */
		struct xarray xa;
		/** @lock: protects file VM state */
		struct mutex lock;
	} vm;

	/** @exec_queue: Submission exec queue state for file */
	struct {
		/** @xa: xarray to store exec queues */
		struct xarray xa;
		/** @lock: protects file exec queue state */
		struct mutex lock;
	} exec_queue;

	/** @client: drm client */
	struct xe_drm_client *client;
};

#endif