/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"

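/*
 * Compatibility shims that let code shared with i915 (chiefly the display
 * stack) run on xe: each i915 uncore accessor unwraps the i915_reg_t
 * offset into an xe_reg and forwards it to the matching xe_mmio helper.
 */
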
#define FORCEWAKE_ALL XE_FORCEWAKE_ALL

static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
{
	return &to_xe_device(drm)->uncore;
}

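/*
 * The compat struct intel_uncore is embedded in struct xe_device, so the
 * device (and from it the root tile's MMIO region) can be recovered from
 * the uncore pointer with container_of().
 */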
static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_root_tile_mmio(xe);
}

static inline u32 intel_uncore_read(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}

static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
				      i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
}

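/*
 * Read a 64-bit value exposed as two 32-bit registers. The upper half is
 * re-read after the lower half and the sequence retried (up to twice) if
 * it changed, so a torn value is not returned when the lower half wraps
 * between the two reads.
 */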
static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg)
{
	struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg));
	struct xe_reg upper_reg = XE_REG(i915_mmio_reg_offset(i915_upper_reg));
	u32 upper, lower, old_upper;
	int loop = 0;

	upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
	do {
		old_upper = upper;
		lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
		upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
	} while (upper != old_upper && loop++ < 2);

	return (u64)upper << 32 | lower;
}

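/*
 * A posting read flushes previously posted MMIO writes by reading the
 * register back and discarding the result.
 */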
static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
					     i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write(struct intel_uncore *uncore,
				      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
				   i915_reg_t i915_reg, u32 clear, u32 set)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}

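/*
 * The wait helpers below take their timeout in milliseconds, while
 * xe_mmio_wait32() expects microseconds, hence the USEC_PER_MSEC scaling.
 */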
static inline int intel_wait_for_register(struct intel_uncore *uncore,
					  i915_reg_t i915_reg, u32 mask,
					  u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
					     i915_reg_t i915_reg, u32 mask,
					     u32 value, unsigned int timeout,
					     u32 *out_value)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, out_value, false);
}

static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
			  u32 mask, u32 value, unsigned int fast_timeout_us,
			  unsigned int slow_timeout_ms, u32 *out_value)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
	bool atomic;

	/*
	 * Replicate the behavior from i915 here, in which sleep is not
	 * performed if slow_timeout_ms == 0. This is necessary because
	 * of some paths in display code where waits are done in atomic
	 * context.
	 */
	atomic = !slow_timeout_ms && fast_timeout_us > 0;

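	/* The total budget is the fast (us) and slow (ms) windows combined. */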
	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      fast_timeout_us + 1000 * slow_timeout_ms,
			      out_value, atomic);
}

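/*
 * In i915 the _fw ("forcewake") variants assume the caller already holds
 * the required forcewake domains. xe manages forcewake separately, so
 * here they are plain aliases of the regular accessors.
 */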
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
				       i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
					 i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

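/*
 * i915 uses the _notrace variants to skip MMIO tracing; xe draws no such
 * distinction, so these too map straight to xe_mmio.
 */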
static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
					    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
					      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

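/*
 * Forcewake and unclaimed-MMIO detection are handled (or not applicable)
 * within xe itself, so the i915 entry points are stubbed out as no-ops.
 */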
#define intel_uncore_forcewake_get(x, y) do { } while (0)
#define intel_uncore_forcewake_put(x, y) do { } while (0)

#define intel_uncore_arm_unclaimed_mmio_detection(x) do { } while (0)

#endif /* __INTEL_UNCORE_H__ */