/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"

#define FORCEWAKE_ALL XE_FORCEWAKE_ALL

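/*
 * Compat shim: shared i915 display code keeps calling the intel_uncore_*()
 * accessors, while the inline wrappers below translate each i915_reg_t
 * offset into an xe_reg and forward the access through xe_mmio. A minimal
 * usage sketch, assuming xe is a struct xe_device * (SOME_DISPLAY_REG is a
 * placeholder i915_reg_t, not a register defined by this header):
 *
 *	struct intel_uncore *uncore = to_intel_uncore(&xe->drm);
 *	u32 val;
 *
 *	val = intel_uncore_read(uncore, SOME_DISPLAY_REG);
 *	intel_uncore_write(uncore, SOME_DISPLAY_REG, val | BIT(0));
 */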
static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
{
	return &to_xe_device(drm)->uncore;
}

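/*
 * The two helpers below resolve an intel_uncore pointer back to its
 * containing xe_device; all compat MMIO accesses are then issued against
 * the root tile, which is where the display code served by this header
 * expects its registers.
 */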
static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_root_tile_mmio(xe);
}

static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_device_get_root_tile(xe);
}

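/*
 * Read accessors: each converts the i915_reg_t offset into an xe_reg via
 * XE_REG() and performs a plain MMIO read of the matching width through
 * xe_mmio.
 */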
static inline u32 intel_uncore_read(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}

static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
				      i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
}

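/*
 * 64-bit value split across two 32-bit registers: read upper, then lower,
 * then upper again, and retry (a bounded number of times) if the upper
 * half changed in between, so a carry propagating from the lower to the
 * upper register cannot produce a torn result.
 */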
static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg)
{
	struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg));
	struct xe_reg upper_reg = XE_REG(i915_mmio_reg_offset(i915_upper_reg));
	u32 upper, lower, old_upper;
	int loop = 0;

	upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
	do {
		old_upper = upper;
		lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
		upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
	} while (upper != old_upper && loop++ < 2);

	return (u64)upper << 32 | lower;
}

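/*
 * Posting read: the value is read and discarded purely for its side
 * effect of flushing previously posted MMIO writes.
 */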
static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
					     i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write(struct intel_uncore *uncore,
				      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
				   i915_reg_t i915_reg, u32 clear, u32 set)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}

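/*
 * Wait helpers: the i915 interfaces take a timeout in milliseconds while
 * xe_mmio_wait32() takes microseconds, hence the USEC_PER_MSEC scaling.
 * The _fw variant is identical here because this compat layer does not
 * model forcewake.
 */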
static inline int intel_wait_for_register(struct intel_uncore *uncore,
					  i915_reg_t i915_reg, u32 mask,
					  u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
					     i915_reg_t i915_reg, u32 mask,
					     u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

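/*
 * Combined fast/slow wait: the two i915 timeouts are collapsed into a
 * single microsecond budget for xe_mmio_wait32(), and the atomic flag
 * computed below preserves i915's "no sleeping when slow_timeout_ms == 0"
 * rule.
 */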
static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
			  u32 mask, u32 value, unsigned int fast_timeout_us,
			  unsigned int slow_timeout_ms, u32 *out_value)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
	bool atomic;

	/*
	 * Replicate the behavior from i915 here, in which sleep is not
	 * performed if slow_timeout_ms == 0. This is necessary because
	 * of some paths in display code where waits are done in atomic
	 * context.
	 */
	atomic = !slow_timeout_ms && fast_timeout_us > 0;

	return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
			      fast_timeout_us + 1000 * slow_timeout_ms,
			      out_value, atomic);
}

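/*
 * The _fw and _notrace read/write variants below collapse to the same
 * plain xe_mmio access as the regular accessors, since this compat layer
 * tracks neither forcewake domains nor MMIO tracing.
 */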
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
				       i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
					 i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
					    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}

static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
					      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}

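/*
 * Forcewake get/put and unclaimed-MMIO arming are no-ops here; callers in
 * shared display code can keep invoking them without any effect on xe.
 */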
#define intel_uncore_forcewake_get(x, y) do { } while (0)
#define intel_uncore_forcewake_put(x, y) do { } while (0)

#define intel_uncore_arm_unclaimed_mmio_detection(x) do { } while (0)

#endif /* __INTEL_UNCORE_H__ */