/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"

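/*
 * This compat shim assumes struct intel_uncore is embedded directly in
 * struct xe_device, so container_of() recovers the device and every
 * access below is routed through the root GT's MMIO space.
 */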
static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_root_mmio_gt(xe);
}

static inline u32 intel_uncore_read(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}

static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
}

static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
				      i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
}

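/*
 * Read a 64-bit value exposed as two 32-bit registers. The upper dword is
 * sampled before and after the lower dword; if it changed in between, the
 * lower read may have straddled a carry, so the sequence is retried (the
 * loop gives up after three attempts and returns the last sample).
 */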
static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg)
{
	struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg));
	struct xe_reg upper_reg = XE_REG(i915_mmio_reg_offset(i915_upper_reg));
	u32 upper, lower, old_upper;
	int loop = 0;

	upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
	do {
		old_upper = upper;
		lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
		upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
	} while (upper != old_upper && loop++ < 2);

	return (u64)upper << 32 | lower;
}

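/*
 * A posting read forces previously posted writes out to the device by
 * performing a round-trip read; the value itself is deliberately discarded.
 */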
static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
					     i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}

static inline void intel_uncore_write(struct intel_uncore *uncore,
				      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}

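/*
 * Read-modify-write: bits in @clear are cleared, bits in @set are set, and
 * the result is written back. Like i915's intel_uncore_rmw(), this is
 * expected to return the value the register held before the write.
 */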
static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
				   i915_reg_t i915_reg, u32 clear, u32 set)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
}

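/*
 * @timeout is in milliseconds, matching the i915 prototype, while
 * xe_mmio_wait32() expects microseconds, hence the USEC_PER_MSEC scaling.
 */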
static inline int intel_wait_for_register(struct intel_uncore *uncore,
					  i915_reg_t i915_reg, u32 mask,
					  u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

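/*
 * The _fw ("forcewake held") variants in this header are intentionally
 * identical to their plain counterparts: the distinction i915 draws between
 * caller-managed forcewake and plain accesses does not apply to this shim,
 * so both map to the same xe_mmio helpers.
 */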
static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
					     i915_reg_t i915_reg, u32 mask,
					     u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

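/*
 * i915 splits this wait into a fast busy-wait phase (in microseconds) and a
 * slow sleeping phase (in milliseconds); xe_mmio_wait32() takes a single
 * deadline, so the two are folded into one timeout in microseconds.
 */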
static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
			  u32 mask, u32 value, unsigned int fast_timeout_us,
			  unsigned int slow_timeout_ms, u32 *out_value)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
			      fast_timeout_us + 1000 * slow_timeout_ms,
			      out_value, false);
}

static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
				       i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}

static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
					 i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}

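/*
 * i915 provides _notrace variants to bypass MMIO tracing; this shim draws
 * no such distinction and simply aliases the plain accessors.
 */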
static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
					    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}

static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
					      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}

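/*
 * Expose the root tile's raw register mapping for use with the raw_reg_*()
 * macros below.
 */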
static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_device_get_root_tile(xe)->mmio.regs;
}

/*
 * The raw_reg_{read,write} macros are intended as a micro-optimization for
 * interrupt handlers so that the pointer indirection on uncore->regs can
 * be computed once (and presumably cached in a register) instead of generating
 * extra load instructions for each MMIO access.
 *
 * Given that these macros are only intended for non-GSI interrupt registers
 * (and the goal is to avoid extra instructions generated by the compiler),
 * these macros do not account for uncore->gsi_offset. Any caller that needs
 * to use these macros on a GSI register is responsible for adding the
 * appropriate GSI offset to the 'base' parameter.
 */
#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))
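
/*
 * Illustrative use in an interrupt handler (SOME_IIR_REG is a placeholder,
 * not a real register definition):
 *
 *	void __iomem * const regs = intel_uncore_regs(uncore);
 *	u32 iir = raw_reg_read(regs, SOME_IIR_REG);
 *
 *	if (iir)
 *		raw_reg_write(regs, SOME_IIR_REG, iir);
 */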

#endif /* __INTEL_UNCORE_H__ */