1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2013-2021 Intel Corporation
4 */
5
6 #include <drm/drm_print.h>
7
8 #include "i915_drv.h"
9 #include "i915_iosf_mbi.h"
10 #include "i915_reg.h"
11 #include "vlv_iosf_sb.h"
12
13 /*
14 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
15 * VLV_VLV2_PUNIT_HAS_0.8.docx
16 */
17
18 /* Standard MMIO read, non-posted */
19 #define SB_MRD_NP 0x00
20 /* Standard MMIO write, non-posted */
21 #define SB_MWR_NP 0x01
22 /* Private register read, double-word addressing, non-posted */
23 #define SB_CRRDDA_NP 0x06
24 /* Private register write, double-word addressing, non-posted */
25 #define SB_CRWRDA_NP 0x07
26
/*
 * No-op IPI callback. Passed to on_each_cpu() in __vlv_punit_get(); the
 * point is the cross-call itself (kicking every CPU), not the body.
 */
static void ping(void *info)
{
}
30
/*
 * Acquire the Punit side of the IOSF sideband. On Valleyview this also
 * pins the CPUs out of low-power states (see comment below) and IPIs
 * every CPU so the new QoS constraint takes effect immediately.
 */
static void __vlv_punit_get(struct drm_i915_private *i915)
{
	iosf_mbi_punit_acquire();

	/*
	 * Prevent the cpu from sleeping while we use this sideband, otherwise
	 * the punit may cause a machine hang. The issue appears to be isolated
	 * with changing the power state of the CPU package while changing
	 * the power state via the punit, and we have only observed it
	 * reliably on 4-core Baytail systems suggesting the issue is in the
	 * power delivery mechanism and likely to be board/function
	 * specific. Hence we presume the workaround needs only be applied
	 * to the Valleyview P-unit and not all sideband communications.
	 */
	if (IS_VALLEYVIEW(i915)) {
		cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos, 0);
		on_each_cpu(ping, NULL, 1);
	}
}
50
/*
 * Release the Punit side of the IOSF sideband: undo the Valleyview CPU
 * latency QoS constraint taken in __vlv_punit_get(), then release the
 * punit. Inverse order of __vlv_punit_get().
 */
static void __vlv_punit_put(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos,
					       PM_QOS_DEFAULT_VALUE);

	iosf_mbi_punit_release();
}
59
/*
 * vlv_iosf_sb_get - lock the IOSF sideband for the units in @unit_mask
 * @drm: drm device
 * @unit_mask: bitmask of enum vlv_iosf_sb_unit values (BIT(unit))
 *
 * Must be paired with vlv_iosf_sb_put() passing the same @unit_mask.
 * The punit acquire (and its CPU QoS workaround) is done before taking
 * the sideband mutex; vlv_iosf_sb_put() releases in the reverse order.
 */
void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask)
{
	struct drm_i915_private *i915 = to_i915(drm);

	if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_get(i915);

	mutex_lock(&i915->vlv_iosf_sb.lock);

	/* Record which units are locked; checked by the read/write WARNs. */
	i915->vlv_iosf_sb.locked_unit_mask |= unit_mask;
}
71
/*
 * vlv_iosf_sb_put - unlock the IOSF sideband for the units in @unit_mask
 * @drm: drm device
 * @unit_mask: the same bitmask previously passed to vlv_iosf_sb_get()
 *
 * WARNs if any unit remains locked after clearing @unit_mask, i.e. the
 * whole mask from the matching vlv_iosf_sb_get() must be released at once.
 */
void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask)
{
	struct drm_i915_private *i915 = to_i915(drm);

	i915->vlv_iosf_sb.locked_unit_mask &= ~unit_mask;

	drm_WARN_ON(drm, i915->vlv_iosf_sb.locked_unit_mask);

	mutex_unlock(&i915->vlv_iosf_sb.lock);

	/* Punit released after dropping the mutex: reverse of _get(). */
	if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_put(i915);
}
85
/*
 * Perform one IOSF sideband transaction via the doorbell registers.
 *
 * @devfn/@port/@opcode/@addr address the target unit register; for read
 * opcodes the result is returned in *val, for write opcodes *val supplies
 * the data to write.
 *
 * Returns 0 on success, -EAGAIN if the doorbell was still busy from a
 * previous transaction, -ETIMEDOUT if this transaction never completed.
 * Caller must hold vlv_iosf_sb.lock (and the punit for IOSF_PORT_PUNIT).
 */
static int vlv_sideband_rw(struct drm_i915_private *i915,
			   u32 devfn, u32 port, u32 opcode,
			   u32 addr, u32 *val)
{
	struct intel_uncore *uncore = &i915->uncore;
	/* Read vs. write is implied by the opcode. */
	const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
	int err;

	lockdep_assert_held(&i915->vlv_iosf_sb.lock);
	if (port == IOSF_PORT_PUNIT)
		iosf_mbi_assert_punit_acquired();

	/* Flush the previous comms, just in case it failed last time. */
	if (intel_wait_for_register(uncore,
				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
				    5)) {
		drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
			is_read ? "read" : "write");
		return -EAGAIN;
	}

	/*
	 * NOTE(review): _fw accessors with preemption disabled — presumably
	 * to keep the addr/data/doorbell sequence tight; confirm intent.
	 */
	preempt_disable();

	intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
	intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
	/* Ring the doorbell: target + opcode + BUSY kicks off the transfer. */
	intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
			      (devfn << IOSF_DEVFN_SHIFT) |
			      (opcode << IOSF_OPCODE_SHIFT) |
			      (port << IOSF_PORT_SHIFT) |
			      (0xf << IOSF_BYTE_ENABLES_SHIFT) |
			      (0 << IOSF_BAR_SHIFT) |
			      IOSF_SB_BUSY);

	/* Hardware clears IOSF_SB_BUSY when the transaction completes. */
	if (__intel_wait_for_register_fw(uncore,
					 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
					 10000, 0, NULL) == 0) {
		if (is_read)
			*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
		err = 0;
	} else {
		drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
			is_read ? "read" : "write");
		err = -ETIMEDOUT;
	}

	preempt_enable();

	return err;
}
135
unit_to_devfn(enum vlv_iosf_sb_unit unit)136 static u32 unit_to_devfn(enum vlv_iosf_sb_unit unit)
137 {
138 if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2 ||
139 unit == VLV_IOSF_SB_FLISDSI)
140 return DPIO_DEVFN;
141 else
142 return PCI_DEVFN(0, 0);
143 }
144
unit_to_port(enum vlv_iosf_sb_unit unit)145 static u32 unit_to_port(enum vlv_iosf_sb_unit unit)
146 {
147 switch (unit) {
148 case VLV_IOSF_SB_BUNIT:
149 return IOSF_PORT_BUNIT;
150 case VLV_IOSF_SB_CCK:
151 return IOSF_PORT_CCK;
152 case VLV_IOSF_SB_CCU:
153 return IOSF_PORT_CCU;
154 case VLV_IOSF_SB_DPIO:
155 return IOSF_PORT_DPIO;
156 case VLV_IOSF_SB_DPIO_2:
157 return IOSF_PORT_DPIO_2;
158 case VLV_IOSF_SB_FLISDSI:
159 return IOSF_PORT_FLISDSI;
160 case VLV_IOSF_SB_GPIO:
161 return 0; /* FIXME: unused */
162 case VLV_IOSF_SB_NC:
163 return IOSF_PORT_NC;
164 case VLV_IOSF_SB_PUNIT:
165 return IOSF_PORT_PUNIT;
166 default:
167 return 0;
168 }
169 }
170
unit_to_opcode(enum vlv_iosf_sb_unit unit,bool write)171 static u32 unit_to_opcode(enum vlv_iosf_sb_unit unit, bool write)
172 {
173 if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2)
174 return write ? SB_MWR_NP : SB_MRD_NP;
175 else
176 return write ? SB_CRWRDA_NP : SB_CRRDDA_NP;
177 }
178
/*
 * vlv_iosf_sb_read - read a register over the IOSF sideband
 * @drm: drm device
 * @unit: target sideband unit
 * @addr: register address within the unit
 *
 * The unit must already be locked via vlv_iosf_sb_get() (WARNs if not).
 * Returns the register value, or 0 for an invalid @unit or if the
 * sideband transaction fails (errors are not distinguishable from a
 * register reading 0).
 */
u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
{
	struct drm_i915_private *i915 = to_i915(drm);
	u32 devfn, port, opcode, val = 0;

	devfn = unit_to_devfn(unit);
	port = unit_to_port(unit);
	opcode = unit_to_opcode(unit, false);

	/* port 0 means unit_to_port() had no mapping for this unit */
	if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
		return 0;

	drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));

	vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);

	return val;
}
197
/*
 * vlv_iosf_sb_write - write a register over the IOSF sideband
 * @drm: drm device
 * @unit: target sideband unit
 * @addr: register address within the unit
 * @val: value to write
 *
 * The unit must already be locked via vlv_iosf_sb_get() (WARNs if not).
 * Returns 0 on success, -EINVAL for an invalid @unit, or the error from
 * vlv_sideband_rw() (-EAGAIN/-ETIMEDOUT).
 */
int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
{
	struct drm_i915_private *i915 = to_i915(drm);
	u32 devfn, port, opcode;

	devfn = unit_to_devfn(unit);
	port = unit_to_port(unit);
	opcode = unit_to_opcode(unit, true);

	/* port 0 means unit_to_port() had no mapping for this unit */
	if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
		return -EINVAL;

	drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));

	return vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
}
214
/*
 * One-time init of the IOSF sideband state: the mutex on VLV/CHV (the
 * only platforms using this sideband), plus the CPU latency QoS request
 * used by the VLV-only punit workaround in __vlv_punit_get().
 */
void vlv_iosf_sb_init(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		mutex_init(&i915->vlv_iosf_sb.lock);

	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_add_request(&i915->vlv_iosf_sb.qos, PM_QOS_DEFAULT_VALUE);
}
223
/*
 * Teardown counterpart of vlv_iosf_sb_init(): remove the VLV CPU latency
 * QoS request and destroy the sideband mutex on VLV/CHV.
 */
void vlv_iosf_sb_fini(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_remove_request(&i915->vlv_iosf_sb.qos);

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		mutex_destroy(&i915->vlv_iosf_sb.lock);
}
232