// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/pci.h>
#include <linux/string.h>

#include <drm/drm_print.h>

#include "intel_atomic.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_global_state.h"

struct intel_global_commit {
	struct kref ref;
	struct completion done;
};

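/*
 * A global commit tracks the hardware reprogramming step of a commit
 * that serialized a global object. It is reference counted so the
 * completion can outlive the atomic state that created it; waiters
 * (see intel_atomic_global_state_wait_for_dependencies()) block on
 * ->done until intel_atomic_global_state_commit_done() fires it.
 */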
static struct intel_global_commit *commit_new(void)
{
	struct intel_global_commit *commit;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return NULL;

	init_completion(&commit->done);
	kref_init(&commit->ref);

	return commit;
}

static void __commit_free(struct kref *kref)
{
	struct intel_global_commit *commit =
		container_of(kref, typeof(*commit), ref);

	kfree(commit);
}

static struct intel_global_commit *commit_get(struct intel_global_commit *commit)
{
	if (commit)
		kref_get(&commit->ref);

	return commit;
}

static void commit_put(struct intel_global_commit *commit)
{
	if (commit)
		kref_put(&commit->ref, __commit_free);
}

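/*
 * Global object states are likewise reference counted: the global object
 * itself holds one reference via obj->state, and each atomic state that
 * duplicated the object holds references via its old_state/new_state
 * pointers. Dropping the final reference also releases the state's
 * commit reference, if any.
 */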
static void __intel_atomic_global_state_free(struct kref *kref)
{
	struct intel_global_state *obj_state =
		container_of(kref, struct intel_global_state, ref);
	struct intel_global_obj *obj = obj_state->obj;

	commit_put(obj_state->commit);

	obj->funcs->atomic_destroy_state(obj, obj_state);
}

static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
{
	kref_put(&obj_state->ref, __intel_atomic_global_state_free);
}

static struct intel_global_state *
intel_atomic_global_state_get(struct intel_global_state *obj_state)
{
	kref_get(&obj_state->ref);

	return obj_state;
}

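/*
 * Register a global object and its initial state with the display core.
 * The object starts out owning the sole reference to @state.
 *
 * A user would typically embed struct intel_global_state in its own
 * state and register it at init time, roughly like this (a sketch
 * modelled on users such as the cdclk state; "foo" names are
 * illustrative, not part of this file):
 *
 *	foo_state = kzalloc(sizeof(*foo_state), GFP_KERNEL);
 *	if (!foo_state)
 *		return -ENOMEM;
 *
 *	intel_atomic_global_obj_init(display, &display->foo.obj,
 *				     &foo_state->base, &foo_funcs);
 */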
void intel_atomic_global_obj_init(struct intel_display *display,
				  struct intel_global_obj *obj,
				  struct intel_global_state *state,
				  const struct intel_global_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	state->obj = obj;

	kref_init(&state->ref);

	obj->state = state;
	obj->funcs = funcs;
	list_add_tail(&obj->head, &display->global.obj_list);
}

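/*
 * Tear down all registered global objects. At this point nothing else
 * may hold a reference, hence the WARN if the refcount isn't exactly
 * the one owned by the object itself.
 */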
void intel_atomic_global_obj_cleanup(struct intel_display *display)
{
	struct intel_global_obj *obj, *next;

	list_for_each_entry_safe(obj, next, &display->global.obj_list, head) {
		list_del(&obj->head);

		drm_WARN_ON(display->drm, kref_read(&obj->state->ref) != 1);
		intel_atomic_global_state_put(obj->state);
	}
}

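/*
 * Locking scheme: global state is protected by the CRTC locks. Holding
 * all CRTC locks gives exclusive (write) access, while holding any one
 * CRTC lock suffices for shared (read) access. The two asserts below
 * encode exactly that.
 */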
static void assert_global_state_write_locked(struct intel_display *display)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}

static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
				 struct drm_modeset_lock *lock)
{
	struct drm_modeset_lock *l;

	list_for_each_entry(l, &ctx->locked, head) {
		if (lock == l)
			return true;
	}

	return false;
}

static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc) {
		if (modeset_lock_is_held(ctx, &crtc->base.mutex))
			return;
	}

	drm_WARN(display->drm, 1, "Global state not read locked\n");
}

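/*
 * Look up (or create) the @obj state tracked by @state. On first use the
 * current state is duplicated, both old and new pointers are recorded,
 * and an extra reference is taken on the old state. Callers usually wrap
 * this in a type-safe helper; a hedged sketch of such a wrapper, with
 * "foo" names purely illustrative:
 *
 *	struct foo_state *
 *	intel_atomic_get_foo_state(struct intel_atomic_state *state)
 *	{
 *		struct intel_display *display = to_intel_display(state);
 *		struct intel_global_state *obj_state;
 *
 *		obj_state = intel_atomic_get_global_obj_state(state,
 *							      &display->foo.obj);
 *		if (IS_ERR(obj_state))
 *			return ERR_CAST(obj_state);
 *
 *		return to_foo_state(obj_state);
 *	}
 */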
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
				  struct intel_global_obj *obj)
{
	struct intel_display *display = to_intel_display(state);
	int index, num_objs, i;
	size_t size;
	struct __intel_global_objs_state *arr;
	struct intel_global_state *obj_state;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].state;

	assert_global_state_read_locked(state);

	num_objs = state->num_global_objs + 1;
	size = sizeof(*state->global_objs) * num_objs;
	arr = krealloc(state->global_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->global_objs = arr;
	index = state->num_global_objs;
	memset(&state->global_objs[index], 0, sizeof(*state->global_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	obj_state->obj = obj;
	obj_state->changed = false;
	obj_state->serialized = false;
	obj_state->commit = NULL;

	kref_init(&obj_state->ref);

	state->global_objs[index].state = obj_state;
	state->global_objs[index].old_state =
		intel_atomic_global_state_get(obj->state);
	state->global_objs[index].new_state = obj_state;
	state->global_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_global_objs = num_objs;

	drm_dbg_atomic(display->drm, "Added new global object %p state %p to %p\n",
		       obj, obj_state, state);

	return obj_state;
}

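/*
 * The old/new lookups below only return states already added to @state
 * by intel_atomic_get_global_obj_state(); they never allocate, and
 * return NULL if the object isn't part of this atomic state.
 */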
struct intel_global_state *
intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
				      struct intel_global_obj *obj)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].old_state;

	return NULL;
}

struct intel_global_state *
intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
				      struct intel_global_obj *obj)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].new_state;

	return NULL;
}

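/*
 * Swap the modified global states into the objects, mirroring what
 * drm_atomic_helper_swap_state() does for the core objects: the object
 * takes a reference to the new state, while the old state is handed
 * back to @state so it gets released along with it.
 */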
void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *old_obj_state, *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
					    new_obj_state, i) {
		drm_WARN_ON(display->drm, obj->state != old_obj_state);

		/*
		 * If the new state wasn't modified (and properly
		 * locked for write access) we throw it away.
		 */
		if (!new_obj_state->changed)
			continue;

		assert_global_state_write_locked(display);

		old_obj_state->state = state;
		new_obj_state->state = NULL;

		state->global_objs[i].state = old_obj_state;

		intel_atomic_global_state_put(obj->state);
		obj->state = intel_atomic_global_state_get(new_obj_state);
	}
}

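/*
 * Drop the references taken in intel_atomic_get_global_obj_state() when
 * the atomic state itself is being cleared.
 */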
void intel_atomic_clear_global_state(struct intel_atomic_state *state)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++) {
		intel_atomic_global_state_put(state->global_objs[i].old_state);
		intel_atomic_global_state_put(state->global_objs[i].new_state);

		state->global_objs[i].ptr = NULL;
		state->global_objs[i].state = NULL;
		state->global_objs[i].old_state = NULL;
		state->global_objs[i].new_state = NULL;
	}
	state->num_global_objs = 0;
}

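/*
 * Taking the global state write lock means grabbing every CRTC lock.
 * "changed" marks the state for swapping; "serialized" additionally
 * makes later commits touching the same object wait for this one
 * (see intel_atomic_global_state_setup_commit() and
 * intel_atomic_global_state_wait_for_dependencies()).
 */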
int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
	struct intel_atomic_state *state = obj_state->state;
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc) {
		int ret;

		ret = drm_modeset_lock(&crtc->base.mutex,
				       state->base.acquire_ctx);
		if (ret)
			return ret;
	}

	obj_state->changed = true;

	return 0;
}

int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
	int ret;

	ret = intel_atomic_lock_global_state(obj_state);
	if (ret)
		return ret;

	obj_state->serialized = true;

	return 0;
}

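/*
 * A commit that contains a new state for every CRTC is fully serialized
 * against all other commits, since nothing else can run concurrently
 * while all the CRTC locks are held.
 */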
bool
intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc)
		if (!intel_atomic_get_new_crtc_state(state, crtc))
			return false;

	return true;
}

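/*
 * Set up the completion tracking for this commit: serialized states get
 * a fresh commit to signal once the hardware has been reprogrammed,
 * while merely changed states inherit the previous commit, so an
 * earlier, still-pending reprogramming isn't lost across the swap.
 */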
int
intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
{
	const struct intel_global_state *old_obj_state;
	struct intel_global_state *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
					    new_obj_state, i) {
		struct intel_global_commit *commit = NULL;

		if (new_obj_state->serialized) {
			/*
			 * New commit which is going to be completed
			 * after the hardware reprogramming is done.
			 */
			commit = commit_new();
			if (!commit)
				return -ENOMEM;
		} else if (new_obj_state->changed) {
			/*
			 * We're going to swap to this state, so carry the
			 * previous commit along, in case it's not yet done.
			 */
			commit = commit_get(old_obj_state->commit);
		}

		new_obj_state->commit = commit;
	}

	return 0;
}

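/*
 * Before touching the hardware, wait (up to 10 seconds per object) for
 * any earlier commit that serialized one of our global objects to
 * finish its reprogramming.
 */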
int
intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_global_state *old_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_old_global_obj_in_state(state, obj, old_obj_state, i) {
		struct intel_global_commit *commit = old_obj_state->commit;
		long ret;

		if (!commit)
			continue;

		ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
		if (ret == 0) {
			drm_err(display->drm, "global state timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

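/*
 * Hardware reprogramming is done: wake up everyone waiting on the
 * commits of the states we serialized.
 */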
void
intel_atomic_global_state_commit_done(struct intel_atomic_state *state)
{
	const struct intel_global_state *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_new_global_obj_in_state(state, obj, new_obj_state, i) {
		struct intel_global_commit *commit = new_obj_state->commit;

		if (!new_obj_state->serialized)
			continue;

		complete_all(&commit->done);
	}
}