/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"

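/*
 * Fence completion is tracked with a per-channel sequence number: NV10 and
 * later chipsets read it back from a hardware reference counter (register
 * 0x48 below), earlier ones rely on a value updated at fence IRQ time
 * (last_sequence_irq).  NV17 and later can additionally use semaphores
 * for cross-channel synchronisation.
 */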
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)

struct nouveau_fence {
	struct nouveau_channel *channel;
	struct kref refcount;
	struct list_head entry;		/* link in channel's pending list */

	uint32_t sequence;		/* channel sequence number the fence was emitted with */
	bool signalled;

	/* optional callback, run once when the fence signals */
	void (*work)(void *priv, bool signalled);
	void *priv;
};

struct nouveau_semaphore {
	struct kref ref;
	struct drm_device *dev;
	struct drm_mm_node *mem;	/* slot in the shared fence BO heap */
};

static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	return (struct nouveau_fence *)sync_obj;
}

static void
nouveau_fence_del(struct kref *ref)
{
	struct nouveau_fence *fence =
		container_of(ref, struct nouveau_fence, refcount);

	nouveau_channel_ref(NULL, &fence->channel);
	kfree(fence);
}

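/*
 * Scan the channel's pending list and signal every fence up to the last
 * sequence number the GPU has acknowledged, running any attached work
 * callbacks and dropping the list's reference on each fence.
 */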
void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *tmp, *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	/* Fetch the last sequence if the channel is still up and running */
	if (likely(!list_empty(&chan->fence.pending))) {
		if (USE_REFCNT(dev))
			sequence = nvchan_rd32(chan, 0x48);
		else
			sequence = atomic_read(&chan->fence.last_sequence_irq);

		if (chan->fence.sequence_ack == sequence)
			goto out;
		chan->fence.sequence_ack = sequence;
	}

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		sequence = fence->sequence;
		fence->signalled = true;
		list_del(&fence->entry);

		if (unlikely(fence->work))
			fence->work(fence->priv, true);

		kref_put(&fence->refcount, nouveau_fence_del);

		if (sequence == chan->fence.sequence_ack)
			break;
	}
out:
	spin_unlock(&chan->fence.lock);
}

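/*
 * Allocate a fence on the given channel, optionally emitting it to the
 * ring immediately.  On error the fence is unreferenced and *pfence is
 * left NULL.
 */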
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
		  bool emit)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->refcount);
	nouveau_channel_ref(chan, &fence->channel);

	if (emit)
		ret = nouveau_fence_emit(fence);

	if (ret)
		nouveau_fence_unref(&fence);
	*pfence = fence;
	return ret;
}

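/* Take a reference on, and return, the channel a fence was emitted on. */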
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}

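/*
 * Assign the next sequence number to the fence, queue it on the channel's
 * pending list and emit a method that has the GPU report the sequence
 * back once all preceding work has completed.
 */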
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	/* The sequence number is about to wrap, pick up anything the GPU
	 * has already acknowledged before reusing it. */
	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	kref_get(&fence->refcount);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	if (USE_REFCNT(dev)) {
		if (dev_priv->card_type < NV_C0)
			BEGIN_RING(chan, NvSubSw, 0x0050, 1);
		else
			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
	} else {
		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
	}
	OUT_RING (chan, fence->sequence);
	FIRE_RING(chan);

	return 0;
}

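/*
 * Attach a work callback to a fence.  If the fence has already signalled
 * the callback is run immediately, otherwise it runs from
 * nouveau_fence_update() (or channel teardown) with the signalled state.
 */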
void
nouveau_fence_work(struct nouveau_fence *fence,
		   void (*work)(void *priv, bool signalled),
		   void *priv)
{
	BUG_ON(fence->work);

	spin_lock(&fence->channel->fence.lock);

	if (fence->signalled) {
		work(priv, true);
	} else {
		fence->work = work;
		fence->priv = priv;
	}

	spin_unlock(&fence->channel->fence.lock);
}

void
__nouveau_fence_unref(void **sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(*sync_obj);

	if (fence)
		kref_put(&fence->refcount, nouveau_fence_del);
	*sync_obj = NULL;
}

void *
__nouveau_fence_ref(void *sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);

	kref_get(&fence->refcount);
	return sync_obj;
}

bool
__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);
	struct nouveau_channel *chan = fence->channel;

	if (fence->signalled)
		return true;

	nouveau_fence_update(chan);
	return fence->signalled;
}

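/*
 * Wait for a fence to signal, giving up after three seconds.  In lazy
 * mode the wait sleeps between polls, starting at 1us and backing off
 * exponentially up to 1ms; otherwise it busy-waits.
 */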
int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	unsigned long timeout = jiffies + (3 * DRM_HZ);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	ktime_t t;
	int ret = 0;

	while (1) {
		if (__nouveau_fence_signalled(sync_obj, sync_arg))
			break;

		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE
					 : TASK_UNINTERRUPTIBLE);
		if (lazy) {
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}

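/*
 * Carve a semaphore out of the shared fence BO heap (4 bytes before NV84,
 * 16 from NV84 on) and zero it.  Returns NULL on chipsets without
 * semaphore support or when the heap is exhausted.
 */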
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_semaphore *sema;
	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
	int ret, i;

	if (!USE_SEMA(dev))
		return NULL;

	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
	if (!sema)
		goto fail;

	ret = drm_mm_pre_get(&dev_priv->fence.heap);
	if (ret)
		goto fail;

	spin_lock(&dev_priv->fence.lock);
	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
	if (sema->mem)
		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
	spin_unlock(&dev_priv->fence.lock);

	if (!sema->mem)
		goto fail;

	kref_init(&sema->ref);
	sema->dev = dev;
	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

	return sema;
fail:
	kfree(sema);
	return NULL;
}

static void
semaphore_free(struct kref *ref)
{
	struct nouveau_semaphore *sema =
		container_of(ref, struct nouveau_semaphore, ref);
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	spin_lock(&dev_priv->fence.lock);
	drm_mm_put_block(sema->mem);
	spin_unlock(&dev_priv->fence.lock);

	kfree(sema);
}

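/*
 * Fence work callback: if the fence never signalled (e.g. the channel was
 * torn down), release the semaphore from the CPU so any waiter can still
 * make progress, then drop our reference on it.
 */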
static void
semaphore_work(void *priv, bool signalled)
{
	struct nouveau_semaphore *sema = priv;
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	if (unlikely(!signalled))
		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

	kref_put(&sema->ref, semaphore_free);
}

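/*
 * Emit methods that stall the channel until the semaphore is released.
 * Pre-NV84 chipsets go through software methods, NV84+ uses the 64-bit
 * semaphore methods, and Fermi (NVC0+) their NVC0 equivalent.
 */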
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, offset);
		OUT_RING (chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
		OUT_RING (chan, chan->vram_handle);
		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 1); /* ACQUIRE_EQ */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}

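/*
 * Counterpart to semaphore_acquire(): emit methods that release the
 * semaphore once the signalling channel reaches this point in its
 * command stream.
 */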
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, offset);
		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
		OUT_RING (chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
		OUT_RING (chan, chan->vram_handle);
		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 2); /* RELEASE */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 0x1002); /* RELEASE */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}

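/*
 * Make wchan wait for a fence emitted on another channel.  If semaphores
 * are usable the wait happens on the GPU via an acquire/release pair;
 * otherwise (or if the emitting channel's mutex can't be taken without
 * risking lock inversion) fall back to waiting on the CPU.
 */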
int
nouveau_fence_sync(struct nouveau_fence *fence,
		   struct nouveau_channel *wchan)
{
	struct nouveau_channel *chan = nouveau_fence_channel(fence);
	struct drm_device *dev = wchan->dev;
	struct nouveau_semaphore *sema;
	int ret = 0;

	if (likely(!chan || chan == wchan ||
		   nouveau_fence_signalled(fence)))
		goto out;

	sema = semaphore_alloc(dev);
	if (!sema) {
		/* Early card or broken userspace, fall back to
		 * software sync. */
		ret = nouveau_fence_wait(fence, true, false);
		goto out;
	}

	/* try to take chan's mutex, if we can't take it right away
	 * we have to fall back to software sync to prevent locking
	 * order issues
	 */
	if (!mutex_trylock(&chan->mutex)) {
		ret = nouveau_fence_wait(fence, true, false);
		goto out_unref;
	}

	/* Make wchan wait until it gets signalled */
	ret = semaphore_acquire(wchan, sema);
	if (ret)
		goto out_unlock;

	/* Signal the semaphore from chan */
	ret = semaphore_release(chan, sema);

out_unlock:
	mutex_unlock(&chan->mutex);
out_unref:
	kref_put(&sema->ref, semaphore_free);
out:
	if (chan)
		nouveau_channel_put_unlocked(&chan);
	return ret;
}

int
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	/* Fences need no flushing, nothing to do here. */
	return 0;
}

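/*
 * Per-channel fence setup: bind an NV_SW object for software methods on
 * pre-Fermi, and give the channel access to the shared semaphore memory,
 * either through a DMA object (pre-NV84) or a VM mapping (NV84+).
 */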
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	if (dev_priv->card_type < NV_C0) {
		/* Create an NV_SW object for various sync purposes */
		ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
		if (ret)
			return ret;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, 0, 1);
		OUT_RING (chan, NvSw);
		FIRE_RING (chan);
	}

	/* Setup area of memory shared between all channels for x-chan sync */
	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
					     mem->start << PAGE_SHIFT,
					     mem->size, NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (ret)
			return ret;

		ret = nouveau_ramht_insert(chan, NvSema, obj);
		nouveau_gpuobj_ref(NULL, &obj);
		if (ret)
			return ret;
	} else
	if (USE_SEMA(dev)) {
		/* map fence bo into channel's vm */
		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
					 &chan->fence.vma);
		if (ret)
			return ret;
	}

	atomic_set(&chan->fence.last_sequence_irq, 0);
	return 0;
}

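/*
 * Per-channel fence teardown: flush the pending list, running any work
 * callbacks with signalled = false since these fences will never complete.
 */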
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *tmp, *fence;

	spin_lock(&chan->fence.lock);
	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		fence->signalled = true;
		list_del(&fence->entry);

		if (unlikely(fence->work))
			fence->work(fence->priv, false);

		kref_put(&fence->refcount, nouveau_fence_del);
	}
	spin_unlock(&chan->fence.lock);

	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}

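/*
 * Global fence setup: allocate, pin and map the VRAM buffer backing the
 * cross-channel semaphores, and initialise the allocator heap on top of
 * it (4KiB before NV84, 16KiB from NV84 on).
 */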
int
nouveau_fence_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
	int ret;

	/* Create a shared VRAM heap for cross-channel sync. */
	if (USE_SEMA(dev)) {
		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
				     0, 0, &dev_priv->fence.bo);
		if (ret)
			return ret;

		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
		if (ret)
			goto fail;

		ret = nouveau_bo_map(dev_priv->fence.bo);
		if (ret)
			goto fail;

		ret = drm_mm_init(&dev_priv->fence.heap, 0,
				  dev_priv->fence.bo->bo.mem.size);
		if (ret)
			goto fail;

		spin_lock_init(&dev_priv->fence.lock);
	}

	return 0;
fail:
	nouveau_bo_unmap(dev_priv->fence.bo);
	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	return ret;
}

void
nouveau_fence_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (USE_SEMA(dev)) {
		drm_mm_takedown(&dev_priv->fence.heap);
		nouveau_bo_unmap(dev_priv->fence.bo);
		nouveau_bo_unpin(dev_priv->fence.bo);
		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	}
}