Lines Matching full:lock
50 * Take the heavyweight lock.
52 * \param lock lock pointer.
54 * \return one if the lock is held, or zero otherwise.
56 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
63 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_lock_take() local
67 old = *lock; in drm_lock_take()
75 prev = cmpxchg(lock, old, new); in drm_lock_take()
82 DRM_ERROR("%d holds heavyweight lock\n", in drm_lock_take()
90 /* Have lock */ in drm_lock_take()
97 * This takes a lock forcibly and hands it to context. Should ONLY be used
98 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
101 * \param lock lock pointer.
105 * Resets the lock file pointer.
106 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
112 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_lock_transfer() local
116 old = *lock; in drm_lock_transfer()
118 prev = cmpxchg(lock, old, new); in drm_lock_transfer()
127 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_legacy_lock_free() local
139 old = *lock; in drm_legacy_lock_free()
141 prev = cmpxchg(lock, old, new); in drm_legacy_lock_free()
145 DRM_ERROR("%d freed heavyweight lock held by %d\n", in drm_legacy_lock_free()
154 * Lock ioctl.
162 * Add the current task to the lock wait queue, and attempt to take the lock.
168 struct drm_lock *lock = data; in drm_legacy_lock() local
177 if (lock->context == DRM_KERNEL_CONTEXT) { in drm_legacy_lock()
179 task_pid_nr(current), lock->context); in drm_legacy_lock()
183 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", in drm_legacy_lock()
184 lock->context, task_pid_nr(current), in drm_legacy_lock()
185 master->lock.hw_lock ? master->lock.hw_lock->lock : -1, in drm_legacy_lock()
186 lock->flags); in drm_legacy_lock()
188 add_wait_queue(&master->lock.lock_queue, &entry); in drm_legacy_lock()
189 spin_lock_bh(&master->lock.spinlock); in drm_legacy_lock()
190 master->lock.user_waiters++; in drm_legacy_lock()
191 spin_unlock_bh(&master->lock.spinlock); in drm_legacy_lock()
195 if (!master->lock.hw_lock) { in drm_legacy_lock()
201 if (drm_lock_take(&master->lock, lock->context)) { in drm_legacy_lock()
202 master->lock.file_priv = file_priv; in drm_legacy_lock()
203 master->lock.lock_time = jiffies; in drm_legacy_lock()
204 break; /* Got lock */ in drm_legacy_lock()
216 spin_lock_bh(&master->lock.spinlock); in drm_legacy_lock()
217 master->lock.user_waiters--; in drm_legacy_lock()
218 spin_unlock_bh(&master->lock.spinlock); in drm_legacy_lock()
220 remove_wait_queue(&master->lock.lock_queue, &entry); in drm_legacy_lock()
222 DRM_DEBUG("%d %s\n", lock->context, in drm_legacy_lock()
223 ret ? "interrupted" : "has lock"); in drm_legacy_lock()
230 dev->sigdata.context = lock->context; in drm_legacy_lock()
231 dev->sigdata.lock = master->lock.hw_lock; in drm_legacy_lock()
234 if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) in drm_legacy_lock()
238 lock->context); in drm_legacy_lock()
255 * Transfer and free the lock.
259 struct drm_lock *lock = data; in drm_legacy_unlock() local
265 if (lock->context == DRM_KERNEL_CONTEXT) { in drm_legacy_unlock()
267 task_pid_nr(current), lock->context); in drm_legacy_unlock()
271 if (drm_legacy_lock_free(&master->lock, lock->context)) { in drm_legacy_unlock()
279 * This function returns immediately and takes the hw lock
283 * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
284 * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
312 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_legacy_idlelock_release() local
318 old = *lock; in drm_legacy_idlelock_release()
319 prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); in drm_legacy_idlelock_release()
334 return (file_priv->lock_count && master->lock.hw_lock && in drm_legacy_i_have_hw_lock()
335 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && in drm_legacy_i_have_hw_lock()
336 master->lock.file_priv == file_priv); in drm_legacy_i_have_hw_lock()
343 /* if the master has gone away we can't do anything with the lock */ in drm_legacy_lock_release()
348 DRM_DEBUG("File %p released, freeing lock for context %d\n", in drm_legacy_lock_release()
349 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); in drm_legacy_lock_release()
350 drm_legacy_lock_free(&file_priv->master->lock, in drm_legacy_lock_release()
351 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); in drm_legacy_lock_release()
362 * possibility to lock. in drm_legacy_lock_master_cleanup()
365 if (master->lock.hw_lock) { in drm_legacy_lock_master_cleanup()
366 if (dev->sigdata.lock == master->lock.hw_lock) in drm_legacy_lock_master_cleanup()
367 dev->sigdata.lock = NULL; in drm_legacy_lock_master_cleanup()
368 master->lock.hw_lock = NULL; in drm_legacy_lock_master_cleanup()
369 master->lock.file_priv = NULL; in drm_legacy_lock_master_cleanup()
370 wake_up_interruptible_all(&master->lock.lock_queue); in drm_legacy_lock_master_cleanup()