Lines matching references to group (numbers are the matched lines' positions in the source file):

19 	struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work);
23 err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
30 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
38 xe_hw_engine_group_put(group);
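
The matches at lines 19-38 are fragments of the workqueue handler that resumes faulting long-running (LR) jobs. Below is a minimal sketch of how they plausibly fit together; the early return on error, the bail-out when the group was already in LR mode, the per-queue fault-mode filter, the q->ops->resume() call and the enum type name are assumptions filled in for illustration, not verbatim source.

/*
 * Sketch only: assumes the xe driver's internal headers, e.g.
 * <linux/workqueue.h>, "xe_hw_engine_group.h", "xe_exec_queue.h", "xe_vm.h".
 */
static void hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
{
	struct xe_hw_engine_group *group =
		container_of(w, struct xe_hw_engine_group, resume_work);
	enum xe_hw_engine_group_execution_mode previous_mode; /* assumed type name */
	struct xe_exec_queue *q;
	int err;

	/* Pin the group in LR mode; give up if that fails. */
	err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
	if (err)
		return;

	/* Assumed early-out: nothing to resume if we already were in LR mode. */
	if (previous_mode == EXEC_MODE_LR)
		goto put;

	/* Resume every faulting LR queue tracked by the group (filter assumed). */
	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		if (!xe_vm_in_fault_mode(q->vm))
			continue;

		q->ops->resume(q); /* assumed exec-queue op */
	}

put:
	xe_hw_engine_group_put(group);
}
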
44 struct xe_hw_engine_group *group;
47 group = drmm_kzalloc(&xe->drm, sizeof(*group), GFP_KERNEL);
48 if (!group)
51 group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0);
52 if (!group->resume_wq)
55 err = drmm_add_action_or_reset(&xe->drm, __drmm_workqueue_release, group->resume_wq);
59 init_rwsem(&group->mode_sem);
60 INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
61 INIT_LIST_HEAD(&group->exec_queue_list);
63 return group;
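
Lines 44-63 come from the group allocator. A hedged reconstruction follows, assuming the elided lines are the usual ERR_PTR() error returns; the function name and signature are not in the matches.

/* Function name and error returns assumed; the body lines appear in the matches. */
static struct xe_hw_engine_group *hw_engine_group_alloc(struct xe_device *xe)
{
	struct xe_hw_engine_group *group;
	int err;

	group = drmm_kzalloc(&xe->drm, sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	/* Dedicated workqueue for the deferred resume of faulting LR jobs. */
	group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0);
	if (!group->resume_wq)
		return ERR_PTR(-ENOMEM);

	/* Tie the workqueue's lifetime to the drm_device. */
	err = drmm_add_action_or_reset(&xe->drm, __drmm_workqueue_release,
				       group->resume_wq);
	if (err)
		return ERR_PTR(err);

	init_rwsem(&group->mode_sem);
	INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
	INIT_LIST_HEAD(&group->exec_queue_list);

	return group;
}
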
115 * xe_hw_engine_group_add_exec_queue() - Add an exec queue to a hw engine group
116 * @group: The hw engine group
122 int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
127 xe_assert(xe, group);
134 err = down_write_killable(&group->mode_sem);
138 if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) {
144 xe_hw_engine_group_resume_faulting_lr_jobs(group);
147 list_add(&q->hw_engine_group_link, &group->exec_queue_list);
148 up_write(&group->mode_sem);
153 up_write(&group->mode_sem);
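
Lines 115-153 belong to xe_hw_engine_group_add_exec_queue(). A sketch of the control flow implied by the matches follows; the gt_to_xe(q->gt) lookup, the q->ops->suspend()/suspend_wait() pair and the err_suspend unwind are assumptions, and the additional validity checks around line 127 are not reconstructed.

int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group,
				      struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt); /* assumed lookup */
	int err;

	xe_assert(xe, group);
	/* Further checks around line 127 elided here. */

	err = down_write_killable(&group->mode_sem);
	if (err)
		return err;

	/*
	 * A faulting (LR) queue cannot run while the group executes dma-fence
	 * jobs: suspend it now and let the resume worker restart it once the
	 * group switches back to LR mode (suspend ops are assumed).
	 */
	if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) {
		q->ops->suspend(q);
		err = q->ops->suspend_wait(q);
		if (err)
			goto err_suspend;

		xe_hw_engine_group_resume_faulting_lr_jobs(group);
	}

	list_add(&q->hw_engine_group_link, &group->exec_queue_list);
	up_write(&group->mode_sem);

	return 0;

err_suspend: /* assumed unwind path ending at line 153 */
	up_write(&group->mode_sem);
	return err;
}
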
159 * xe_hw_engine_group_del_exec_queue() - Delete an exec queue from a hw engine group
160 * @group: The hw engine group
163 void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
167 xe_assert(xe, group);
170 down_write(&group->mode_sem);
175 up_write(&group->mode_sem);
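
Lines 159-175 are from the removal path. A short sketch, assuming the unlink is guarded by a list_empty() check and that xe again comes from gt_to_xe(q->gt):

void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group,
				       struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt); /* assumed lookup */

	xe_assert(xe, group);

	down_write(&group->mode_sem);

	/* Assumed guard: only unlink queues that were actually added. */
	if (!list_empty(&q->hw_engine_group_link))
		list_del(&q->hw_engine_group_link);

	up_write(&group->mode_sem);
}
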
179 * xe_hw_engine_group_resume_faulting_lr_jobs() - Asynchronously resume the hw engine group's
181 * @group: The hw engine group
183 void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group)
185 queue_work(group->resume_wq, &group->resume_work);
189 * xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group
190 * @group: The hw engine group
194 static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
200 lockdep_assert_held_write(&group->mode_sem);
202 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
210 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
220 xe_hw_engine_group_resume_faulting_lr_jobs(group);
225 up_write(&group->mode_sem);
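
Lines 189-225 are from the helper that parks the faulting LR queues before the group switches to dma-fence mode. The two list walks at lines 202 and 210 suggest a two-pass suspend-then-wait pattern, and line 225 suggests the error path drops mode_sem on behalf of the write-locked caller. The sketch below fills the gaps with assumed q->ops->suspend()/suspend_wait() calls and a need_resume flag.

static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
{
	struct xe_exec_queue *q;
	bool need_resume = false; /* assumed bookkeeping */
	int err;

	lockdep_assert_held_write(&group->mode_sem);

	/* First walk (line 202): kick off suspension of every faulting LR queue. */
	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		if (!xe_vm_in_fault_mode(q->vm))
			continue;

		need_resume = true;
		q->ops->suspend(q); /* assumed exec-queue op */
	}

	/* Second walk (line 210): wait for each suspension to take effect. */
	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		if (!xe_vm_in_fault_mode(q->vm))
			continue;

		err = q->ops->suspend_wait(q); /* assumed exec-queue op */
		if (err)
			goto err_suspend;
	}

	/* Line 220: schedule the deferred resume for when LR mode returns. */
	if (need_resume)
		xe_hw_engine_group_resume_faulting_lr_jobs(group);

	return 0;

err_suspend:
	/* Line 225: the error path releases mode_sem for the write-locked caller. */
	up_write(&group->mode_sem);
	return err;
}
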
231 * @group: The hw engine group
239 static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group *group)
245 lockdep_assert_held_write(&group->mode_sem);
247 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
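
Lines 231-247 are from the opposite transition: draining outstanding dma-fence jobs before the group can return to LR mode. The sketch below is an assumption-heavy reconstruction; in particular, xe_vm_in_lr_mode() as the per-queue filter and xe_exec_queue_last_fence_get() as the way to obtain the last fence are guesses (the real code may use a dedicated resume variant).

static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group *group)
{
	struct xe_exec_queue *q;
	struct dma_fence *fence;
	long ret;

	lockdep_assert_held_write(&group->mode_sem);

	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		/* Assumed filter: LR queues have no dma-fence jobs to drain. */
		if (xe_vm_in_lr_mode(q->vm))
			continue;

		/* Assumed helper for the queue's last fence. */
		fence = xe_exec_queue_last_fence_get(q, q->vm);
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);

		if (ret < 0)
			return -ETIME;
	}

	return 0;
}
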
262 static int switch_mode(struct xe_hw_engine_group *group)
267 lockdep_assert_held_write(&group->mode_sem);
269 switch (group->cur_mode) {
272 err = xe_hw_engine_group_suspend_faulting_lr_jobs(group);
276 err = xe_hw_engine_group_wait_for_dma_fence_jobs(group);
283 group->cur_mode = new_mode;
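
Lines 262-283 show switch_mode() dispatching on group->cur_mode, calling the two helpers above, and committing the new mode only on success. A sketch, assuming EXEC_MODE_LR and EXEC_MODE_DMA_FENCE are the only two values of the (assumed) enum xe_hw_engine_group_execution_mode:

static int switch_mode(struct xe_hw_engine_group *group)
{
	enum xe_hw_engine_group_execution_mode new_mode; /* assumed type name */
	int err = 0;

	lockdep_assert_held_write(&group->mode_sem);

	switch (group->cur_mode) {
	case EXEC_MODE_LR:
		/* Leaving LR mode: park the faulting LR queues first. */
		new_mode = EXEC_MODE_DMA_FENCE;
		err = xe_hw_engine_group_suspend_faulting_lr_jobs(group);
		break;
	case EXEC_MODE_DMA_FENCE:
		/* Leaving dma-fence mode: drain the outstanding dma-fence jobs. */
		new_mode = EXEC_MODE_LR;
		err = xe_hw_engine_group_wait_for_dma_fence_jobs(group);
		break;
	}

	if (err)
		return err;

	group->cur_mode = new_mode; /* line 283: commit only on success */

	return 0;
}
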
289 * xe_hw_engine_group_get_mode() - Get the group to execute in the new mode
290 * @group: The hw engine group
296 int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
299 __acquires(&group->mode_sem)
301 int err = down_read_interruptible(&group->mode_sem);
306 *previous_mode = group->cur_mode;
308 if (new_mode != group->cur_mode) {
309 up_read(&group->mode_sem);
310 err = down_write_killable(&group->mode_sem);
314 if (new_mode != group->cur_mode) {
315 err = switch_mode(group);
317 up_write(&group->mode_sem);
321 downgrade_write(&group->mode_sem);
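
Lines 289-321 are the heart of the locking scheme: take mode_sem for read, upgrade to the write lock only when the requested mode differs from the current one (re-checking after the upgrade), then downgrade so the caller keeps a read lock until xe_hw_engine_group_put(). A sketch of that dance, with the error unwinds filled in as assumptions:

int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
				enum xe_hw_engine_group_execution_mode new_mode,
				enum xe_hw_engine_group_execution_mode *previous_mode)
__acquires(&group->mode_sem)
{
	int err = down_read_interruptible(&group->mode_sem);

	if (err)
		return err;

	*previous_mode = group->cur_mode;

	if (new_mode != group->cur_mode) {
		/*
		 * Upgrade to the write lock and re-check: another thread may
		 * have switched the mode while the semaphore was dropped.
		 */
		up_read(&group->mode_sem);
		err = down_write_killable(&group->mode_sem);
		if (err)
			return err; /* assumed unwind */

		if (new_mode != group->cur_mode) {
			err = switch_mode(group);
			if (err) {
				up_write(&group->mode_sem); /* line 317 */
				return err;
			}
		}
		/* Keep mode_sem held for read until xe_hw_engine_group_put(). */
		downgrade_write(&group->mode_sem);
	}

	return err;
}
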
328 * xe_hw_engine_group_put() - Put the group
329 * @group: The hw engine group
331 void xe_hw_engine_group_put(struct xe_hw_engine_group *group)
332 __releases(&group->mode_sem)
334 up_read(&group->mode_sem);
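
xe_hw_engine_group_put() simply drops the read side taken by xe_hw_engine_group_get_mode(). A hedged caller-side sketch of the intended pairing; submit_dma_fence_job() and its body are hypothetical, only the get/put bracket is taken from the matches:

/* Hypothetical caller: everything except the get/put bracket is invented. */
static int submit_dma_fence_job(struct xe_hw_engine_group *group,
				struct xe_exec_queue *q)
{
	enum xe_hw_engine_group_execution_mode previous_mode; /* assumed type name */
	int err;

	/* Switch (if needed) and pin the group in dma-fence mode. */
	err = xe_hw_engine_group_get_mode(group, EXEC_MODE_DMA_FENCE, &previous_mode);
	if (err)
		return err;

	/* ... build and submit the job on q while the mode cannot change ... */

	/* Drop the read side of mode_sem; the group may switch modes again. */
	xe_hw_engine_group_put(group);

	return 0;
}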