Lines matching refs:sched — references to the identifier sched in drivers/gpu/drm/panthor/panthor_sched.c. Each entry gives the source line number, the matching code, the enclosing function, and whether sched is a local variable or a macro/function argument at that reference.
746 #define sched_queue_work(sched, wname) \ argument
748 if (!atomic_read(&(sched)->reset.in_progress) && \
749 !panthor_device_reset_is_pending((sched)->ptdev)) \
750 queue_work((sched)->wq, &(sched)->wname ## _work); \
762 #define sched_queue_delayed_work(sched, wname, delay) \ argument
764 if (!atomic_read(&sched->reset.in_progress) && \
765 !panthor_device_reset_is_pending((sched)->ptdev)) \
766 mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
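Only the lines containing sched appear above, so the two queuing macros are shown without their wrapper lines. A sketch of the full definitions, assuming the conventional do { } while (0) wrapper (everything else is taken verbatim from the listed lines):

#define sched_queue_work(sched, wname) \
	do { \
		if (!atomic_read(&(sched)->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			queue_work((sched)->wq, &(sched)->wname ## _work); \
	} while (0)

#define sched_queue_delayed_work(sched, wname, delay) \
	do { \
		if (!atomic_read(&sched->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
	} while (0)

Given the work items initialized in panthor_sched_init() (tick_work, sync_upd_work, fw_events_work), a call such as sched_queue_delayed_work(sched, tick, 0) expands to mod_delayed_work(sched->wq, &sched->tick_work, 0), and is skipped while a reset is in progress or pending.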
1353 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_fatal_event_locked() local
1354 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; in cs_slot_process_fatal_event_locked()
1360 lockdep_assert_held(&sched->lock); in cs_slot_process_fatal_event_locked()
1378 cancel_delayed_work(&sched->tick_work); in cs_slot_process_fatal_event_locked()
1380 sched_queue_delayed_work(sched, tick, 0); in cs_slot_process_fatal_event_locked()
1399 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_fault_event_locked() local
1400 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; in cs_slot_process_fault_event_locked()
1408 lockdep_assert_held(&sched->lock); in cs_slot_process_fault_event_locked()
1451 struct panthor_scheduler *sched = ptdev->scheduler; in group_process_tiler_oom() local
1458 mutex_lock(&sched->lock); in group_process_tiler_oom()
1472 mutex_unlock(&sched->lock); in group_process_tiler_oom()
1499 sched_queue_delayed_work(sched, tick, 0); in group_process_tiler_oom()
1503 mutex_lock(&sched->lock); in group_process_tiler_oom()
1518 mutex_unlock(&sched->lock); in group_process_tiler_oom()
1554 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_tiler_oom_event_locked() local
1555 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; in cs_slot_process_tiler_oom_event_locked()
1558 lockdep_assert_held(&sched->lock); in cs_slot_process_tiler_oom_event_locked()
1569 if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work)) in cs_slot_process_tiler_oom_event_locked()
1616 struct panthor_scheduler *sched = ptdev->scheduler; in csg_slot_process_idle_event_locked() local
1618 lockdep_assert_held(&sched->lock); in csg_slot_process_idle_event_locked()
1620 sched->might_have_idle_groups = true; in csg_slot_process_idle_event_locked()
1626 sched_queue_delayed_work(sched, tick, 0); in csg_slot_process_idle_event_locked()
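The references above cover essentially the whole of csg_slot_process_idle_event_locked(). A reconstruction follows; the prototype (the csg_id parameter in particular) and the comment wording are assumptions, the statements come from the listed lines:

static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	lockdep_assert_held(&sched->lock);

	sched->might_have_idle_groups = true;

	/* Schedule an immediate tick so idle groups can be evicted and
	 * runnable groups waiting for a CSG slot can take their place.
	 */
	sched_queue_delayed_work(sched, tick, 0);
}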
1646 struct panthor_scheduler *sched = ptdev->scheduler; in csg_slot_process_progress_timer_event_locked() local
1647 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; in csg_slot_process_progress_timer_event_locked()
1650 lockdep_assert_held(&sched->lock); in csg_slot_process_progress_timer_event_locked()
1662 sched_queue_delayed_work(sched, tick, 0); in csg_slot_process_progress_timer_event_locked()
1757 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, in process_fw_events_work() local
1759 u32 events = atomic_xchg(&sched->fw_events, 0); in process_fw_events_work()
1760 struct panthor_device *ptdev = sched->ptdev; in process_fw_events_work()
1762 mutex_lock(&sched->lock); in process_fw_events_work()
1776 mutex_unlock(&sched->lock); in process_fw_events_work()
1836 struct panthor_scheduler *sched = ptdev->scheduler; in csgs_upd_ctx_apply_locked() local
1839 lockdep_assert_held(&sched->lock); in csgs_upd_ctx_apply_locked()
1906 tick_ctx_is_full(const struct panthor_scheduler *sched, in tick_ctx_is_full() argument
1909 return ctx->group_count == sched->csg_slot_count; in tick_ctx_is_full()
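tick_ctx_is_full() is almost fully recoverable from its two references: it reports whether the tick context has already picked one group per CSG slot. In the sketch below the return type, the static qualifier and the ctx parameter are inferred (the panthor_sched_tick_ctx type name is taken from the tick_ctx_apply() signature at line 2183):

static bool
tick_ctx_is_full(const struct panthor_scheduler *sched,
		 const struct panthor_sched_tick_ctx *ctx)
{
	/* Full once a group has been picked for every CSG slot. */
	return ctx->group_count == sched->csg_slot_count;
}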
1935 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, in tick_ctx_pick_groups_from_list() argument
1943 if (tick_ctx_is_full(sched, ctx)) in tick_ctx_pick_groups_from_list()
1960 if (i == ctx->as_count && ctx->as_count == sched->as_slot_count) in tick_ctx_pick_groups_from_list()
1977 if (tick_ctx_is_full(sched, ctx)) in tick_ctx_pick_groups_from_list()
1983 tick_ctx_insert_old_group(struct panthor_scheduler *sched, in tick_ctx_insert_old_group() argument
1988 struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id]; in tick_ctx_insert_old_group()
2006 struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id]; in tick_ctx_insert_old_group()
2018 tick_ctx_init(struct panthor_scheduler *sched, in tick_ctx_init() argument
2022 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_init()
2036 for (i = 0; i < sched->csg_slot_count; i++) { in tick_ctx_init()
2037 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; in tick_ctx_init()
2058 tick_ctx_insert_old_group(sched, ctx, group, full_tick); in tick_ctx_init()
2130 tick_ctx_cleanup(struct panthor_scheduler *sched, in tick_ctx_cleanup() argument
2133 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_cleanup()
2154 &sched->groups.idle[group->priority] : in tick_ctx_cleanup()
2155 &sched->groups.runnable[group->priority]); in tick_ctx_cleanup()
2174 &sched->groups.idle[group->priority] : in tick_ctx_cleanup()
2175 &sched->groups.runnable[group->priority]); in tick_ctx_cleanup()
2183 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx) in tick_ctx_apply() argument
2186 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_apply()
2204 csg_slot = &sched->csg_slots[csg_id]; in tick_ctx_apply()
2220 csg_slot = &sched->csg_slots[csg_id]; in tick_ctx_apply()
2258 for (i = 0; i < sched->csg_slot_count; i++) { in tick_ctx_apply()
2259 if (!sched->csg_slots[i].group) in tick_ctx_apply()
2282 csg_slot = &sched->csg_slots[csg_id]; in tick_ctx_apply()
2327 list_move_tail(&group->run_node, &sched->groups.idle[prio]); in tick_ctx_apply()
2329 list_move_tail(&group->run_node, &sched->groups.runnable[prio]); in tick_ctx_apply()
2334 sched->used_csg_slot_count = ctx->group_count; in tick_ctx_apply()
2335 sched->might_have_idle_groups = ctx->idle_group_count > 0; in tick_ctx_apply()
2339 tick_ctx_update_resched_target(struct panthor_scheduler *sched, in tick_ctx_update_resched_target() argument
2343 if (!tick_ctx_is_full(sched, ctx)) in tick_ctx_update_resched_target()
2352 if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT)) in tick_ctx_update_resched_target()
2359 if (!list_empty(&sched->groups.runnable[ctx->min_priority])) { in tick_ctx_update_resched_target()
2360 u64 resched_target = sched->last_tick + sched->tick_period; in tick_ctx_update_resched_target()
2362 if (time_before64(sched->resched_target, sched->last_tick) || in tick_ctx_update_resched_target()
2363 time_before64(resched_target, sched->resched_target)) in tick_ctx_update_resched_target()
2364 sched->resched_target = resched_target; in tick_ctx_update_resched_target()
2366 return sched->resched_target - sched->last_tick; in tick_ctx_update_resched_target()
2370 sched->resched_target = U64_MAX; in tick_ctx_update_resched_target()
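The references to tick_ctx_update_resched_target() outline how the next tick is planned: no periodic tick is needed while CSG slots are free; otherwise, if runnable groups remain at the lowest scheduled priority, the rescheduling target is set one tick_period after the last tick. A sketch assembled from those lines follows; the u64 return type, the const qualifier on ctx and the goto structure joining the fragments are assumptions:

static u64
tick_ctx_update_resched_target(struct panthor_scheduler *sched,
			       const struct panthor_sched_tick_ctx *ctx)
{
	/* Free CSG slots left: no rotation tick needed. */
	if (!tick_ctx_is_full(sched, ctx))
		goto no_tick;

	if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
		goto no_tick;

	/* Runnable groups are still waiting at the lowest scheduled priority:
	 * program a tick one tick_period after the last one so they get a
	 * chance to run.
	 */
	if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
		u64 resched_target = sched->last_tick + sched->tick_period;

		if (time_before64(sched->resched_target, sched->last_tick) ||
		    time_before64(resched_target, sched->resched_target))
			sched->resched_target = resched_target;

		return sched->resched_target - sched->last_tick;
	}

no_tick:
	sched->resched_target = U64_MAX;
	return U64_MAX;
}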
2376 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, in tick_work() local
2378 struct panthor_device *ptdev = sched->ptdev; in tick_work()
2391 if (time_before64(now, sched->resched_target)) in tick_work()
2392 remaining_jiffies = sched->resched_target - now; in tick_work()
2394 mutex_lock(&sched->lock); in tick_work()
2395 if (panthor_device_reset_is_pending(sched->ptdev)) in tick_work()
2398 tick_ctx_init(sched, &ctx, remaining_jiffies != 0); in tick_work()
2408 prio >= 0 && !tick_ctx_is_full(sched, &ctx); in tick_work()
2410 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], in tick_work()
2413 tick_ctx_pick_groups_from_list(sched, &ctx, in tick_work()
2414 &sched->groups.runnable[prio], in tick_work()
2422 prio >= 0 && !tick_ctx_is_full(sched, &ctx); in tick_work()
2424 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio], in tick_work()
2426 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true); in tick_work()
2431 prio >= 0 && !tick_ctx_is_full(sched, &ctx); in tick_work()
2434 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true); in tick_work()
2435 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio], in tick_work()
2439 tick_ctx_apply(sched, &ctx); in tick_work()
2444 panthor_devfreq_record_idle(sched->ptdev); in tick_work()
2445 if (sched->pm.has_ref) { in tick_work()
2447 sched->pm.has_ref = false; in tick_work()
2450 panthor_devfreq_record_busy(sched->ptdev); in tick_work()
2451 if (!sched->pm.has_ref) { in tick_work()
2453 sched->pm.has_ref = true; in tick_work()
2457 sched->last_tick = now; in tick_work()
2458 resched_delay = tick_ctx_update_resched_target(sched, &ctx); in tick_work()
2463 sched_queue_delayed_work(sched, tick, resched_delay); in tick_work()
2466 tick_ctx_cleanup(sched, &ctx); in tick_work()
2469 mutex_unlock(&sched->lock); in tick_work()
2508 struct panthor_scheduler *sched = container_of(work, in sync_upd_work() local
2514 mutex_lock(&sched->lock); in sync_upd_work()
2515 list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) { in sync_upd_work()
2536 &sched->groups.runnable[group->priority]); in sync_upd_work()
2545 mutex_unlock(&sched->lock); in sync_upd_work()
2548 sched_queue_delayed_work(sched, tick, 0); in sync_upd_work()
2554 struct panthor_scheduler *sched = ptdev->scheduler; in group_schedule_locked() local
2555 struct list_head *queue = &sched->groups.runnable[group->priority]; in group_schedule_locked()
2571 if (atomic_read(&sched->reset.in_progress)) in group_schedule_locked()
2579 sched_queue_delayed_work(sched, tick, 0); in group_schedule_locked()
2586 if (sched->might_have_idle_groups) { in group_schedule_locked()
2587 sched_queue_delayed_work(sched, tick, 0); in group_schedule_locked()
2592 if (sched->resched_target != U64_MAX) { in group_schedule_locked()
2594 if (sched->used_csg_slot_count < sched->csg_slot_count) in group_schedule_locked()
2595 sched_queue_delayed_work(sched, tick, 0); in group_schedule_locked()
2604 sched->resched_target = sched->last_tick + sched->tick_period; in group_schedule_locked()
2605 if (sched->used_csg_slot_count == sched->csg_slot_count && in group_schedule_locked()
2606 time_before64(now, sched->resched_target)) in group_schedule_locked()
2607 delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX); in group_schedule_locked()
2609 sched_queue_delayed_work(sched, tick, delay_jiffies); in group_schedule_locked()
2631 struct panthor_scheduler *sched = group->ptdev->scheduler; in panthor_group_stop() local
2633 lockdep_assert_held(&sched->reset.lock); in panthor_group_stop()
2639 list_move_tail(&group->run_node, &sched->reset.stopped_groups); in panthor_group_stop()
2644 struct panthor_scheduler *sched = group->ptdev->scheduler; in panthor_group_start() local
2654 &sched->groups.idle[group->priority] : in panthor_group_start()
2655 &sched->groups.runnable[group->priority]); in panthor_group_start()
2667 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_immediate_tick() local
2669 sched_queue_delayed_work(sched, tick, 0); in panthor_sched_immediate_tick()
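panthor_sched_immediate_tick() is the simplest user of the queuing macro and is essentially fully visible above; only the prototype is inferred here. It forces a scheduler tick to run as soon as possible:

void panthor_sched_immediate_tick(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	/* Delay of 0: run the tick work immediately unless a reset is pending. */
	sched_queue_delayed_work(sched, tick, 0);
}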
2690 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_suspend() local
2696 mutex_lock(&sched->lock); in panthor_sched_suspend()
2698 for (i = 0; i < sched->csg_slot_count; i++) { in panthor_sched_suspend()
2699 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; in panthor_sched_suspend()
2721 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; in panthor_sched_suspend()
2745 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; in panthor_sched_suspend()
2769 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; in panthor_sched_suspend()
2780 for (i = 0; i < sched->csg_slot_count; i++) { in panthor_sched_suspend()
2781 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; in panthor_sched_suspend()
2798 &sched->groups.idle[group->priority]); in panthor_sched_suspend()
2808 mutex_unlock(&sched->lock); in panthor_sched_suspend()
2813 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_pre_reset() local
2817 mutex_lock(&sched->reset.lock); in panthor_sched_pre_reset()
2818 atomic_set(&sched->reset.in_progress, true); in panthor_sched_pre_reset()
2823 cancel_work_sync(&sched->sync_upd_work); in panthor_sched_pre_reset()
2824 cancel_delayed_work_sync(&sched->tick_work); in panthor_sched_pre_reset()
2831 for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) { in panthor_sched_pre_reset()
2833 drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i])); in panthor_sched_pre_reset()
2834 list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node) in panthor_sched_pre_reset()
2838 for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) { in panthor_sched_pre_reset()
2839 list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node) in panthor_sched_pre_reset()
2843 mutex_unlock(&sched->reset.lock); in panthor_sched_pre_reset()
2848 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_post_reset() local
2851 mutex_lock(&sched->reset.lock); in panthor_sched_post_reset()
2853 list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) { in panthor_sched_post_reset()
2866 atomic_set(&sched->reset.in_progress, false); in panthor_sched_post_reset()
2867 mutex_unlock(&sched->reset.lock); in panthor_sched_post_reset()
2871 sched_queue_delayed_work(sched, tick, 0); in panthor_sched_post_reset()
2872 sched_queue_work(sched, sync_upd); in panthor_sched_post_reset()
3011 struct panthor_scheduler *sched = ptdev->scheduler; in get_job_cs_params() local
3023 params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); in get_job_cs_params()
3140 struct panthor_scheduler *sched = ptdev->scheduler; in queue_run_job() local
3159 mutex_lock(&sched->lock); in queue_run_job()
3211 if (!sched->pm.has_ref && in queue_run_job()
3214 sched->pm.has_ref = true; in queue_run_job()
3216 panthor_devfreq_record_busy(sched->ptdev); in queue_run_job()
3226 mutex_unlock(&sched->lock); in queue_run_job()
3239 struct panthor_scheduler *sched = ptdev->scheduler; in queue_timedout_job() local
3245 drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress)); in queue_timedout_job()
3249 mutex_lock(&sched->lock); in queue_timedout_job()
3262 mutex_unlock(&sched->lock); in queue_timedout_job()
3453 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_create() local
3559 mutex_lock(&sched->reset.lock); in panthor_group_create()
3560 if (atomic_read(&sched->reset.in_progress)) { in panthor_group_create()
3563 mutex_lock(&sched->lock); in panthor_group_create()
3565 &sched->groups.idle[group->priority]); in panthor_group_create()
3566 mutex_unlock(&sched->lock); in panthor_group_create()
3568 mutex_unlock(&sched->reset.lock); in panthor_group_create()
3586 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_destroy() local
3593 mutex_lock(&sched->reset.lock); in panthor_group_destroy()
3594 mutex_lock(&sched->lock); in panthor_group_destroy()
3597 sched_queue_delayed_work(sched, tick, 0); in panthor_group_destroy()
3598 } else if (!atomic_read(&sched->reset.in_progress)) { in panthor_group_destroy()
3606 mutex_unlock(&sched->lock); in panthor_group_destroy()
3607 mutex_unlock(&sched->reset.lock); in panthor_group_destroy()
3630 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_get_state() local
3642 mutex_lock(&sched->lock); in panthor_group_get_state()
3651 mutex_unlock(&sched->lock); in panthor_group_get_state()
3856 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_unplug() local
3858 cancel_delayed_work_sync(&sched->tick_work); in panthor_sched_unplug()
3860 mutex_lock(&sched->lock); in panthor_sched_unplug()
3861 if (sched->pm.has_ref) { in panthor_sched_unplug()
3863 sched->pm.has_ref = false; in panthor_sched_unplug()
3865 mutex_unlock(&sched->lock); in panthor_sched_unplug()
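panthor_sched_unplug() is almost fully covered by the references above; the only part not visible is the body of the pm.has_ref branch, which presumably drops the runtime-PM reference taken while groups were scheduled (that call is an assumption in the sketch below, hinted at by sched->pm.has_ref being set in tick_work() and queue_run_job()):

void panthor_sched_unplug(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	cancel_delayed_work_sync(&sched->tick_work);

	mutex_lock(&sched->lock);
	if (sched->pm.has_ref) {
		/* Assumed: release the runtime-PM reference held on behalf of
		 * scheduled groups.
		 */
		pm_runtime_put(ptdev->base.dev);
		sched->pm.has_ref = false;
	}
	mutex_unlock(&sched->lock);
}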
3870 struct panthor_scheduler *sched = res; in panthor_sched_fini() local
3873 if (!sched || !sched->csg_slot_count) in panthor_sched_fini()
3876 cancel_delayed_work_sync(&sched->tick_work); in panthor_sched_fini()
3878 if (sched->wq) in panthor_sched_fini()
3879 destroy_workqueue(sched->wq); in panthor_sched_fini()
3881 if (sched->heap_alloc_wq) in panthor_sched_fini()
3882 destroy_workqueue(sched->heap_alloc_wq); in panthor_sched_fini()
3885 drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio])); in panthor_sched_fini()
3886 drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio])); in panthor_sched_fini()
3889 drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting)); in panthor_sched_fini()
3897 struct panthor_scheduler *sched; in panthor_sched_init() local
3901 sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL); in panthor_sched_init()
3902 if (!sched) in panthor_sched_init()
3930 sched->ptdev = ptdev; in panthor_sched_init()
3931 sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features); in panthor_sched_init()
3932 sched->csg_slot_count = num_groups; in panthor_sched_init()
3933 sched->cs_slot_count = csg_iface->control->stream_num; in panthor_sched_init()
3934 sched->as_slot_count = gpu_as_count; in panthor_sched_init()
3935 ptdev->csif_info.csg_slot_count = sched->csg_slot_count; in panthor_sched_init()
3936 ptdev->csif_info.cs_slot_count = sched->cs_slot_count; in panthor_sched_init()
3937 ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count; in panthor_sched_init()
3939 sched->last_tick = 0; in panthor_sched_init()
3940 sched->resched_target = U64_MAX; in panthor_sched_init()
3941 sched->tick_period = msecs_to_jiffies(10); in panthor_sched_init()
3942 INIT_DELAYED_WORK(&sched->tick_work, tick_work); in panthor_sched_init()
3943 INIT_WORK(&sched->sync_upd_work, sync_upd_work); in panthor_sched_init()
3944 INIT_WORK(&sched->fw_events_work, process_fw_events_work); in panthor_sched_init()
3946 ret = drmm_mutex_init(&ptdev->base, &sched->lock); in panthor_sched_init()
3951 INIT_LIST_HEAD(&sched->groups.runnable[prio]); in panthor_sched_init()
3952 INIT_LIST_HEAD(&sched->groups.idle[prio]); in panthor_sched_init()
3954 INIT_LIST_HEAD(&sched->groups.waiting); in panthor_sched_init()
3956 ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock); in panthor_sched_init()
3960 INIT_LIST_HEAD(&sched->reset.stopped_groups); in panthor_sched_init()
3977 sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0); in panthor_sched_init()
3978 sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); in panthor_sched_init()
3979 if (!sched->wq || !sched->heap_alloc_wq) { in panthor_sched_init()
3980 panthor_sched_fini(&ptdev->base, sched); in panthor_sched_init()
3985 ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched); in panthor_sched_init()
3989 ptdev->scheduler = sched; in panthor_sched_init()