// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"

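/*
 * The Xe GPU scheduler wraps the DRM GPU scheduler and layers a simple
 * message-passing mechanism on top of it: callers append struct xe_sched_msg
 * entries to sched->msgs, and a work item drains the list, handing each
 * message to the backend's process_msg() hook.
 */

/* Kick the message-processing worker unless submission is paused. */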
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->base.pause_submit))
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

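/*
 * Re-queue the worker if any messages are still pending; called after a
 * message has been processed, so the list is drained one message per work
 * invocation.
 */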
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		xe_sched_process_msg_queue(sched);
	xe_sched_msg_unlock(sched);
}

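/* Pop the first pending message off the list, or return NULL if it is empty. */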
static struct xe_sched_msg *
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs,
				       struct xe_sched_msg, link);
	if (msg)
		list_del_init(&msg->link);
	xe_sched_msg_unlock(sched);

	return msg;
}

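/*
 * Worker for work_process_msg: process a single message, then re-queue
 * itself if more are pending. The pause_submit check mirrors the one in
 * xe_sched_process_msg_queue() so that a pause racing with an
 * already-queued work item still takes effect.
 */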
static void xe_sched_process_msg_work(struct work_struct *w)
{
	struct xe_gpu_scheduler *sched =
		container_of(w, struct xe_gpu_scheduler, work_process_msg);
	struct xe_sched_msg *msg;

	if (READ_ONCE(sched->base.pause_submit))
		return;

	msg = xe_sched_get_msg(sched);
	if (msg) {
		sched->ops->process_msg(msg);

		xe_sched_process_msg_queue_if_ready(sched);
	}
}

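/*
 * Initialize the Xe scheduler: set up the message list and worker, then pass
 * the remaining parameters through to drm_sched_init() via struct
 * drm_sched_init_args. A single run-queue is used (num_rqs = 1), and
 * hw_submission maps to the DRM scheduler's credit limit.
 */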
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned int hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev)
{
	const struct drm_sched_init_args args = {
		.ops = ops,
		.submit_wq = submit_wq,
		.num_rqs = 1,
		.credit_limit = hw_submission,
		.hang_limit = hang_limit,
		.timeout = timeout,
		.timeout_wq = timeout_wq,
		.score = score,
		.name = name,
		.dev = dev,
	};

	sched->ops = xe_ops;
	INIT_LIST_HEAD(&sched->msgs);
	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);

	return drm_sched_init(&sched->base, &args);
}

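/*
 * Tear down the scheduler. Submission is stopped first so the message worker
 * cannot run concurrently with drm_sched_fini().
 */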
void xe_sched_fini(struct xe_gpu_scheduler *sched)
{
	xe_sched_submission_stop(sched);
	drm_sched_fini(&sched->base);
}

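/*
 * Restart submission: resume the base DRM scheduler's workqueue, then kick
 * the message worker to drain anything that accumulated while stopped.
 */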
void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_start(&sched->base);
	queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

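/* Stop submission and wait for any in-flight message work to finish. */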
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_stop(&sched->base);
	cancel_work_sync(&sched->work_process_msg);
}

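/* Re-arm the timeout handler (TDR) with the scheduler's configured timeout. */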
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
	drm_sched_resume_timeout(&sched->base, sched->base.timeout);
}

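/* Queue a message for the backend, taking the message lock internally. */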
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	xe_sched_msg_lock(sched);
	xe_sched_add_msg_locked(sched, msg);
	xe_sched_msg_unlock(sched);
}

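/*
 * Locked variant of xe_sched_add_msg(). The lockdep assertion below implies
 * that xe_sched_msg_lock() maps onto the base scheduler's job_list_lock;
 * callers must already hold it.
 */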
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg)
{
	lockdep_assert_held(&sched->base.job_list_lock);

	list_add_tail(&msg->link, &sched->msgs);
	xe_sched_process_msg_queue(sched);
}