xref: /linux/kernel/trace/rv/monitors/deadline/deadline.h (revision fdbfee9fc56e13a1307868829d438ad66ab308a4)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #include <linux/kernel.h>
4 #include <linux/uaccess.h>
5 #include <linux/sched/deadline.h>
6 #include <asm/syscall.h>
7 #include <uapi/linux/sched/types.h>
8 #include <trace/events/sched.h>
9 
/*
 * Dummy values if not available
 *
 * NOTE(review): -__COUNTER__ is only a safely invalid (negative) syscall
 * number if __COUNTER__ has already been expanded at least once by earlier
 * includes; if it were still 0 here the dummy would equal syscall 0.
 * Presumably prior header uses guarantee a positive value -- verify.
 */
#ifndef __NR_sched_setscheduler
#define __NR_sched_setscheduler -__COUNTER__
#endif
#ifndef __NR_sched_setattr
#define __NR_sched_setattr -__COUNTER__
#endif
19 
/* The deadline monitor container (defined elsewhere). */
extern struct rv_monitor rv_deadline;
/* Initialised when registering the deadline container */
extern struct sched_class *rv_ext_sched_class;
23 
24 /*
25  * If both have dummy values, the syscalls are not supported and we don't even
26  * need to register the handler.
27  */
should_skip_syscall_handle(void)28 static inline bool should_skip_syscall_handle(void)
29 {
30 	return __NR_sched_setattr < 0 && __NR_sched_setscheduler < 0;
31 }
32 
33 /*
34  * is_supported_type - return true if @type is supported by the deadline monitors
35  */
is_supported_type(u8 type)36 static inline bool is_supported_type(u8 type)
37 {
38 	return type == DL_TASK || type == DL_SERVER_FAIR || type == DL_SERVER_EXT;
39 }
40 
41 /*
42  * is_server_type - return true if @type is a supported server
43  */
is_server_type(u8 type)44 static inline bool is_server_type(u8 type)
45 {
46 	return is_supported_type(type) && type != DL_TASK;
47 }
48 
/*
 * Use negative numbers for the server.
 * Currently only one fair server per CPU, may change in the future.
 *
 * The macro argument is parenthesised so that expression arguments
 * (e.g. fair_server_id(cpu + 1)) expand with the intended precedence.
 */
#define fair_server_id(cpu) (-(cpu))
#define ext_server_id(cpu) (-(cpu) - num_possible_cpus())
#define NO_SERVER_ID (-2 * num_possible_cpus())
56 /*
57  * Get a unique id used for dl entities
58  *
59  * The cpu is not required for tasks as the pid is used there, if this function
60  * is called on a dl_se that for sure corresponds to a task, DL_TASK can be
61  * used in place of cpu.
62  * We need the cpu for servers as it is provided in the tracepoint and we
63  * cannot easily retrieve it from the dl_se (requires the struct rq definition).
64  */
get_entity_id(struct sched_dl_entity * dl_se,int cpu,u8 type)65 static inline int get_entity_id(struct sched_dl_entity *dl_se, int cpu, u8 type)
66 {
67 	if (dl_server(dl_se) && type != DL_TASK) {
68 		if (type == DL_SERVER_FAIR)
69 			return fair_server_id(cpu);
70 		if (type == DL_SERVER_EXT)
71 			return ext_server_id(cpu);
72 		return NO_SERVER_ID;
73 	}
74 	return dl_task_of(dl_se)->pid;
75 }
76 
task_is_scx_enabled(struct task_struct * tsk)77 static inline bool task_is_scx_enabled(struct task_struct *tsk)
78 {
79 	return IS_ENABLED(CONFIG_SCHED_CLASS_EXT) &&
80 	       tsk->sched_class == rv_ext_sched_class;
81 }
82 
/*
 * Expand id and target as arguments for da functions.
 * Arguments are parenthesised so expression arguments expand correctly
 * (e.g. &(tsk)->dl rather than &tsk->dl).
 */
#define EXPAND_ID(dl_se, cpu, type) get_entity_id((dl_se), (cpu), (type)), (dl_se)
#define EXPAND_ID_TASK(tsk) get_entity_id(&(tsk)->dl, task_cpu(tsk), DL_TASK), &(tsk)->dl
86 
get_server_type(struct task_struct * tsk)87 static inline u8 get_server_type(struct task_struct *tsk)
88 {
89 	if (tsk->policy == SCHED_NORMAL || tsk->policy == SCHED_EXT ||
90 	    tsk->policy == SCHED_BATCH || tsk->policy == SCHED_IDLE)
91 		return task_is_scx_enabled(tsk) ? DL_SERVER_EXT : DL_SERVER_FAIR;
92 	return DL_OTHER;
93 }
94 
/*
 * extract_params - extract the target pid and requested policy from a
 * sched_setscheduler()/sched_setattr() syscall invocation
 *
 * @regs:    syscall entry registers
 * @id:      syscall number; only the two scheduler syscalls are handled
 * @pid_out: set to the pid argument of the syscall
 *
 * Returns the requested policy with SCHED_RESET_ON_FORK masked off, or a
 * negative error: the error from copy_struct_from_user(), -EINVAL for an
 * unhandled syscall id or when SCHED_FLAG_KEEP_POLICY is set (the policy
 * in the attr is then not the one that will be applied).
 *
 * NOTE(review): a negative policy value passed by userspace to
 * sched_setscheduler() is indistinguishable from an error return here --
 * presumably callers only compare against valid policy values; verify.
 */
static inline int extract_params(struct pt_regs *regs, long id, pid_t *pid_out)
{
	/* Only the head of sched_attr, up to and including sched_flags. */
	size_t size = offsetofend(struct sched_attr, sched_flags);
	struct sched_attr __user *uattr, attr;
	int new_policy = -1, ret;
	unsigned long args[6];

	switch (id) {
	case __NR_sched_setscheduler:
		/* Policy is passed directly as the second argument. */
		syscall_get_arguments(current, regs, args);
		*pid_out = args[0];
		new_policy = args[1];
		break;
	case __NR_sched_setattr:
		/* Policy comes from the userspace struct sched_attr. */
		syscall_get_arguments(current, regs, args);
		*pid_out = args[0];
		uattr = (struct sched_attr __user *)args[1];
		/*
		 * Just copy up to sched_flags, we are not interested after that
		 */
		ret = copy_struct_from_user(&attr, size, uattr, size);
		if (ret)
			return ret;
		if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
			return -EINVAL;
		new_policy = attr.sched_policy;
		break;
	default:
		return -EINVAL;
	}

	return new_policy & ~SCHED_RESET_ON_FORK;
}
128 
129 /* Helper functions requiring DA/HA utilities */
130 #ifdef RV_MON_TYPE
131 
/*
 * get_server - get the server of the given @type associated to a task
 *
 * If the task is a boosted task, the server is available in the task_struct,
 * otherwise grab the dl entity saved for the CPU where the task is enqueued.
 * This function assumes the task is enqueued somewhere.
 * Returns NULL if @type is not a known server type.
 */
static inline struct sched_dl_entity *get_server(struct task_struct *tsk, u8 type)
{
	/* Boosted task: the server is cached on the task itself. */
	if (tsk->dl_server && get_server_type(tsk) == type)
		return tsk->dl_server;
	/* Otherwise look up the pre-allocated per-CPU storage by server id. */
	if (type == DL_SERVER_FAIR)
		return da_get_target_by_id(fair_server_id(task_cpu(tsk)));
	if (type == DL_SERVER_EXT)
		return da_get_target_by_id(ext_server_id(task_cpu(tsk)));
	return NULL;
}
149 
/*
 * Initialise monitors for all tasks and pre-allocate the storage for servers.
 * This is necessary since we don't have access to the servers here and
 * allocation can cause deadlocks from their tracepoints. We can only fill
 * pre-initialised storage from there.
 *
 * @skip_tasks: when true, only the per-CPU server storage is created.
 *
 * Returns 0 on success; on allocation failure all monitor storage is torn
 * down and -ENOMEM is returned.
 */
static inline int init_storage(bool skip_tasks)
{
	struct task_struct *g, *p;
	int cpu;

	/* Empty per-CPU server storage, filled later from the tracepoints. */
	for_each_possible_cpu(cpu) {
		if (!da_create_empty_storage(fair_server_id(cpu)))
			goto fail;
		/* The ext server only exists with sched_ext compiled in. */
		if (IS_ENABLED(CONFIG_SCHED_CLASS_EXT) &&
		    !da_create_empty_storage(ext_server_id(cpu)))
			goto fail;
	}

	if (skip_tasks)
		return 0;

	/* Create storage for every already-existing deadline task. */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->policy == SCHED_DEADLINE) {
			if (!da_create_storage(EXPAND_ID_TASK(p), NULL)) {
				/* Drop the lock before tearing storage down. */
				read_unlock(&tasklist_lock);
				goto fail;
			}
		}
	}
	read_unlock(&tasklist_lock);
	return 0;

fail:
	da_monitor_destroy();
	return -ENOMEM;
}
188 
handle_newtask(void * data,struct task_struct * task,u64 flags)189 static void __maybe_unused handle_newtask(void *data, struct task_struct *task, u64 flags)
190 {
191 	/* Might be superfluous as tasks are not started with this policy.. */
192 	if (task->policy == SCHED_DEADLINE)
193 		da_create_storage(EXPAND_ID_TASK(task), NULL);
194 }
195 
handle_exit(void * data,struct task_struct * p,bool group_dead)196 static void __maybe_unused handle_exit(void *data, struct task_struct *p, bool group_dead)
197 {
198 	if (p->policy == SCHED_DEADLINE)
199 		da_destroy_storage(get_entity_id(&p->dl, DL_TASK, DL_TASK));
200 }
201 
202 #endif
203