// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/limits.h>
#include <linux/sort.h>

#include "protocols.h"
#include "notify.h"
#include "quirks.h"

/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION		0x30000
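/*
 * Protocol versions are encoded as (major << 16) | minor, as decoded by the
 * PROTOCOL_REV_MAJOR()/PROTOCOL_REV_MINOR() helpers from protocols.h, so
 * 0x30000 above denotes clock protocol v3.0.
 */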

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
	CLOCK_NAME_GET = 0x8,
	CLOCK_RATE_NOTIFY = 0x9,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
	CLOCK_CONFIG_GET = 0xB,
	CLOCK_POSSIBLE_PARENTS_GET = 0xC,
	CLOCK_PARENT_SET = 0xD,
	CLOCK_PARENT_GET = 0xE,
	CLOCK_GET_PERMISSIONS = 0xF,
};

#define CLOCK_STATE_CONTROL_ALLOWED	BIT(31)
#define CLOCK_PARENT_CONTROL_ALLOWED	BIT(30)
#define CLOCK_RATE_CONTROL_ALLOWED	BIT(29)

enum clk_state {
	CLK_STATE_DISABLE,
	CLK_STATE_ENABLE,
	CLK_STATE_RESERVED,
	CLK_STATE_UNCHANGED,
};
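/*
 * These states map onto the 2-bit state field of the CLOCK_CONFIG_SET
 * attributes word (REGMASK_CLK_STATE below): 0 disables the clock, 1 enables
 * it, 2 is reserved by the spec, and 3 (valid on v3.0+ only) leaves the
 * state untouched while an OEM-specific config value is updated.
 */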

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define SUPPORTS_RATE_CHANGED_NOTIF(x)		((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x)	((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)		((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x)		((x) & BIT(28))
#define SUPPORTS_EXTENDED_CONFIG(x)		((x) & BIT(27))
#define SUPPORTS_GET_PERMISSIONS(x)		((x) & BIT(1))
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
	__le32 clock_enable_latency;
};

struct scmi_msg_clock_possible_parents {
	__le32 id;
	__le32 skip_parents;
};

struct scmi_msg_resp_clock_possible_parents {
	__le32 num_parent_flags;
#define NUM_PARENTS_RETURNED(x)		((x) & 0xff)
#define NUM_PARENTS_REMAINING(x)	((x) >> 24)
	__le32 possible_parents[];
};

struct scmi_msg_clock_set_parent {
	__le32 id;
	__le32 parent_id;
};

struct scmi_msg_clock_config_set {
	__le32 id;
	__le32 attributes;
};

/* Valid only from SCMI clock v3.0 */
struct scmi_msg_clock_config_set_v2 {
	__le32 id;
	__le32 attributes;
#define NULL_OEM_TYPE			0
#define REGMASK_OEM_TYPE_SET		GENMASK(23, 16)
#define REGMASK_CLK_STATE		GENMASK(1, 0)
	__le32 oem_config_val;
};

struct scmi_msg_clock_config_get {
	__le32 id;
	__le32 flags;
#define REGMASK_OEM_TYPE_GET		GENMASK(7, 0)
};

struct scmi_msg_resp_clock_config_get {
	__le32 attributes;
	__le32 config;
#define IS_CLK_ENABLED(x)		le32_get_bits((x), BIT(0))
	__le32 oem_config_val;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};
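/*
 * Worked example for RATE_TO_U64(): rates travel as two little-endian 32-bit
 * words, low word first. A reply carrying value_low = 0x00000002 and
 * value_high = 0x00000001 therefore decodes to the 64-bit rate 0x100000002,
 * i.e. 4294967298 Hz.
 */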

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct scmi_msg_resp_set_rate_complete {
	__le32 id;
	__le32 rate_low;
	__le32 rate_high;
};

struct scmi_msg_clock_rate_notify {
	__le32 clk_id;
	__le32 notify_enable;
};

struct scmi_clock_rate_notify_payld {
	__le32 agent_id;
	__le32 clock_id;
	__le32 rate_low;
	__le32 rate_high;
};

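/*
 * The clock_config_set/clock_config_get function pointers below are filled
 * in at init time according to the negotiated protocol version: v3.0 and
 * later use the _v2 message variants, older versions the legacy ones (see
 * scmi_clock_protocol_init()).
 */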
struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	bool notify_rate_changed_cmd;
	bool notify_rate_change_requested_cmd;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
	int (*clock_config_set)(const struct scmi_protocol_handle *ph,
				u32 clk_id, enum clk_state state,
				enum scmi_clock_oem_config oem_type,
				u32 oem_val, bool atomic);
	int (*clock_config_get)(const struct scmi_protocol_handle *ph,
				u32 clk_id, enum scmi_clock_oem_config oem_type,
				u32 *attributes, bool *enabled, u32 *oem_val,
				bool atomic);
};

static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
	CLOCK_RATE_NOTIFY,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};

static inline struct scmi_clock_info *
scmi_clock_domain_lookup(struct clock_info *ci, u32 clk_id)
{
	if (clk_id >= ci->num_clocks)
		return ERR_PTR(-EINVAL);

	return ci->clk + clk_id;
}

static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);

	if (!ret) {
		if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL))
			ci->notify_rate_changed_cmd = true;

		if (!ph->hops->protocol_msg_check(ph,
						  CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
						  NULL))
			ci->notify_rate_change_requested_cmd = true;
	}

	return ret;
}

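/*
 * Multi-part queries (CLOCK_POSSIBLE_PARENTS_GET, CLOCK_DESCRIBE_RATES) use
 * the common iterator helpers: every reply carries a returned/remaining
 * count pair, and the iterator keeps re-issuing the command with an updated
 * skip index (skip_parents / rate_index) until nothing remains. The
 * iter_clk_* callbacks below implement both flavours of that loop, with
 * struct scmi_clk_ipriv as the shared iteration context.
 */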
struct scmi_clk_ipriv {
	struct device *dev;
	u32 clk_id;
	struct scmi_clock_info *clk;
};

static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index,
						      const void *priv)
{
	struct scmi_msg_clock_possible_parents *msg = message;
	const struct scmi_clk_ipriv *p = priv;

	msg->id = cpu_to_le32(p->clk_id);
	/* Set the number of parents to be skipped/already read */
	msg->skip_parents = cpu_to_le32(desc_index);
}

static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st,
						  const void *response, void *priv)
{
	const struct scmi_msg_resp_clock_possible_parents *r = response;
	struct scmi_clk_ipriv *p = priv;
	struct device *dev = p->dev;
	u32 flags;

	flags = le32_to_cpu(r->num_parent_flags);
	st->num_returned = NUM_PARENTS_RETURNED(flags);
	st->num_remaining = NUM_PARENTS_REMAINING(flags);

	/*
	 * The number of parents is not advertised anywhere else, so assume
	 * it is returned+remaining on the first call.
	 */
	if (!st->max_resources) {
		p->clk->num_parents = st->num_returned + st->num_remaining;
		p->clk->parents = devm_kcalloc(dev, p->clk->num_parents,
					       sizeof(*p->clk->parents),
					       GFP_KERNEL);
		if (!p->clk->parents) {
			p->clk->num_parents = 0;
			return -ENOMEM;
		}
		st->max_resources = st->num_returned + st->num_remaining;
	}

	return 0;
}

static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph,
						      const void *response,
						      struct scmi_iterator_state *st,
						      void *priv)
{
	const struct scmi_msg_resp_clock_possible_parents *r = response;
	struct scmi_clk_ipriv *p = priv;

	u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx];

	*parent = le32_to_cpu(r->possible_parents[st->loop_idx]);

	return 0;
}

static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id,
				       struct scmi_clock_info *clk)
{
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_clk_possible_parents_prepare_message,
		.update_state = iter_clk_possible_parents_update_state,
		.process_response = iter_clk_possible_parents_process_response,
	};

	struct scmi_clk_ipriv ppriv = {
		.clk_id = clk_id,
		.clk = clk,
		.dev = ph->dev,
	};
	void *iter;
	int ret;

	iter = ph->hops->iter_response_init(ph, &ops, 0,
					    CLOCK_POSSIBLE_PARENTS_GET,
					    sizeof(struct scmi_msg_clock_possible_parents),
					    &ppriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);

	return ret;
}

static int
scmi_clock_get_permissions(const struct scmi_protocol_handle *ph, u32 clk_id,
			   struct scmi_clock_info *clk)
{
	struct scmi_xfer *t;
	u32 perm;
	int ret;

	ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS,
				      sizeof(clk_id), sizeof(perm), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		perm = get_unaligned_le32(t->rx.buf);

		clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED);
		clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED);
		clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED);
	}

	ph->xops->xfer_put(ph, t);

	return ret;
}

static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct clock_info *cinfo,
				     u32 version)
{
	int ret;
	u32 attributes;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;
	struct scmi_clock_info *clk = cinfo->clk + clk_id;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u32 latency = 0;

		attributes = le32_to_cpu(attr->attributes);
		strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
		/*
		 * The clock_enable_latency field is present only since
		 * SCMI v3.1 (i.e. clock protocol v2.0).
		 */
		if (PROTOCOL_REV_MAJOR(version) >= 0x2)
			latency = le32_to_cpu(attr->clock_enable_latency);
		clk->enable_latency = latency ? : U32_MAX;
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported overwrite short name with the extended one;
	 * on error just carry on and use already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
		if (SUPPORTS_EXTENDED_NAMES(attributes))
			ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
						    NULL, clk->name,
						    SCMI_MAX_STR_SIZE);

		if (cinfo->notify_rate_changed_cmd &&
		    SUPPORTS_RATE_CHANGED_NOTIF(attributes))
			clk->rate_changed_notifications = true;
		if (cinfo->notify_rate_change_requested_cmd &&
		    SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
			clk->rate_change_requested_notifications = true;
		if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
			if (SUPPORTS_PARENT_CLOCK(attributes))
				scmi_clock_possible_parents(ph, clk_id, clk);
			if (SUPPORTS_GET_PERMISSIONS(attributes))
				scmi_clock_get_permissions(ph, clk_id, clk);
			if (SUPPORTS_EXTENDED_CONFIG(attributes))
				clk->extended_config = true;
		}
	}

	return ret;
}

static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}

static void iter_clk_describe_prepare_message(void *message,
					      const unsigned int desc_index,
					      const void *priv)
{
	struct scmi_msg_clock_describe_rates *msg = message;
	const struct scmi_clk_ipriv *p = priv;

	msg->id = cpu_to_le32(p->clk_id);
	/* Set the number of rates to be skipped/already read */
	msg->rate_index = cpu_to_le32(desc_index);
}

#define QUIRK_OUT_OF_SPEC_TRIPLET					       \
	({								       \
		/*							       \
		 * A known quirk: a triplet is returned but num_returned != 3. \
		 * Check for a safe payload size and fix.		       \
		 */							       \
		if (st->num_returned != 3 && st->num_remaining == 0 &&	       \
		    st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {       \
			st->num_returned = 3;				       \
			st->num_remaining = 0;				       \
		} else {						       \
			dev_err(p->dev,					       \
				"Cannot fix out-of-spec reply !\n");	       \
			return -EPROTO;					       \
		}							       \
	})
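/*
 * Example of the breakage this quirk tolerates (a sketch, not tied to any
 * specific platform): firmware describes a non-discrete clock with the
 * mandated {min, max, step} triplet in the payload but reports
 * num_returned == 1. If the payload length still matches exactly three
 * two-word rate entries, the triplet is trusted and the counters are fixed
 * up; otherwise the reply is rejected with -EPROTO.
 */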

static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
			       const void *response, void *priv)
{
	u32 flags;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	flags = le32_to_cpu(r->num_rates_flags);
	st->num_remaining = NUM_REMAINING(flags);
	st->num_returned = NUM_RETURNED(flags);
	p->clk->rate_discrete = RATE_DISCRETE(flags);

	/* Warn about out-of-spec replies ... */
	if (!p->clk->rate_discrete &&
	    (st->num_returned != 3 || st->num_remaining != 0)) {
		dev_warn(p->dev,
			 "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
			 p->clk->name, st->num_returned, st->num_remaining,
			 st->rx_len);

		SCMI_QUIRK(clock_rates_triplet_out_of_spec,
			   QUIRK_OUT_OF_SPEC_TRIPLET);
	}

	return 0;
}

static int
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
				   const void *response,
				   struct scmi_iterator_state *st, void *priv)
{
	int ret = 0;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	if (!p->clk->rate_discrete) {
		switch (st->desc_index + st->loop_idx) {
		case 0:
			p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
			break;
		case 1:
			p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
			break;
		case 2:
			p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];

		*rate = RATE_TO_U64(r->rate[st->loop_idx]);
		p->clk->list.num_rates++;
	}

	return ret;
}

static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_clk_describe_prepare_message,
		.update_state = iter_clk_describe_update_state,
		.process_response = iter_clk_describe_process_response,
	};
	struct scmi_clk_ipriv cpriv = {
		.clk_id = clk_id,
		.clk = clk,
		.dev = ph->dev,
	};

	iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
					    CLOCK_DESCRIBE_RATES,
					    sizeof(struct scmi_msg_clock_describe_rates),
					    &cpriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (!clk->rate_discrete) {
		dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
			clk->range.min_rate, clk->range.max_rate,
			clk->range.step_size);
	} else if (clk->list.num_rates) {
		sort(clk->list.rates, clk->list.num_rates,
		     sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
	}

	return ret;
}

static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
		    u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
				      sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

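/*
 * Rate changes can be served asynchronously by the platform: when the
 * advertised max_async_req budget is not yet exhausted, the CLOCK_SET_ASYNC
 * flag is set and the call waits for the delayed rate-set-complete response
 * instead of the immediate one, with cur_async_req tracking the number of
 * async requests currently in flight.
 */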
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk->rate_ctrl_forbidden)
		return -EACCES;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC) {
		ret = ph->xops->do_xfer_with_response(ph, t);
		if (!ret) {
			struct scmi_msg_resp_set_rate_complete *resp;

			resp = t->rx.buf;
			if (le32_to_cpu(resp->id) == clk_id)
				dev_dbg(ph->dev,
					"Clk ID %d set async to %llu\n", clk_id,
					get_unaligned_le64(&resp->rate_low));
			else
				ret = -EPROTO;
		}
	} else {
		ret = ph->xops->do_xfer(ph, t);
	}

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
		      enum clk_state state,
		      enum scmi_clock_oem_config __unused0, u32 __unused1,
		      bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_set *cfg;

	if (state >= CLK_STATE_RESERVED)
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(state);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int
scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 parent_id)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_set_parent *cfg;
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (parent_id >= clk->num_parents)
		return -EINVAL;

	if (clk->parent_ctrl_forbidden)
		return -EACCES;

	ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = false;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->parent_id = cpu_to_le32(clk->parents[parent_id]);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);

	return ret;
}

static int
scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 *parent_id)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET,
				      sizeof(__le32), sizeof(u32), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*parent_id = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
			 enum clk_state state,
			 enum scmi_clock_oem_config oem_type, u32 oem_val,
			 bool atomic)
{
	int ret;
	u32 attrs;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_set_v2 *cfg;

	if (state == CLK_STATE_RESERVED ||
	    (!oem_type && state == CLK_STATE_UNCHANGED))
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;

	attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) |
		FIELD_PREP(REGMASK_CLK_STATE, state);
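	/*
	 * Example: oem_type = 0x2 combined with state = CLK_STATE_ENABLE
	 * packs to attrs = (0x2 << 16) | 0x1 = 0x00020001.
	 */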

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(attrs);
	/* Clear in any case */
	cfg->oem_config_val = cpu_to_le32(0);
	if (oem_type)
		cfg->oem_config_val = cpu_to_le32(oem_val);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
			     bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk->state_ctrl_forbidden)
		return -EACCES;

	return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
				    NULL_OEM_TYPE, 0, atomic);
}

static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
			      bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk->state_ctrl_forbidden)
		return -EACCES;

	return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
				    NULL_OEM_TYPE, 0, atomic);
}

/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
			 enum scmi_clock_oem_config oem_type, u32 *attributes,
			 bool *enabled, u32 *oem_val, bool atomic)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_get *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;

	flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type);

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->flags = cpu_to_le32(flags);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		struct scmi_msg_resp_clock_config_get *resp = t->rx.buf;

		if (attributes)
			*attributes = le32_to_cpu(resp->attributes);

		if (enabled)
			*enabled = IS_CLK_ENABLED(resp->config);

		if (oem_val && oem_type)
			*oem_val = le32_to_cpu(resp->oem_config_val);
	}

	ph->xops->xfer_put(ph, t);

	return ret;
}

static int
scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
		      enum scmi_clock_oem_config oem_type, u32 *attributes,
		      bool *enabled, u32 *oem_val, bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *resp;

	if (!enabled)
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*resp), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;
	put_unaligned_le32(clk_id, t->tx.buf);
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*enabled = IS_CLK_ENABLED(resp->attributes);

	ph->xops->xfer_put(ph, t);

	return ret;
}

static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
				u32 clk_id, bool *enabled, bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL,
				    enabled, NULL, atomic);
}

static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
				     u32 clk_id,
				     enum scmi_clock_oem_config oem_type,
				     u32 oem_val, bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (!clk->extended_config)
		return -EOPNOTSUPP;

	return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
				    oem_type, oem_val, atomic);
}

static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id,
				     enum scmi_clock_oem_config oem_type,
				     u32 *oem_val, u32 *attributes, bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (!clk->extended_config)
		return -EOPNOTSUPP;

	return ci->clock_config_get(ph, clk_id, oem_type, attributes,
				    NULL, oem_val, atomic);
}

static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	struct scmi_clock_info *clk;
	struct clock_info *ci = ph->get_priv(ph);

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return NULL;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
	.state_get = scmi_clock_state_get,
	.config_oem_get = scmi_clock_config_oem_get,
	.config_oem_set = scmi_clock_config_oem_set,
	.parent_set = scmi_clock_set_parent,
	.parent_get = scmi_clock_get_parent,
};
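/*
 * Usage sketch (assumed consumer-side code, modelled on how an SCMI clock
 * provider such as drivers/clk/clk-scmi.c obtains these ops; the local
 * variable names are illustrative only):
 *
 *	const struct scmi_clk_proto_ops *ops;
 *	struct scmi_protocol_handle *ph;
 *
 *	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	ret = ops->enable(ph, clk_id, false);
 *	if (!ret)
 *		ret = ops->rate_set(ph, clk_id, 100000000ULL);
 */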

static bool scmi_clk_notify_supported(const struct scmi_protocol_handle *ph,
				      u8 evt_id, u32 src_id)
{
	bool supported;
	struct scmi_clock_info *clk;
	struct clock_info *ci = ph->get_priv(ph);

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return false;

	clk = scmi_clock_domain_lookup(ci, src_id);
	if (IS_ERR(clk))
		return false;

	if (evt_id == SCMI_EVENT_CLOCK_RATE_CHANGED)
		supported = clk->rate_changed_notifications;
	else
		supported = clk->rate_change_requested_notifications;

	return supported;
}

static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
				u32 clk_id, int message_id, bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_rate_notify *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->clk_id = cpu_to_le32(clk_id);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
				       u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
	ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
					 u8 evt_id, ktime_t timestamp,
					 const void *payld, size_t payld_sz,
					 void *report, u32 *src_id)
{
	const struct scmi_clock_rate_notify_payld *p = payld;
	struct scmi_clock_rate_notif_report *r = report;

	if (sizeof(*p) != payld_sz ||
	    (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
	     evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
		return NULL;

	r->timestamp = timestamp;
	r->agent_id = le32_to_cpu(p->agent_id);
	r->clock_id = le32_to_cpu(p->clock_id);
	r->rate = get_unaligned_le64(&p->rate_low);
	*src_id = r->clock_id;

	return r;
}

static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	if (!ci)
		return -EINVAL;

	return ci->num_clocks;
}

static const struct scmi_event clk_events[] = {
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
};

static const struct scmi_event_ops clk_event_ops = {
	.is_notify_supported = scmi_clk_notify_supported,
	.get_num_sources = scmi_clk_get_num_sources,
	.set_notify_enabled = scmi_clk_set_notify_enabled,
	.fill_custom_report = scmi_clk_fill_custom_report,
};

static const struct scmi_protocol_events clk_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &clk_event_ops,
	.evts = clk_events,
	.num_events = ARRAY_SIZE(clk_events),
};

static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	ret = ph->xops->version_get(ph, &version);
	if (ret)
		return ret;

	dev_dbg(ph->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	ret = scmi_clock_protocol_attributes_get(ph, cinfo);
	if (ret)
		return ret;

	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(ph, clkid, cinfo, version);
		if (!ret)
			scmi_clock_describe_rates_get(ph, clkid, clk);
	}

	if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
		cinfo->clock_config_set = scmi_clock_config_set_v2;
		cinfo->clock_config_get = scmi_clock_config_get_v2;
	} else {
		cinfo->clock_config_set = scmi_clock_config_set;
		cinfo->clock_config_get = scmi_clock_config_get;
	}

	cinfo->version = version;
	return ph->set_priv(ph, cinfo, version);
}

static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
	.events = &clk_protocol_events,
	.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)