// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#include "raw_mode.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

static DEFINE_IDA(scmi_id);

static DEFINE_IDR(scmi_protocols);
static DEFINE_SPINLOCK(protocol_lock);

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static struct dentry *scmi_top_dentry;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *	a number of xfers equal to the maximum allowed in-flight
 *	messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *	currently in-flight messages.
 */
struct scmi_xfers_info {
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
	int max_msg;
	struct hlist_head free_xfers;
	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};
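
/*
 * Added commentary: the life-cycle of an xfer managed through a
 * struct scmi_xfers_info, as implemented by the helpers further down in
 * this file, in a nutshell:
 *
 *	free_xfers --scmi_xfer_get()--> allocated (no token yet)
 *	     ^                                 |
 *	     |                     scmi_xfer_pending_set(): token picked,
 *	     |                     xfer tracked in pending_xfers
 *	     |                                 |
 *	     +---- __scmi_xfer_put() <--- completion (or timeout)
 *
 * The sequence number in xfer->hdr.seq doubles as the index into
 * @xfer_alloc_table and as the key into the @pending_xfers hashtable.
 */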

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @version: Protocol version supported by the platform as detected at runtime.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform
 * instance which is defined by the DT and implemented by the SCMI server
 * firmware.
 */
struct scmi_protocol_instance {
	const struct scmi_handle *handle;
	const struct scmi_protocol *proto;
	void *gid;
	refcount_t users;
	void *priv;
	unsigned int version;
	struct scmi_protocol_handle ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_debug_info - Debug common info
 * @top_dentry: A reference to the top debugfs dentry
 * @name: Name of this SCMI instance
 * @type: Type of this SCMI instance
 * @is_atomic: Flag to state if the transport of this instance is atomic
 */
struct scmi_debug_info {
	struct dentry *top_dentry;
	const char *name;
	const char *type;
	bool is_atomic;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @id: A sequence number starting from zero identifying this instance
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	this SCMI instance: populated on protocol's first attempted
 *	usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	scmi_revision_info.num_protocols elements allocated by the
 *	base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *	in the DT and confirmed as implemented by fw.
 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
 *	in microseconds, for atomic operations.
 *	Only SCMI synchronous commands reported by the platform
 *	to have an execution latency less than or equal to the
 *	threshold should be considered for atomic mode operation:
 *	such a decision is finally left up to the SCMI drivers.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
 * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
 *	bus
 * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
 * @dbg: A pointer to debugfs related data (if any)
 * @raw: An opaque reference handle used by SCMI Raw mode.
 */
struct scmi_info {
	int id;
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	unsigned int atomic_threshold;
	void *notify_priv;
	struct list_head node;
	int users;
	struct notifier_block bus_nb;
	struct notifier_block dev_req_nb;
	/* Serialize device creation process for this instance */
	struct mutex devreq_mtx;
	struct scmi_debug_info *dbg;
	void *raw;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
#define bus_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, dev_req_nb)

static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
{
	const struct scmi_protocol *proto;

	proto = idr_find(&scmi_protocols, protocol_id);
	if (!proto || !try_module_get(proto->owner)) {
		pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
		return NULL;
	}

	pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);

	return proto;
}

static void scmi_protocol_put(int protocol_id)
{
	const struct scmi_protocol *proto;

	proto = idr_find(&scmi_protocols, protocol_id);
	if (proto)
		module_put(proto->owner);
}

int scmi_protocol_register(const struct scmi_protocol *proto)
{
	int ret;

	if (!proto) {
		pr_err("invalid protocol\n");
		return -EINVAL;
	}

	if (!proto->instance_init) {
		pr_err("missing init for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	spin_lock(&protocol_lock);
	ret = idr_alloc(&scmi_protocols, (void *)proto,
			proto->id, proto->id + 1, GFP_ATOMIC);
	spin_unlock(&protocol_lock);
	if (ret != proto->id) {
		pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
		       proto->id, ret);
		return ret;
	}

	pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);

	return 0;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);

void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
	spin_lock(&protocol_lock);
	idr_remove(&scmi_protocols, proto->id);
	spin_unlock(&protocol_lock);

	pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
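
/*
 * Illustrative sketch (hypothetical, not part of this driver): how a
 * protocol implementation would typically plug into the registration
 * helpers above. Only the fields this file actually dereferences
 * (.id, .owner, .instance_init) are shown; the real struct scmi_protocol
 * carries further members defined in the SCMI headers.
 *
 *	static int scmi_dummy_protocol_init(const struct scmi_protocol_handle *ph)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct scmi_protocol scmi_dummy = {
 *		.id		= 0x99,	// hypothetical protocol identifier
 *		.owner		= THIS_MODULE,
 *		.instance_init	= &scmi_dummy_protocol_init,
 *	};
 *
 *	// paired at module load/unload time:
 *	//	scmi_protocol_register(&scmi_dummy);
 *	//	scmi_protocol_unregister(&scmi_dummy);
 */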

/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 * @name: The optional name of the device to be created: if not provided this
 *	call will lead to the creation of all the devices currently requested
 *	for the specified protocol.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info,
					 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	mutex_lock(&info->devreq_mtx);
	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (name && !sdev)
		dev_err(info->dev,
			"failed to create device for protocol 0x%X (%s)\n",
			prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}

static void scmi_destroy_protocol_devices(struct scmi_info *info,
					  int prot_id, const char *name)
{
	mutex_lock(&info->devreq_mtx);
	scmi_device_destroy(info->dev, prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data are visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our request in-order, we should
 * account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
 *    using find_next_zero_bit() starting from candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
 *    there are plenty of free tokens at the start, so try a second pass using
 *    find_next_zero_bit() starting from 0.
 *
 *  X = used in-flight
 *
 * Normal
 * ------
 *
 *		|- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *		^
 *		|- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *							       ^
 *							       |- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Pick the next available xfer_id >= next_token */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at start of xfer_alloc_table so
		 * try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		/*
		 * Something is wrong if we got here since there can be a
		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
		 */
		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
			return -ENOMEM;
	}

	/* Update +/- last_token accordingly if we skipped some hole */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}
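
/*
 * Added commentary, a worked example of the selection above with made-up
 * numbers, assuming a 10-bit token space (MSG_TOKEN_MAX == 1024): for
 * transfer_id == 2051 the candidate is next_token = 2051 & 1023 = 3. If
 * bits 3 and 4 of xfer_alloc_table are still set by in-flight xfers,
 * find_next_zero_bit() returns 5 and transfer_last_id is advanced by
 * (5 - 3), so that subsequent candidates keep increasing monotonically.
 * Only when every bit from next_token up to MSG_TOKEN_MAX - 1 is taken
 * does the second, wrap-around search from bit 0 run.
 */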

/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
					 struct scmi_xfer *xfer)
{
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper assumes that the xfer to be registered as in-flight
 * had been built using an xfer sequence number which still corresponds to a
 * free slot in the xfer_alloc_table.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 */
static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
				     struct scmi_xfers_info *minfo)
{
	/* Set in-flight */
	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
	xfer->pending = true;
}

/**
 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper does NOT assume anything about the sequence number
 * that was baked into the provided xfer, so it checks at first if it can
 * be mapped to a free slot and fails with an error if another xfer with the
 * same sequence number is currently still registered as in-flight.
 *
 * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
 * could not be mapped to a free slot in the xfer_alloc_table.
 */
static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
				       struct scmi_xfers_info *minfo)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	else
		ret = -EBUSY;
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}

/**
 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
 * flight on the TX channel, if possible.
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: The xfer to register
 *
 * Return: 0 on Success, error otherwise
 */
int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
}

/**
 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
 * as pending in-flight
 *
 * @xfer: The xfer to act upon
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Return: 0 on Success or error otherwise
 */
static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
					struct scmi_xfers_info *minfo)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	/* Set a new monotonic token as the xfer sequence number */
	ret = scmi_xfer_token_set(minfo, xfer);
	if (!ret)
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and performs
 * a basic initialization.
 *
 * Note that, at this point, no sequence number has been assigned yet to the
 * allocated xfer, nor has it been registered as a pending transaction.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else pointer error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/*
	 * Allocate transfer_id early so that it can also be used as the base
	 * for monotonic sequence number generation if needed.
	 */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	refcount_set(&xfer->users, 1);
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}
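
/*
 * Added commentary: the canonical allocation sequence built on the above,
 * as performed later by xfer_get_init() for regular commands:
 *
 *	xfer = scmi_xfer_get(handle, &info->tx_minfo);
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	// pick a token and track the xfer as in-flight
 *	ret = scmi_xfer_pending_set(xfer, &info->tx_minfo);
 *	if (ret)
 *		__scmi_xfer_put(&info->tx_minfo, xfer);
 *
 * Raw mode instead bakes its own sequence number into the xfer and
 * registers it with scmi_xfer_raw_inflight_register().
 */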

/**
 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
 *
 * @handle: Pointer to SCMI entity handle
 *
 * Note that xfer is taken from the TX channel structures.
 *
 * Return: A valid xfer on Success, or an error-pointer otherwise
 */
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer = scmi_xfer_get(handle, &info->tx_minfo);
	if (!IS_ERR(xfer))
		xfer->flags |= SCMI_XFER_FLAG_IS_RAW;

	return xfer;
}

/**
 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
 * to use for a specific protocol_id Raw transaction.
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol_id: Identifier of the protocol
 *
 * Note that in a regular SCMI stack, usually, a protocol has to be defined in
 * the DT to have an associated channel and be usable; but in Raw mode any
 * protocol in range is allowed, re-using the Base channel, so as to enable
 * fuzzing on any protocol without the need of a fully compiled DT.
 *
 * Return: A reference to the channel to use, or an ERR_PTR
 */
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_chan_info *cinfo;
	struct scmi_info *info = handle_to_scmi_info(handle);

	cinfo = idr_find(&info->tx_idr, protocol_id);
	if (!cinfo) {
		if (protocol_id == SCMI_PROTOCOL_BASE)
			return ERR_PTR(-EINVAL);
		/* Use Base channel for protocols not defined for DT */
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		if (!cinfo)
			return ERR_PTR(-EINVAL);
		dev_warn_once(handle->dev,
			      "Using Base channel for protocol 0x%X\n",
			      protocol_id);
	}

	return cinfo;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;
		}
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: A reference to the xfer to put
 *
 * Note that as with other xfer_put() handlers the xfer is really effectively
 * released only if there are no more users on the system.
 */
void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
	return __scmi_xfer_put(&info->tx_minfo, xfer);
}

/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL;

	if (test_bit(xfer_id, minfo->xfer_alloc_table))
		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

	return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_msg_response_validate - Validate message type against state of related
 * xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
					     u8 msg_type,
					     struct scmi_xfer *xfer)
{
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/*
			 * Delayed Response expected but delivered earlier.
			 * Assume message RESPONSE was OK and skip state.
			 */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
			dev_warn(cinfo->dev,
				 "Received valid OoO Delayed Response for %d\n",
				 xfer->hdr.seq);
		}
		break;
	case SCMI_XFER_RESP_OK:
		if (msg_type != MSG_TYPE_DELAYED_RESP)
			return -EINVAL;
		break;
	case SCMI_XFER_DRESP_OK:
		/* No further message expected once in SCMI_XFER_DRESP_OK */
		return -EINVAL;
	}

	return 0;
}
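
/*
 * Added commentary: the state transitions enforced by the checks above.
 *
 *	SCMI_XFER_SENT_OK --RESP--> SCMI_XFER_RESP_OK --DRESP--> SCMI_XFER_DRESP_OK
 *
 * An out-of-order DRESP received while still in SCMI_XFER_SENT_OK (with
 * xfer->async_done set) completes the missing RESP as successful and lets
 * the Delayed Response processing carry on; any other mismatch is rejected.
 */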

/**
 * scmi_xfer_state_update - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully validated
 * by @scmi_msg_response_validate(), so here we just update the state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
	xfer->hdr.type = msg_type;

	/* Unknown command types were already discarded earlier */
	if (xfer->hdr.type == MSG_TYPE_COMMAND)
		xfer->state = SCMI_XFER_RESP_OK;
	else
		xfer->state = SCMI_XFER_DRESP_OK;
}

static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	/* Are we even expecting this? */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
	if (IS_ERR(xfer)) {
		dev_err(cinfo->dev,
			"Message for %d type %d is not expected!\n",
			xfer_id, msg_type);
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return xfer;
	}
	refcount_inc(&xfer->users);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	spin_lock_irqsave(&xfer->lock, flags);
	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruent state with
	 * the received message, acquire exclusive access to it setting the busy
	 * flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
		spin_until_cond(scmi_xfer_acquired(xfer));
		scmi_xfer_state_update(xfer, msg_type);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (ret) {
		dev_err(cinfo->dev,
			"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
			msg_type, xfer_id, msg_hdr, xfer->state);
		/* On error the refcount incremented above has to be dropped */
		__scmi_xfer_put(minfo, xfer);
		xfer = ERR_PTR(-EINVAL);
	}

	return xfer;
}

static inline void scmi_xfer_command_release(struct scmi_info *info,
					     struct scmi_xfer *xfer)
{
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static inline void scmi_clear_channel(struct scmi_info *info,
				      struct scmi_chan_info *cinfo)
{
	if (info->desc->ops->clear_channel)
		info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		scmi_clear_channel(info, cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);

	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
		scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
					cinfo->id);
	}

	__scmi_xfer_put(minfo, xfer);

	scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
			scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);

		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
			scmi_clear_channel(info, cinfo);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id,
			    xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
			    (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
			    (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
			    xfer->hdr.seq, xfer->hdr.status,
			    xfer->rx.buf, xfer->rx.len);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   xfer->hdr.type);

	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
		scmi_clear_channel(info, cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		/*
		 * When in polling mode avoid to queue the Raw xfer on the IRQ
		 * RX path since it will be already queued at the end of the TX
		 * poll loop.
		 */
		if (!xfer->hdr.poll_completion)
			scmi_raw_message_report(info->raw, xfer,
						SCMI_RAW_REPLY_QUEUE,
						cinfo->id);
	}

	scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr, priv);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, msg_hdr, priv);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
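
/*
 * Illustrative sketch (hypothetical transport code, not part of this
 * file): a transport would typically invoke the callback above from its
 * RX interrupt handler after having read the message header from its
 * medium. The scmi_dummy_* names are assumptions for illustration only.
 *
 *	static irqreturn_t scmi_dummy_rx_irq(int irq, void *data)
 *	{
 *		struct scmi_chan_info *cinfo = data;
 *		u32 msg_hdr = scmi_dummy_read_header(cinfo);
 *
 *		scmi_rx_callback(cinfo, msg_hdr, NULL);
 *		return IRQ_HANDLED;
 *	}
 */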

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	/*
	 * Poll also on xfer->done so that polling can be forcibly terminated
	 * in case of out-of-order receptions of delayed responses
	 */
	return info->desc->ops->poll_done(cinfo, xfer) ||
	       try_wait_for_completion(&xfer->done) ||
	       ktime_after(ktime_get(), stop);
}

static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
			       struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer, unsigned int timeout_ms)
{
	int ret = 0;

	if (xfer->hdr.poll_completion) {
		/*
		 * Real polling is needed only if transport has NOT declared
		 * itself to support synchronous command replies.
		 */
		if (!desc->sync_cmds_completed_on_ret) {
			/*
			 * Poll on xfer using transport provided .poll_done();
			 * assumes no completion interrupt was available.
			 */
			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
								  xfer, stop));
			if (ktime_after(ktime_get(), stop)) {
				dev_err(dev,
					"timed out in resp(caller: %pS) - polling\n",
					(void *)_RET_IP_);
				ret = -ETIMEDOUT;
			}
		}

		if (!ret) {
			unsigned long flags;
			struct scmi_info *info =
				handle_to_scmi_info(cinfo->handle);

			/*
			 * Do not fetch_response if an out-of-order delayed
			 * response is being processed.
			 */
			spin_lock_irqsave(&xfer->lock, flags);
			if (xfer->state == SCMI_XFER_SENT_OK) {
				desc->ops->fetch_response(cinfo, xfer);
				xfer->state = SCMI_XFER_RESP_OK;
			}
			spin_unlock_irqrestore(&xfer->lock, flags);

			/* Trace polled replies. */
			trace_scmi_msg_dump(info->id, cinfo->id,
					    xfer->hdr.protocol_id, xfer->hdr.id,
					    !SCMI_XFER_IS_RAW(xfer) ?
					    "RESP" : "resp",
					    xfer->hdr.seq, xfer->hdr.status,
					    xfer->rx.buf, xfer->rx.len);

			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
				scmi_raw_message_report(info->raw, xfer,
							SCMI_RAW_REPLY_QUEUE,
							cinfo->id);
		}
	} else {
		/* And we wait for the response. */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms))) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	return ret;
}

/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
					  struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq,
				      info->desc->max_rx_timeout_ms,
				      xfer->hdr.poll_completion);

	return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
				   info->desc->max_rx_timeout_ms);
}

/**
 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
 * reply to an xfer raw request on a specific channel for the required timeout.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 * @timeout_ms: The maximum timeout in milliseconds
 *
 * Return: 0 on Success, error otherwise.
 */
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					    struct scmi_xfer *xfer,
					    unsigned int timeout_ms)
{
	int ret;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
	if (ret)
		dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
			pack_scmi_header(&xfer->hdr));

	return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT if no response is received, the corresponding error on
 *	a transmit failure, else 0 if all goes well.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/* Check for polling request on custom command xfers at first */
	if (xfer->hdr.poll_completion &&
	    !is_transport_polling_capable(info->desc)) {
		dev_warn_once(dev,
			      "Polling mode is not supported by transport.\n");
		return -EINVAL;
	}

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo))
		return -EINVAL;

	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, info->desc))
		xfer->hdr.poll_completion = true;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	/* Clear any stale status */
	xfer->hdr.status = SCMI_SUCCESS;
	xfer->state = SCMI_XFER_SENT_OK;
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "CMND", xfer->hdr.seq,
			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);

	ret = scmi_wait_for_message_response(cinfo, xfer);
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret, xfer);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kind of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (thing that can be easily
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (in other words there is usually a good reason if a platform provides an
 * asynchronous version of a command and we should prefer to use it...just not
 * when using atomic/polling mode)
 *
 * Return: -ETIMEDOUT if no delayed response is received, the corresponding
 *	error on a transmit failure, else 0 if all goes well.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	/*
	 * Delayed responses should not be polled, so an async command should
	 * not have been used when requiring an atomic/poll context; WARN and
	 * perform instead a sleeping wait.
	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
	 */
	WARN_ON_ONCE(xfer->hdr.poll_completion);

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
			dev_err(ph->dev,
				"timed out in delayed resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		} else if (xfer->hdr.status) {
			ret = scmi_to_linux_errno(xfer->hdr.status);
		}
	}

	xfer->async_done = NULL;
	return ret;
}
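
/*
 * Illustrative sketch (hypothetical protocol code): issuing an
 * asynchronous command through the xfer ops exported below. The message
 * id (0x6) and single-word payload are assumptions for illustration;
 * real protocols define their own layouts.
 *
 *	struct scmi_xfer *t;
 *	u32 domain_id = 0;	// hypothetical resource identifier
 *	int ret;
 *
 *	// 0x6: hypothetical asynchronous command id
 *	ret = ph->xops->xfer_get_init(ph, 0x6, sizeof(u32), 0, &t);
 *	if (ret)
 *		return ret;
 *
 *	put_unaligned_le32(domain_id, t->tx.buf);
 *	ret = ph->xops->do_xfer_with_response(ph, t);
 *	ph->xops->xfer_put(ph, t);
 */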

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	/* Pick a sequence number and register this xfer as in-flight */
	ret = scmi_xfer_pending_set(xfer, minfo);
	if (ret) {
		dev_err(pi->handle->dev,
			"Failed to get monotonic token %d\n", ret);
		__scmi_xfer_put(minfo, xfer);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.type = MSG_TYPE_COMMAND;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 * @version: The detected protocol version for the core to register.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv, u32 version)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;
	pi->version = version;

	return 0;
}

/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};
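
/*
 * Illustrative sketch (hypothetical protocol code): the typical shape of
 * a protocol's instance_init callback built on the xfer_ops above. The
 * scmi_dummy_* names are assumptions for illustration.
 *
 *	static int scmi_dummy_protocol_init(const struct scmi_protocol_handle *ph)
 *	{
 *		u32 version;
 *		int ret;
 *
 *		ret = ph->xops->version_get(ph, &version);
 *		if (ret)
 *			return ret;
 *
 *		dev_dbg(ph->dev, "Dummy Protocol Version 0x%x\n", version);
 *		return 0;
 *	}
 */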

struct scmi_msg_resp_domain_name_get {
	__le32 flags;
	u8 name[SCMI_MAX_STR_SIZE];
};

/**
 * scmi_common_extended_name_get - Common helper to get extended resources name
 * @ph: A protocol handle reference.
 * @cmd_id: The specific command ID to use.
 * @res_id: The specific resource ID to use.
 * @flags: A pointer to specific flags to use, if any.
 * @name: A pointer to the preallocated area where the retrieved name will be
 *	  stored as a NULL terminated string.
 * @len: The len in bytes of the @name char array.
 *
 * Return: 0 on Success
 */
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
					 u8 cmd_id, u32 res_id, u32 *flags,
					 char *name, size_t len)
{
	int ret;
	size_t txlen;
	struct scmi_xfer *t;
	struct scmi_msg_resp_domain_name_get *resp;

	txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
	ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
	if (ret)
		goto out;

	put_unaligned_le32(res_id, t->tx.buf);
	if (flags)
		put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		strscpy(name, resp->name, len);

	ph->xops->xfer_put(ph, t);
out:
	if (ret)
		dev_warn(ph->dev,
			 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
			 res_id, ret, name);
	return ret;
}

/**
 * struct scmi_iterator - Iterator descriptor
 * @msg: A reference to the message TX buffer; filled by @prepare_message with
 *	 a proper custom command payload for each multi-part command request.
 * @resp: A reference to the response RX buffer; used by @update_state and
 *	  @process_response to parse the multi-part replies.
 * @t: A reference to the underlying xfer initialized and used transparently by
 *     the iterator's internal routines.
 * @ph: A reference to the associated protocol handle to be used.
 * @ops: A reference to the custom provided iterator operations.
 * @state: The current iterator state; used and updated in turn by the
 *	   iterator's internal routines and by the caller-provided
 *	   @scmi_iterator_ops.
 * @priv: A reference to optional private data as provided by the caller and
 *	  passed back to the @scmi_iterator_ops.
 */
struct scmi_iterator {
	void *msg;
	void *resp;
	struct scmi_xfer *t;
	const struct scmi_protocol_handle *ph;
	struct scmi_iterator_ops *ops;
	struct scmi_iterator_state state;
	void *priv;
};

static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
				struct scmi_iterator_ops *ops,
				unsigned int max_resources, u8 msg_id,
				size_t tx_size, void *priv)
{
	int ret;
	struct scmi_iterator *i;

	i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
	if (!i)
		return ERR_PTR(-ENOMEM);

	i->ph = ph;
	i->ops = ops;
	i->priv = priv;

	ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
	if (ret) {
		devm_kfree(ph->dev, i);
		return ERR_PTR(ret);
	}

	i->state.max_resources = max_resources;
	i->msg = i->t->tx.buf;
	i->resp = i->t->rx.buf;

	return i;
}

static int scmi_iterator_run(void *iter)
{
	int ret = -EINVAL;
	struct scmi_iterator_ops *iops;
	const struct scmi_protocol_handle *ph;
	struct scmi_iterator_state *st;
	struct scmi_iterator *i = iter;

	if (!i || !i->ops || !i->ph)
		return ret;

	iops = i->ops;
	ph = i->ph;
	st = &i->state;

	do {
		iops->prepare_message(i->msg, st->desc_index, i->priv);
		ret = ph->xops->do_xfer(ph, i->t);
		if (ret)
			break;

		st->rx_len = i->t->rx.len;
		ret = iops->update_state(st, i->resp, i->priv);
		if (ret)
			break;

		if (st->num_returned > st->max_resources - st->desc_index) {
			dev_err(ph->dev,
				"No. of resources can't exceed %d\n",
				st->max_resources);
			ret = -EINVAL;
			break;
		}

		for (st->loop_idx = 0; st->loop_idx < st->num_returned;
		     st->loop_idx++) {
			ret = iops->process_response(ph, i->resp, st, i->priv);
			if (ret)
				goto out;
		}

		st->desc_index += st->num_returned;
		ph->xops->reset_rx_to_maxsz(ph, i->t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (st->num_returned && st->num_remaining);

out:
	/* Finalize and destroy iterator */
	ph->xops->xfer_put(ph, i->t);
	devm_kfree(ph->dev, i);

	return ret;
}
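
/*
 * Illustrative sketch (hypothetical protocol code): a multi-part query
 * driven by the iterator above. A protocol provides the three callbacks
 * and lets the iterator loop over the paged firmware replies; the
 * prototypes follow the calls made by scmi_iterator_run() but, like the
 * message layouts, are assumptions for illustration here.
 *
 *	static void iter_prepare(void *message, unsigned int desc_index,
 *				 const void *priv)
 *	{
 *		__le32 *msg = message;
 *
 *		*msg = cpu_to_le32(desc_index);	// page index to query next
 *	}
 *
 *	static int iter_update_state(struct scmi_iterator_state *st,
 *				     const void *response, void *priv)
 *	{
 *		const __le32 *num = response;
 *
 *		st->num_returned = le32_to_cpu(*num) & 0xfff;
 *		st->num_remaining = le32_to_cpu(*num) >> 16;
 *		return 0;
 *	}
 *
 *	static int iter_process(const struct scmi_protocol_handle *ph,
 *				const void *response,
 *				struct scmi_iterator_state *st, void *priv)
 *	{
 *		// consume entry st->loop_idx of this reply
 *		return 0;
 *	}
 */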
1594
1595 struct scmi_msg_get_fc_info {
1596 __le32 domain;
1597 __le32 message_id;
1598 };
1599
1600 struct scmi_msg_resp_desc_fc {
1601 __le32 attr;
1602 #define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
1603 #define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
1604 __le32 rate_limit;
1605 __le32 chan_addr_low;
1606 __le32 chan_addr_high;
1607 __le32 chan_size;
1608 __le32 db_addr_low;
1609 __le32 db_addr_high;
1610 __le32 db_set_lmask;
1611 __le32 db_set_hmask;
1612 __le32 db_preserve_lmask;
1613 __le32 db_preserve_hmask;
1614 };
1615
1616 static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle * ph,u8 describe_id,u32 message_id,u32 valid_size,u32 domain,void __iomem ** p_addr,struct scmi_fc_db_info ** p_db)1617 scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1618 u8 describe_id, u32 message_id, u32 valid_size,
1619 u32 domain, void __iomem **p_addr,
1620 struct scmi_fc_db_info **p_db)
1621 {
1622 int ret;
1623 u32 flags;
1624 u64 phys_addr;
1625 u8 size;
1626 void __iomem *addr;
1627 struct scmi_xfer *t;
1628 struct scmi_fc_db_info *db = NULL;
1629 struct scmi_msg_get_fc_info *info;
1630 struct scmi_msg_resp_desc_fc *resp;
1631 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1632
1633 if (!p_addr) {
1634 ret = -EINVAL;
1635 goto err_out;
1636 }
1637
1638 ret = ph->xops->xfer_get_init(ph, describe_id,
1639 sizeof(*info), sizeof(*resp), &t);
1640 if (ret)
1641 goto err_out;
1642
1643 info = t->tx.buf;
1644 info->domain = cpu_to_le32(domain);
1645 info->message_id = cpu_to_le32(message_id);
1646
1647 /*
1648 * Bail out on error leaving fc_info addresses zeroed; this includes
1649 * the case in which the requested domain/message_id does NOT support
1650 * fastchannels at all.
1651 */
1652 ret = ph->xops->do_xfer(ph, t);
1653 if (ret)
1654 goto err_xfer;
1655
1656 resp = t->rx.buf;
1657 flags = le32_to_cpu(resp->attr);
1658 size = le32_to_cpu(resp->chan_size);
1659 if (size != valid_size) {
1660 ret = -EINVAL;
1661 goto err_xfer;
1662 }
1663
1664 phys_addr = le32_to_cpu(resp->chan_addr_low);
1665 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1666 addr = devm_ioremap(ph->dev, phys_addr, size);
1667 if (!addr) {
1668 ret = -EADDRNOTAVAIL;
1669 goto err_xfer;
1670 }
1671
1672 *p_addr = addr;
1673
1674 if (p_db && SUPPORTS_DOORBELL(flags)) {
1675 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1676 if (!db) {
1677 ret = -ENOMEM;
1678 goto err_db;
1679 }
1680
1681 size = 1 << DOORBELL_REG_WIDTH(flags);
1682 phys_addr = le32_to_cpu(resp->db_addr_low);
1683 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1684 addr = devm_ioremap(ph->dev, phys_addr, size);
1685 if (!addr) {
1686 ret = -EADDRNOTAVAIL;
1687 goto err_db_mem;
1688 }
1689
1690 db->addr = addr;
1691 db->width = size;
1692 db->set = le32_to_cpu(resp->db_set_lmask);
1693 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1694 db->mask = le32_to_cpu(resp->db_preserve_lmask);
1695 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1696
1697 *p_db = db;
1698 }
1699
1700 ph->xops->xfer_put(ph, t);
1701
1702 dev_dbg(ph->dev,
1703 "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
1704 pi->proto->id, message_id, domain);
1705
1706 return;
1707
1708 err_db_mem:
1709 devm_kfree(ph->dev, db);
1710
1711 err_db:
1712 *p_addr = NULL;
1713
1714 err_xfer:
1715 ph->xops->xfer_put(ph, t);
1716
1717 err_out:
1718 dev_warn(ph->dev,
1719 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
1720 pi->proto->id, message_id, domain, ret);
1721 }
1722
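/*
 * Ring a doorbell of width @w bits: if the platform described a preserve
 * mask, read the register back first and keep the bits covered by db->mask,
 * then OR-in db->set and write the result, so that unrelated bits in the
 * doorbell register are left untouched.
 */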
1723 #define SCMI_PROTO_FC_RING_DB(w) \
1724 do { \
1725 u##w val = 0; \
1726 \
1727 if (db->mask) \
1728 val = ioread##w(db->addr) & db->mask; \
1729 iowrite##w((u##w)db->set | val, db->addr); \
1730 } while (0)
1731
1732 static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
1733 {
1734 if (!db || !db->addr)
1735 return;
1736
1737 if (db->width == 1)
1738 SCMI_PROTO_FC_RING_DB(8);
1739 else if (db->width == 2)
1740 SCMI_PROTO_FC_RING_DB(16);
1741 else if (db->width == 4)
1742 SCMI_PROTO_FC_RING_DB(32);
1743 else /* db->width == 8 */
1744 #ifdef CONFIG_64BIT
1745 SCMI_PROTO_FC_RING_DB(64);
1746 #else
1747 {
1748 u64 val = 0;
1749
1750 if (db->mask)
1751 val = ioread64_hi_lo(db->addr) & db->mask;
1752 iowrite64_hi_lo(db->set | val, db->addr);
1753 }
1754 #endif
1755 }
1756
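/*
 * Usage sketch (illustrative only): a protocol would typically discover a
 * fastchannel while initializing a domain and later poke it directly from
 * its fast-path operations, ringing the optional doorbell afterwards. The
 * EXAMPLE_* ids and the variable names below are hypothetical placeholders.
 */
#if 0	/* not compiled, sketch only */
	void __iomem *level_set_addr;
	struct scmi_fc_db_info *level_set_db = NULL;

	/* Query a 4-byte wide fastchannel for a hypothetical LEVEL_SET */
	ph->hops->fastchannel_init(ph, EXAMPLE_DESCRIBE_FASTCHANNEL,
				   EXAMPLE_LEVEL_SET, sizeof(u32), domain,
				   &level_set_addr, &level_set_db);

	/* Fast path: write the new value, then ring the doorbell, if any */
	if (level_set_addr) {
		iowrite32(level, level_set_addr);
		ph->hops->fastchannel_db_ring(level_set_db);
	}
#endif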
1757 static const struct scmi_proto_helpers_ops helpers_ops = {
1758 .extended_name_get = scmi_common_extended_name_get,
1759 .iter_response_init = scmi_iterator_init,
1760 .iter_response_run = scmi_iterator_run,
1761 .fastchannel_init = scmi_common_fastchannel_init,
1762 .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
1763 };
1764
1765 /**
1766 * scmi_revision_area_get - Retrieve version memory area.
1767 *
1768 * @ph: A reference to the protocol handle.
1769 *
1770 * A helper to grab the version memory area reference during SCMI Base protocol
1771 * initialization.
1772 *
1773 * Return: A reference to the version memory area associated to the SCMI
1774 * instance underlying this protocol handle.
1775 */
1776 struct scmi_revision_info *
1777 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
1778 {
1779 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1780
1781 return pi->handle->version;
1782 }
1783
1784 /**
1785 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
1786 * instance descriptor.
1787 * @info: The reference to the related SCMI instance.
1788 * @proto: The protocol descriptor.
1789 *
1790 * Allocate a new protocol instance descriptor, using the provided @proto
1791 * description, against the specified SCMI instance @info, and initialize it;
1792  * all resource management is handled via a dedicated per-protocol devres
1793 * group.
1794 *
1795 * Context: Assumes to be called with @protocols_mtx already acquired.
1796 * Return: A reference to a freshly allocated and initialized protocol instance
1797  *	   or ERR_PTR on failure. On failure the @proto reference is first
1798  *	   put using @scmi_protocol_put() before releasing the whole devres group.
1799 */
1800 static struct scmi_protocol_instance *
1801 scmi_alloc_init_protocol_instance(struct scmi_info *info,
1802 const struct scmi_protocol *proto)
1803 {
1804 int ret = -ENOMEM;
1805 void *gid;
1806 struct scmi_protocol_instance *pi;
1807 const struct scmi_handle *handle = &info->handle;
1808
1809 /* Protocol specific devres group */
1810 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
1811 if (!gid) {
1812 scmi_protocol_put(proto->id);
1813 goto out;
1814 }
1815
1816 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
1817 if (!pi)
1818 goto clean;
1819
1820 pi->gid = gid;
1821 pi->proto = proto;
1822 pi->handle = handle;
1823 pi->ph.dev = handle->dev;
1824 pi->ph.xops = &xfer_ops;
1825 pi->ph.hops = &helpers_ops;
1826 pi->ph.set_priv = scmi_set_protocol_priv;
1827 pi->ph.get_priv = scmi_get_protocol_priv;
1828 refcount_set(&pi->users, 1);
1829 	/* proto->instance_init is assured NON-NULL by scmi_protocol_register */
1830 ret = pi->proto->instance_init(&pi->ph);
1831 if (ret)
1832 goto clean;
1833
1834 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
1835 GFP_KERNEL);
1836 if (ret != proto->id)
1837 goto clean;
1838
1839 /*
1840 * Warn but ignore events registration errors since we do not want
1841 * to skip whole protocols if their notifications are messed up.
1842 */
1843 if (pi->proto->events) {
1844 ret = scmi_register_protocol_events(handle, pi->proto->id,
1845 &pi->ph,
1846 pi->proto->events);
1847 if (ret)
1848 dev_warn(handle->dev,
1849 "Protocol:%X - Events Registration Failed - err:%d\n",
1850 pi->proto->id, ret);
1851 }
1852
1853 devres_close_group(handle->dev, pi->gid);
1854 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
1855
1856 if (pi->version > proto->supported_version)
1857 dev_warn(handle->dev,
1858 "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X."
1859 "Backward compatibility is NOT assured.\n",
1860 pi->version, pi->proto->id);
1861
1862 return pi;
1863
1864 clean:
1865 /* Take care to put the protocol module's owner before releasing all */
1866 scmi_protocol_put(proto->id);
1867 devres_release_group(handle->dev, gid);
1868 out:
1869 return ERR_PTR(ret);
1870 }
1871
1872 /**
1873 * scmi_get_protocol_instance - Protocol initialization helper.
1874 * @handle: A reference to the SCMI platform instance.
1875 * @protocol_id: The protocol being requested.
1876 *
1877 * In case the required protocol has never been requested before for this
1878 * instance, allocate and initialize all the needed structures while handling
1879 * resource allocation with a dedicated per-protocol devres subgroup.
1880 *
1881 * Return: A reference to an initialized protocol instance or error on failure:
1882 * in particular returns -EPROBE_DEFER when the desired protocol could
1883 * NOT be found.
1884 */
1885 static struct scmi_protocol_instance * __must_check
1886 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
1887 {
1888 struct scmi_protocol_instance *pi;
1889 struct scmi_info *info = handle_to_scmi_info(handle);
1890
1891 mutex_lock(&info->protocols_mtx);
1892 pi = idr_find(&info->protocols, protocol_id);
1893
1894 if (pi) {
1895 refcount_inc(&pi->users);
1896 } else {
1897 const struct scmi_protocol *proto;
1898
1899 /* Fails if protocol not registered on bus */
1900 proto = scmi_protocol_get(protocol_id);
1901 if (proto)
1902 pi = scmi_alloc_init_protocol_instance(info, proto);
1903 else
1904 pi = ERR_PTR(-EPROBE_DEFER);
1905 }
1906 mutex_unlock(&info->protocols_mtx);
1907
1908 return pi;
1909 }
1910
1911 /**
1912 * scmi_protocol_acquire - Protocol acquire
1913 * @handle: A reference to the SCMI platform instance.
1914 * @protocol_id: The protocol being requested.
1915 *
1916 * Register a new user for the requested protocol on the specified SCMI
1917 * platform instance, possibly triggering its initialization on first user.
1918 *
1919 * Return: 0 if protocol was acquired successfully.
1920 */
1921 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
1922 {
1923 return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
1924 }
1925
1926 /**
1927 * scmi_protocol_release - Protocol de-initialization helper.
1928 * @handle: A reference to the SCMI platform instance.
1929 * @protocol_id: The protocol being requested.
1930 *
1931  * Remove one user for the specified protocol and trigger de-initialization
1932  * and resource de-allocation once the last user has gone.
1933 */
1934 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
1935 {
1936 struct scmi_info *info = handle_to_scmi_info(handle);
1937 struct scmi_protocol_instance *pi;
1938
1939 mutex_lock(&info->protocols_mtx);
1940 pi = idr_find(&info->protocols, protocol_id);
1941 if (WARN_ON(!pi))
1942 goto out;
1943
1944 if (refcount_dec_and_test(&pi->users)) {
1945 void *gid = pi->gid;
1946
1947 if (pi->proto->events)
1948 scmi_deregister_protocol_events(handle, protocol_id);
1949
1950 if (pi->proto->instance_deinit)
1951 pi->proto->instance_deinit(&pi->ph);
1952
1953 idr_remove(&info->protocols, protocol_id);
1954
1955 scmi_protocol_put(protocol_id);
1956
1957 devres_release_group(handle->dev, gid);
1958 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
1959 protocol_id);
1960 }
1961
1962 out:
1963 mutex_unlock(&info->protocols_mtx);
1964 }
1965
1966 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
1967 u8 *prot_imp)
1968 {
1969 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1970 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1971
1972 info->protocols_imp = prot_imp;
1973 }
1974
1975 static bool
1976 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
1977 {
1978 int i;
1979 struct scmi_info *info = handle_to_scmi_info(handle);
1980 struct scmi_revision_info *rev = handle->version;
1981
1982 if (!info->protocols_imp)
1983 return false;
1984
1985 for (i = 0; i < rev->num_protocols; i++)
1986 if (info->protocols_imp[i] == prot_id)
1987 return true;
1988 return false;
1989 }
1990
1991 struct scmi_protocol_devres {
1992 const struct scmi_handle *handle;
1993 u8 protocol_id;
1994 };
1995
1996 static void scmi_devm_release_protocol(struct device *dev, void *res)
1997 {
1998 struct scmi_protocol_devres *dres = res;
1999
2000 scmi_protocol_release(dres->handle, dres->protocol_id);
2001 }
2002
2003 static struct scmi_protocol_instance __must_check *
2004 scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
2005 {
2006 struct scmi_protocol_instance *pi;
2007 struct scmi_protocol_devres *dres;
2008
2009 dres = devres_alloc(scmi_devm_release_protocol,
2010 sizeof(*dres), GFP_KERNEL);
2011 if (!dres)
2012 return ERR_PTR(-ENOMEM);
2013
2014 pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2015 if (IS_ERR(pi)) {
2016 devres_free(dres);
2017 return pi;
2018 }
2019
2020 dres->handle = sdev->handle;
2021 dres->protocol_id = protocol_id;
2022 devres_add(&sdev->dev, dres);
2023
2024 return pi;
2025 }
2026
2027 /**
2028 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2029 * @sdev: A reference to an scmi_device whose embedded struct device is to
2030 * be used for devres accounting.
2031 * @protocol_id: The protocol being requested.
2032 * @ph: A pointer reference used to pass back the associated protocol handle.
2033 *
2034  * Get hold of a protocol, accounting for its usage and possibly triggering
2035  * its initialization, and return the protocol specific operations and the
2036  * related protocol handle which will be used as the first argument in most
2037  * of the protocol operations methods.
2038  * Being a devres based managed method, the protocol hold will be
2039  * automatically released, and possibly de-initialized on last user, once the
2040  * SCMI driver owning the scmi_device is unbound from it.
2041 *
2042 * Return: A reference to the requested protocol operations or error.
2043 * Must be checked for errors by caller.
2044 */
2045 static const void __must_check *
2046 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2047 struct scmi_protocol_handle **ph)
2048 {
2049 struct scmi_protocol_instance *pi;
2050
2051 if (!ph)
2052 return ERR_PTR(-EINVAL);
2053
2054 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2055 if (IS_ERR(pi))
2056 return pi;
2057
2058 *ph = &pi->ph;
2059
2060 return pi->proto->ops;
2061 }
2062
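/*
 * Usage sketch (illustrative only) from the probe of an SCMI driver; the
 * Clock protocol is used as the example, any other protocol ops would do.
 */
#if 0	/* not compiled, sketch only */
static int example_scmi_driver_probe(struct scmi_device *sdev)
{
	struct scmi_protocol_handle *ph;
	const struct scmi_clk_proto_ops *clk_ops;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/*
	 * Use clk_ops passing @ph as the first argument; the protocol hold
	 * is dropped automatically when this driver unbinds from sdev.
	 */
	return 0;
}
#endif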
2063 /**
2064 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2065 * @sdev: A reference to an scmi_device whose embedded struct device is to
2066 * be used for devres accounting.
2067 * @protocol_id: The protocol being requested.
2068 *
2069 * Get hold of a protocol accounting for its usage, possibly triggering its
2070 * initialization but without getting access to its protocol specific operations
2071 * and handle.
2072 *
2073  * Being a devres based managed method, the protocol hold will be automatically
2074 * released, and possibly de-initialized on last user, once the SCMI driver
2075 * owning the scmi_device is unbound from it.
2076 *
2077 * Return: 0 on SUCCESS
2078 */
2079 static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2080 u8 protocol_id)
2081 {
2082 struct scmi_protocol_instance *pi;
2083
2084 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2085 if (IS_ERR(pi))
2086 return PTR_ERR(pi);
2087
2088 return 0;
2089 }
2090
2091 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2092 {
2093 struct scmi_protocol_devres *dres = res;
2094
2095 if (WARN_ON(!dres || !data))
2096 return 0;
2097
2098 return dres->protocol_id == *((u8 *)data);
2099 }
2100
2101 /**
2102 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2103 * @sdev: A reference to an scmi_device whose embedded struct device is to
2104 * be used for devres accounting.
2105 * @protocol_id: The protocol being requested.
2106 *
2107  * Explicitly release a protocol hold which was previously obtained by
2108  * calling @scmi_devm_protocol_get() above.
2109 */
2110 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2111 {
2112 int ret;
2113
2114 ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2115 scmi_devm_protocol_match, &protocol_id);
2116 WARN_ON(ret);
2117 }
2118
2119 /**
2120 * scmi_is_transport_atomic - Method to check if underlying transport for an
2121 * SCMI instance is configured as atomic.
2122 *
2123 * @handle: A reference to the SCMI platform instance.
2124 * @atomic_threshold: An optional return value for the system wide currently
2125 * configured threshold for atomic operations.
2126 *
2127 * Return: True if transport is configured as atomic
2128 */
2129 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2130 unsigned int *atomic_threshold)
2131 {
2132 bool ret;
2133 struct scmi_info *info = handle_to_scmi_info(handle);
2134
2135 ret = info->desc->atomic_enabled &&
2136 is_transport_polling_capable(info->desc);
2137 if (ret && atomic_threshold)
2138 *atomic_threshold = info->atomic_threshold;
2139
2140 return ret;
2141 }
2142
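/*
 * Usage sketch (illustrative only): an SCMI driver may query the transport
 * mode to choose between sleeping and atomic/polling based operation.
 */
#if 0	/* not compiled, sketch only */
	unsigned int atomic_threshold_us;

	if (handle->is_transport_atomic(handle, &atomic_threshold_us))
		dev_info(dev, "atomic transport, threshold %uus\n",
			 atomic_threshold_us);
#endif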
2143 /**
2144 * scmi_handle_get() - Get the SCMI handle for a device
2145 *
2146 * @dev: pointer to device for which we want SCMI handle
2147 *
2148 * NOTE: The function does not track individual clients of the framework
2149  * and is expected to be maintained by the caller of the SCMI protocol
2150  * library. Each scmi_handle_put() must balance a successful scmi_handle_get().
2151 *
2152 * Return: pointer to handle if successful, NULL on error
2153 */
2154 static struct scmi_handle *scmi_handle_get(struct device *dev)
2155 {
2156 struct list_head *p;
2157 struct scmi_info *info;
2158 struct scmi_handle *handle = NULL;
2159
2160 mutex_lock(&scmi_list_mutex);
2161 list_for_each(p, &scmi_list) {
2162 info = list_entry(p, struct scmi_info, node);
2163 if (dev->parent == info->dev) {
2164 info->users++;
2165 handle = &info->handle;
2166 break;
2167 }
2168 }
2169 mutex_unlock(&scmi_list_mutex);
2170
2171 return handle;
2172 }
2173
2174 /**
2175 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2176 *
2177 * @handle: handle acquired by scmi_handle_get
2178 *
2179 * NOTE: The function does not track individual clients of the framework
2180  * and is expected to be maintained by the caller of the SCMI protocol
2181  * library. Each scmi_handle_put() must balance a successful scmi_handle_get().
2182 *
2183  * Return: 0 if the handle was successfully released;
2184  *	   -EINVAL if NULL was passed.
2185 */
2186 static int scmi_handle_put(const struct scmi_handle *handle)
2187 {
2188 struct scmi_info *info;
2189
2190 if (!handle)
2191 return -EINVAL;
2192
2193 info = handle_to_scmi_info(handle);
2194 mutex_lock(&scmi_list_mutex);
2195 if (!WARN_ON(!info->users))
2196 info->users--;
2197 mutex_unlock(&scmi_list_mutex);
2198
2199 return 0;
2200 }
2201
2202 static void scmi_device_link_add(struct device *consumer,
2203 struct device *supplier)
2204 {
2205 struct device_link *link;
2206
2207 link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2208
2209 WARN_ON(!link);
2210 }
2211
2212 static void scmi_set_handle(struct scmi_device *scmi_dev)
2213 {
2214 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2215 if (scmi_dev->handle)
2216 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2217 }
2218
2219 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2220 struct scmi_xfers_info *info)
2221 {
2222 int i;
2223 struct scmi_xfer *xfer;
2224 struct device *dev = sinfo->dev;
2225 const struct scmi_desc *desc = sinfo->desc;
2226
2227 /* Pre-allocated messages, no more than what hdr.seq can support */
2228 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2229 dev_err(dev,
2230 "Invalid maximum messages %d, not in range [1 - %lu]\n",
2231 info->max_msg, MSG_TOKEN_MAX);
2232 return -EINVAL;
2233 }
2234
2235 hash_init(info->pending_xfers);
2236
2237 /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2238 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2239 GFP_KERNEL);
2240 if (!info->xfer_alloc_table)
2241 return -ENOMEM;
2242
2243 /*
2244 * Preallocate a number of xfers equal to max inflight messages,
2245 * pre-initialize the buffer pointer to pre-allocated buffers and
2246 * attach all of them to the free list
2247 */
2248 INIT_HLIST_HEAD(&info->free_xfers);
2249 for (i = 0; i < info->max_msg; i++) {
2250 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2251 if (!xfer)
2252 return -ENOMEM;
2253
2254 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2255 GFP_KERNEL);
2256 if (!xfer->rx.buf)
2257 return -ENOMEM;
2258
2259 xfer->tx.buf = xfer->rx.buf;
2260 init_completion(&xfer->done);
2261 spin_lock_init(&xfer->lock);
2262
2263 /* Add initialized xfer to the free list */
2264 hlist_add_head(&xfer->node, &info->free_xfers);
2265 }
2266
2267 spin_lock_init(&info->xfer_lock);
2268
2269 return 0;
2270 }
2271
2272 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2273 {
2274 const struct scmi_desc *desc = sinfo->desc;
2275
2276 if (!desc->ops->get_max_msg) {
2277 sinfo->tx_minfo.max_msg = desc->max_msg;
2278 sinfo->rx_minfo.max_msg = desc->max_msg;
2279 } else {
2280 struct scmi_chan_info *base_cinfo;
2281
2282 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2283 if (!base_cinfo)
2284 return -EINVAL;
2285 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2286
2287 /* RX channel is optional so can be skipped */
2288 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2289 if (base_cinfo)
2290 sinfo->rx_minfo.max_msg =
2291 desc->ops->get_max_msg(base_cinfo);
2292 }
2293
2294 return 0;
2295 }
2296
2297 static int scmi_xfer_info_init(struct scmi_info *sinfo)
2298 {
2299 int ret;
2300
2301 ret = scmi_channels_max_msg_configure(sinfo);
2302 if (ret)
2303 return ret;
2304
2305 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2306 if (!ret && !idr_is_empty(&sinfo->rx_idr))
2307 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2308
2309 return ret;
2310 }
2311
2312 static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2313 int prot_id, bool tx)
2314 {
2315 int ret, idx;
2316 char name[32];
2317 struct scmi_chan_info *cinfo;
2318 struct idr *idr;
2319 struct scmi_device *tdev = NULL;
2320
2321 /* Transmit channel is first entry i.e. index 0 */
2322 idx = tx ? 0 : 1;
2323 idr = tx ? &info->tx_idr : &info->rx_idr;
2324
2325 if (!info->desc->ops->chan_available(of_node, idx)) {
2326 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2327 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2328 return -EINVAL;
2329 goto idr_alloc;
2330 }
2331
2332 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2333 if (!cinfo)
2334 return -ENOMEM;
2335
2336 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2337
2338 /* Create a unique name for this transport device */
2339 snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2340 idx ? "rx" : "tx", prot_id);
2341 /* Create a uniquely named, dedicated transport device for this chan */
2342 tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2343 if (!tdev) {
2344 dev_err(info->dev,
2345 "failed to create transport device (%s)\n", name);
2346 devm_kfree(info->dev, cinfo);
2347 return -EINVAL;
2348 }
2349 of_node_get(of_node);
2350
2351 cinfo->id = prot_id;
2352 cinfo->dev = &tdev->dev;
2353 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2354 if (ret) {
2355 of_node_put(of_node);
2356 scmi_device_destroy(info->dev, prot_id, name);
2357 devm_kfree(info->dev, cinfo);
2358 return ret;
2359 }
2360
2361 if (tx && is_polling_required(cinfo, info->desc)) {
2362 if (is_transport_polling_capable(info->desc))
2363 dev_info(&tdev->dev,
2364 "Enabled polling mode TX channel - prot_id:%d\n",
2365 prot_id);
2366 else
2367 dev_warn(&tdev->dev,
2368 "Polling mode NOT supported by transport.\n");
2369 }
2370
2371 idr_alloc:
2372 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2373 if (ret != prot_id) {
2374 dev_err(info->dev,
2375 "unable to allocate SCMI idr slot err %d\n", ret);
2376 /* Destroy channel and device only if created by this call. */
2377 if (tdev) {
2378 of_node_put(of_node);
2379 scmi_device_destroy(info->dev, prot_id, name);
2380 devm_kfree(info->dev, cinfo);
2381 }
2382 return ret;
2383 }
2384
2385 cinfo->handle = &info->handle;
2386 return 0;
2387 }
2388
2389 static inline int
2390 scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2391 int prot_id)
2392 {
2393 int ret = scmi_chan_setup(info, of_node, prot_id, true);
2394
2395 if (!ret) {
2396 /* Rx is optional, report only memory errors */
2397 ret = scmi_chan_setup(info, of_node, prot_id, false);
2398 if (ret && ret != -ENOMEM)
2399 ret = 0;
2400 }
2401
2402 return ret;
2403 }
2404
2405 /**
2406 * scmi_channels_setup - Helper to initialize all required channels
2407 *
2408 * @info: The SCMI instance descriptor.
2409 *
2410  * Initialize all the channels described in the DT against the underlying
2411 * configured transport using custom defined dedicated devices instead of
2412 * borrowing devices from the SCMI drivers; this way channels are initialized
2413  * upfront during core SCMI stack probing and are no longer coupled with the
2414  * SCMI devices used by SCMI drivers.
2415 *
2416 * Note that, even though a pair of TX/RX channels is associated to each
2417 * protocol defined in the DT, a distinct freshly initialized channel is
2418 * created only if the DT node for the protocol at hand describes a dedicated
2419 * channel: in all the other cases the common BASE protocol channel is reused.
2420 *
2421 * Return: 0 on Success
2422 */
2423 static int scmi_channels_setup(struct scmi_info *info)
2424 {
2425 int ret;
2426 struct device_node *child, *top_np = info->dev->of_node;
2427
2428 /* Initialize a common generic channel at first */
2429 ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2430 if (ret)
2431 return ret;
2432
2433 for_each_available_child_of_node(top_np, child) {
2434 u32 prot_id;
2435
2436 if (of_property_read_u32(child, "reg", &prot_id))
2437 continue;
2438
2439 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2440 dev_err(info->dev,
2441 "Out of range protocol %d\n", prot_id);
2442
2443 ret = scmi_txrx_setup(info, child, prot_id);
2444 if (ret) {
2445 of_node_put(child);
2446 return ret;
2447 }
2448 }
2449
2450 return 0;
2451 }
2452
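/*
 * DT sketch (illustrative only) of the topology walked above: the protocol
 * node below would get its own dedicated channel only if it carried its own
 * transport properties, otherwise the common BASE protocol channel is
 * shared. All property values are placeholders.
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mhu 0 0>, <&mhu 0 1>;
 *			shmem = <&scmi_tx_shmem>, <&scmi_rx_shmem>;
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			protocol@13 {
 *				reg = <0x13>;	// protocol id, the "reg" read above
 *			};
 *		};
 *	};
 */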
2453 static int scmi_chan_destroy(int id, void *p, void *idr)
2454 {
2455 struct scmi_chan_info *cinfo = p;
2456
2457 if (cinfo->dev) {
2458 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2459 struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2460
2461 of_node_put(cinfo->dev->of_node);
2462 scmi_device_destroy(info->dev, id, sdev->name);
2463 cinfo->dev = NULL;
2464 }
2465
2466 idr_remove(idr, id);
2467
2468 return 0;
2469 }
2470
2471 static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2472 {
2473 /* At first free all channels at the transport layer ... */
2474 idr_for_each(idr, info->desc->ops->chan_free, idr);
2475
2476 /* ...then destroy all underlying devices */
2477 idr_for_each(idr, scmi_chan_destroy, idr);
2478
2479 idr_destroy(idr);
2480 }
2481
2482 static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2483 {
2484 scmi_cleanup_channels(info, &info->tx_idr);
2485
2486 scmi_cleanup_channels(info, &info->rx_idr);
2487 }
2488
2489 static int scmi_bus_notifier(struct notifier_block *nb,
2490 unsigned long action, void *data)
2491 {
2492 struct scmi_info *info = bus_nb_to_scmi_info(nb);
2493 struct scmi_device *sdev = to_scmi_dev(data);
2494
2495 /* Skip transport devices and devices of different SCMI instances */
2496 if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
2497 sdev->dev.parent != info->dev)
2498 return NOTIFY_DONE;
2499
2500 switch (action) {
2501 case BUS_NOTIFY_BIND_DRIVER:
2502 /* setup handle now as the transport is ready */
2503 scmi_set_handle(sdev);
2504 break;
2505 case BUS_NOTIFY_UNBOUND_DRIVER:
2506 scmi_handle_put(sdev->handle);
2507 sdev->handle = NULL;
2508 break;
2509 default:
2510 return NOTIFY_DONE;
2511 }
2512
2513 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2514 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2515 "about to be BOUND." : "UNBOUND.");
2516
2517 return NOTIFY_OK;
2518 }
2519
2520 static int scmi_device_request_notifier(struct notifier_block *nb,
2521 unsigned long action, void *data)
2522 {
2523 struct device_node *np;
2524 struct scmi_device_id *id_table = data;
2525 struct scmi_info *info = req_nb_to_scmi_info(nb);
2526
2527 np = idr_find(&info->active_protocols, id_table->protocol_id);
2528 if (!np)
2529 return NOTIFY_DONE;
2530
2531 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2532 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2533 id_table->name, id_table->protocol_id);
2534
2535 switch (action) {
2536 case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2537 scmi_create_protocol_devices(np, info, id_table->protocol_id,
2538 id_table->name);
2539 break;
2540 case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2541 scmi_destroy_protocol_devices(info, id_table->protocol_id,
2542 id_table->name);
2543 break;
2544 default:
2545 return NOTIFY_DONE;
2546 }
2547
2548 return NOTIFY_OK;
2549 }
2550
2551 static void scmi_debugfs_common_cleanup(void *d)
2552 {
2553 struct scmi_debug_info *dbg = d;
2554
2555 if (!dbg)
2556 return;
2557
2558 debugfs_remove_recursive(dbg->top_dentry);
2559 kfree(dbg->name);
2560 kfree(dbg->type);
2561 }
2562
2563 static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
2564 {
2565 char top_dir[16];
2566 struct dentry *trans, *top_dentry;
2567 struct scmi_debug_info *dbg;
2568 const char *c_ptr = NULL;
2569
2570 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2571 if (!dbg)
2572 return NULL;
2573
2574 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2575 if (!dbg->name) {
2576 devm_kfree(info->dev, dbg);
2577 return NULL;
2578 }
2579
2580 of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2581 dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2582 if (!dbg->type) {
2583 kfree(dbg->name);
2584 devm_kfree(info->dev, dbg);
2585 return NULL;
2586 }
2587
2588 snprintf(top_dir, 16, "%d", info->id);
2589 top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
2590 trans = debugfs_create_dir("transport", top_dentry);
2591
2592 dbg->is_atomic = info->desc->atomic_enabled &&
2593 is_transport_polling_capable(info->desc);
2594
2595 debugfs_create_str("instance_name", 0400, top_dentry,
2596 (char **)&dbg->name);
2597
2598 debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
2599 &info->atomic_threshold);
2600
2601 debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
2602
2603 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
2604
2605 debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
2606 (u32 *)&info->desc->max_rx_timeout_ms);
2607
2608 debugfs_create_u32("max_msg_size", 0400, trans,
2609 (u32 *)&info->desc->max_msg_size);
2610
2611 debugfs_create_u32("tx_max_msg", 0400, trans,
2612 (u32 *)&info->tx_minfo.max_msg);
2613
2614 debugfs_create_u32("rx_max_msg", 0400, trans,
2615 (u32 *)&info->rx_minfo.max_msg);
2616
2617 dbg->top_dentry = top_dentry;
2618
2619 if (devm_add_action_or_reset(info->dev,
2620 scmi_debugfs_common_cleanup, dbg)) {
2621 scmi_debugfs_common_cleanup(dbg);
2622 return NULL;
2623 }
2624
2625 return dbg;
2626 }
2627
2628 static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
2629 {
2630 int id, num_chans = 0, ret = 0;
2631 struct scmi_chan_info *cinfo;
2632 u8 channels[SCMI_MAX_CHANNELS] = {};
2633 DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
2634
2635 if (!info->dbg)
2636 return -EINVAL;
2637
2638 /* Enumerate all channels to collect their ids */
2639 idr_for_each_entry(&info->tx_idr, cinfo, id) {
2640 /*
2641 * Cannot happen, but be defensive.
2642 * Zero as num_chans is ok, warn and carry on.
2643 */
2644 if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
2645 dev_warn(info->dev,
2646 "SCMI RAW - Error enumerating channels\n");
2647 break;
2648 }
2649
2650 if (!test_bit(cinfo->id, protos)) {
2651 channels[num_chans++] = cinfo->id;
2652 set_bit(cinfo->id, protos);
2653 }
2654 }
2655
2656 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
2657 info->id, channels, num_chans,
2658 info->desc, info->tx_minfo.max_msg);
2659 if (IS_ERR(info->raw)) {
2660 dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
2661 ret = PTR_ERR(info->raw);
2662 info->raw = NULL;
2663 }
2664
2665 return ret;
2666 }
2667
2668 static int scmi_probe(struct platform_device *pdev)
2669 {
2670 int ret;
2671 struct scmi_handle *handle;
2672 const struct scmi_desc *desc;
2673 struct scmi_info *info;
2674 bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
2675 struct device *dev = &pdev->dev;
2676 struct device_node *child, *np = dev->of_node;
2677
2678 desc = of_device_get_match_data(dev);
2679 if (!desc)
2680 return -EINVAL;
2681
2682 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
2683 if (!info)
2684 return -ENOMEM;
2685
2686 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
2687 if (info->id < 0)
2688 return info->id;
2689
2690 info->dev = dev;
2691 info->desc = desc;
2692 info->bus_nb.notifier_call = scmi_bus_notifier;
2693 info->dev_req_nb.notifier_call = scmi_device_request_notifier;
2694 INIT_LIST_HEAD(&info->node);
2695 idr_init(&info->protocols);
2696 mutex_init(&info->protocols_mtx);
2697 idr_init(&info->active_protocols);
2698 mutex_init(&info->devreq_mtx);
2699
2700 platform_set_drvdata(pdev, info);
2701 idr_init(&info->tx_idr);
2702 idr_init(&info->rx_idr);
2703
2704 handle = &info->handle;
2705 handle->dev = info->dev;
2706 handle->version = &info->version;
2707 handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
2708 handle->devm_protocol_get = scmi_devm_protocol_get;
2709 handle->devm_protocol_put = scmi_devm_protocol_put;
2710
2711 /* System wide atomic threshold for atomic ops .. if any */
2712 if (!of_property_read_u32(np, "atomic-threshold-us",
2713 &info->atomic_threshold))
2714 dev_info(dev,
2715 "SCMI System wide atomic threshold set to %d us\n",
2716 info->atomic_threshold);
2717 handle->is_transport_atomic = scmi_is_transport_atomic;
2718
2719 if (desc->ops->link_supplier) {
2720 ret = desc->ops->link_supplier(dev);
2721 if (ret)
2722 goto clear_ida;
2723 }
2724
2725 /* Setup all channels described in the DT at first */
2726 ret = scmi_channels_setup(info);
2727 if (ret)
2728 goto clear_ida;
2729
2730 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
2731 if (ret)
2732 goto clear_txrx_setup;
2733
2734 ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
2735 &info->dev_req_nb);
2736 if (ret)
2737 goto clear_bus_notifier;
2738
2739 ret = scmi_xfer_info_init(info);
2740 if (ret)
2741 goto clear_dev_req_notifier;
2742
2743 if (scmi_top_dentry) {
2744 info->dbg = scmi_debugfs_common_setup(info);
2745 if (!info->dbg)
2746 dev_warn(dev, "Failed to setup SCMI debugfs.\n");
2747
2748 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
2749 ret = scmi_debugfs_raw_mode_setup(info);
2750 if (!coex) {
2751 if (ret)
2752 goto clear_dev_req_notifier;
2753
2754 /* Bail out anyway when coex disabled. */
2755 return 0;
2756 }
2757
2758 /* Coex enabled, carry on in any case. */
2759 dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
2760 }
2761 }
2762
2763 if (scmi_notification_init(handle))
2764 dev_err(dev, "SCMI Notifications NOT available.\n");
2765
2766 if (info->desc->atomic_enabled &&
2767 !is_transport_polling_capable(info->desc))
2768 dev_err(dev,
2769 "Transport is not polling capable. Atomic mode not supported.\n");
2770
2771 /*
2772 * Trigger SCMI Base protocol initialization.
2773 	 * It's mandatory and won't ever be released/deinitialized until the
2774 	 * SCMI stack is shut down/unloaded as a whole.
2775 */
2776 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
2777 if (ret) {
2778 dev_err(dev, "unable to communicate with SCMI\n");
2779 if (coex)
2780 return 0;
2781 goto notification_exit;
2782 }
2783
2784 mutex_lock(&scmi_list_mutex);
2785 list_add_tail(&info->node, &scmi_list);
2786 mutex_unlock(&scmi_list_mutex);
2787
2788 for_each_available_child_of_node(np, child) {
2789 u32 prot_id;
2790
2791 if (of_property_read_u32(child, "reg", &prot_id))
2792 continue;
2793
2794 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2795 dev_err(dev, "Out of range protocol %d\n", prot_id);
2796
2797 if (!scmi_is_protocol_implemented(handle, prot_id)) {
2798 dev_err(dev, "SCMI protocol %d not implemented\n",
2799 prot_id);
2800 continue;
2801 }
2802
2803 /*
2804 * Save this valid DT protocol descriptor amongst
2805 		 * @active_protocols for this SCMI instance.
2806 */
2807 ret = idr_alloc(&info->active_protocols, child,
2808 prot_id, prot_id + 1, GFP_KERNEL);
2809 if (ret != prot_id) {
2810 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
2811 prot_id);
2812 continue;
2813 }
2814
2815 of_node_get(child);
2816 scmi_create_protocol_devices(child, info, prot_id, NULL);
2817 }
2818
2819 return 0;
2820
2821 notification_exit:
2822 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
2823 scmi_raw_mode_cleanup(info->raw);
2824 scmi_notification_exit(&info->handle);
2825 clear_dev_req_notifier:
2826 blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
2827 &info->dev_req_nb);
2828 clear_bus_notifier:
2829 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
2830 clear_txrx_setup:
2831 scmi_cleanup_txrx_channels(info);
2832 clear_ida:
2833 ida_free(&scmi_id, info->id);
2834 return ret;
2835 }
2836
2837 static void scmi_remove(struct platform_device *pdev)
2838 {
2839 int id;
2840 struct scmi_info *info = platform_get_drvdata(pdev);
2841 struct device_node *child;
2842
2843 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
2844 scmi_raw_mode_cleanup(info->raw);
2845
2846 mutex_lock(&scmi_list_mutex);
2847 if (info->users)
2848 dev_warn(&pdev->dev,
2849 "Still active SCMI users will be forcibly unbound.\n");
2850 list_del(&info->node);
2851 mutex_unlock(&scmi_list_mutex);
2852
2853 scmi_notification_exit(&info->handle);
2854
2855 mutex_lock(&info->protocols_mtx);
2856 idr_destroy(&info->protocols);
2857 mutex_unlock(&info->protocols_mtx);
2858
2859 idr_for_each_entry(&info->active_protocols, child, id)
2860 of_node_put(child);
2861 idr_destroy(&info->active_protocols);
2862
2863 blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
2864 &info->dev_req_nb);
2865 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
2866
2867 /* Safe to free channels since no more users */
2868 scmi_cleanup_txrx_channels(info);
2869
2870 ida_free(&scmi_id, info->id);
2871 }
2872
2873 static ssize_t protocol_version_show(struct device *dev,
2874 struct device_attribute *attr, char *buf)
2875 {
2876 struct scmi_info *info = dev_get_drvdata(dev);
2877
2878 return sprintf(buf, "%u.%u\n", info->version.major_ver,
2879 info->version.minor_ver);
2880 }
2881 static DEVICE_ATTR_RO(protocol_version);
2882
2883 static ssize_t firmware_version_show(struct device *dev,
2884 struct device_attribute *attr, char *buf)
2885 {
2886 struct scmi_info *info = dev_get_drvdata(dev);
2887
2888 return sprintf(buf, "0x%x\n", info->version.impl_ver);
2889 }
2890 static DEVICE_ATTR_RO(firmware_version);
2891
2892 static ssize_t vendor_id_show(struct device *dev,
2893 struct device_attribute *attr, char *buf)
2894 {
2895 struct scmi_info *info = dev_get_drvdata(dev);
2896
2897 return sprintf(buf, "%s\n", info->version.vendor_id);
2898 }
2899 static DEVICE_ATTR_RO(vendor_id);
2900
2901 static ssize_t sub_vendor_id_show(struct device *dev,
2902 struct device_attribute *attr, char *buf)
2903 {
2904 struct scmi_info *info = dev_get_drvdata(dev);
2905
2906 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2907 }
2908 static DEVICE_ATTR_RO(sub_vendor_id);
2909
2910 static struct attribute *versions_attrs[] = {
2911 &dev_attr_firmware_version.attr,
2912 &dev_attr_protocol_version.attr,
2913 &dev_attr_vendor_id.attr,
2914 &dev_attr_sub_vendor_id.attr,
2915 NULL,
2916 };
2917 ATTRIBUTE_GROUPS(versions);
2918
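/*
 * The version attributes above are exposed via sysfs on the core platform
 * device, e.g. (the exact parent path depends on how the DT node is
 * named/instantiated):
 *	/sys/devices/platform/firmware:scmi/firmware_version
 */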
2919 /* Each compatible listed below must have descriptor associated with it */
2920 static const struct of_device_id scmi_of_match[] = {
2921 #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
2922 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
2923 #endif
2924 #ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
2925 { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2926 #endif
2927 #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
2928 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2929 { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
2930 { .compatible = "qcom,scmi-smc", .data = &scmi_smc_desc},
2931 #endif
2932 #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
2933 { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2934 #endif
2935 { /* Sentinel */ },
2936 };
2937
2938 MODULE_DEVICE_TABLE(of, scmi_of_match);
2939
2940 static struct platform_driver scmi_driver = {
2941 .driver = {
2942 .name = "arm-scmi",
2943 .suppress_bind_attrs = true,
2944 .of_match_table = scmi_of_match,
2945 .dev_groups = versions_groups,
2946 },
2947 .probe = scmi_probe,
2948 .remove_new = scmi_remove,
2949 };
2950
2951 /**
2952 * __scmi_transports_setup - Common helper to call transport-specific
2953 * .init/.exit code if provided.
2954 *
2955 * @init: A flag to distinguish between init and exit.
2956 *
2957 * Note that, if provided, we invoke .init/.exit functions for all the
2958 * transports currently compiled in.
2959 *
2960 * Return: 0 on Success.
2961 */
2962 static inline int __scmi_transports_setup(bool init)
2963 {
2964 int ret = 0;
2965 const struct of_device_id *trans;
2966
2967 for (trans = scmi_of_match; trans->data; trans++) {
2968 const struct scmi_desc *tdesc = trans->data;
2969
2970 if ((init && !tdesc->transport_init) ||
2971 (!init && !tdesc->transport_exit))
2972 continue;
2973
2974 if (init)
2975 ret = tdesc->transport_init();
2976 else
2977 tdesc->transport_exit();
2978
2979 if (ret) {
2980 pr_err("SCMI transport %s FAILED initialization!\n",
2981 trans->compatible);
2982 break;
2983 }
2984 }
2985
2986 return ret;
2987 }
2988
2989 static int __init scmi_transports_init(void)
2990 {
2991 return __scmi_transports_setup(true);
2992 }
2993
2994 static void __exit scmi_transports_exit(void)
2995 {
2996 __scmi_transports_setup(false);
2997 }
2998
2999 static struct dentry *scmi_debugfs_init(void)
3000 {
3001 struct dentry *d;
3002
3003 d = debugfs_create_dir("scmi", NULL);
3004 if (IS_ERR(d)) {
3005 pr_err("Could NOT create SCMI top dentry.\n");
3006 return NULL;
3007 }
3008
3009 return d;
3010 }
3011
3012 static int __init scmi_driver_init(void)
3013 {
3014 int ret;
3015
3016 /* Bail out if no SCMI transport was configured */
3017 if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3018 return -EINVAL;
3019
3020 /* Initialize any compiled-in transport which provided an init/exit */
3021 ret = scmi_transports_init();
3022 if (ret)
3023 return ret;
3024
3025 if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3026 scmi_top_dentry = scmi_debugfs_init();
3027
3028 scmi_base_register();
3029
3030 scmi_clock_register();
3031 scmi_perf_register();
3032 scmi_power_register();
3033 scmi_reset_register();
3034 scmi_sensors_register();
3035 scmi_voltage_register();
3036 scmi_system_register();
3037 scmi_powercap_register();
3038
3039 return platform_driver_register(&scmi_driver);
3040 }
3041 module_init(scmi_driver_init);
3042
3043 static void __exit scmi_driver_exit(void)
3044 {
3045 scmi_base_unregister();
3046
3047 scmi_clock_unregister();
3048 scmi_perf_unregister();
3049 scmi_power_unregister();
3050 scmi_reset_unregister();
3051 scmi_sensors_unregister();
3052 scmi_voltage_unregister();
3053 scmi_system_unregister();
3054 scmi_powercap_unregister();
3055
3056 scmi_transports_exit();
3057
3058 platform_driver_unregister(&scmi_driver);
3059
3060 debugfs_remove_recursive(scmi_top_dentry);
3061 }
3062 module_exit(scmi_driver_exit);
3063
3064 MODULE_ALIAS("platform:arm-scmi");
3065 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
3066 MODULE_DESCRIPTION("ARM SCMI protocol driver");
3067 MODULE_LICENSE("GPL v2");
3068