1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * System Trace Module (STM) infrastructure
4 * Copyright (c) 2014, Intel Corporation.
5 *
6 * STM class implements generic infrastructure for System Trace Module devices
7 * as defined in MIPI STPv2 specification.
8 */
9
10 #include <linux/pm_runtime.h>
11 #include <linux/uaccess.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/device.h>
15 #include <linux/compat.h>
16 #include <linux/kdev_t.h>
17 #include <linux/srcu.h>
18 #include <linux/slab.h>
19 #include <linux/stm.h>
20 #include <linux/fs.h>
21 #include <linux/mm.h>
22 #include <linux/vmalloc.h>
23 #include "stm.h"
24
25 #include <uapi/linux/stm.h>
26
/*
 * Nonzero once the STM core is ready; checked before device lookup in
 * stm_find_device() and before registration in stm_register_device().
 * NOTE(review): set by core init code outside this view — confirm.
 */
static unsigned int stm_core_up;

/*
 * The SRCU here makes sure that STM device doesn't disappear from under a
 * stm_source_write() caller, which may want to have as little overhead as
 * possible.
 */
static struct srcu_struct stm_source_srcu;
35
masters_show(struct device * dev,struct device_attribute * attr,char * buf)36 static ssize_t masters_show(struct device *dev,
37 struct device_attribute *attr,
38 char *buf)
39 {
40 struct stm_device *stm = to_stm_device(dev);
41 int ret;
42
43 ret = sprintf(buf, "%u %u\n", stm->data->sw_start, stm->data->sw_end);
44
45 return ret;
46 }
47
48 static DEVICE_ATTR_RO(masters);
49
channels_show(struct device * dev,struct device_attribute * attr,char * buf)50 static ssize_t channels_show(struct device *dev,
51 struct device_attribute *attr,
52 char *buf)
53 {
54 struct stm_device *stm = to_stm_device(dev);
55 int ret;
56
57 ret = sprintf(buf, "%u\n", stm->data->sw_nchannels);
58
59 return ret;
60 }
61
62 static DEVICE_ATTR_RO(channels);
63
hw_override_show(struct device * dev,struct device_attribute * attr,char * buf)64 static ssize_t hw_override_show(struct device *dev,
65 struct device_attribute *attr,
66 char *buf)
67 {
68 struct stm_device *stm = to_stm_device(dev);
69 int ret;
70
71 ret = sprintf(buf, "%u\n", stm->data->hw_override);
72
73 return ret;
74 }
75
76 static DEVICE_ATTR_RO(hw_override);
77
/* sysfs attributes published for every stm device */
static struct attribute *stm_attrs[] = {
	&dev_attr_masters.attr,
	&dev_attr_channels.attr,
	&dev_attr_hw_override.attr,
	NULL,
};

ATTRIBUTE_GROUPS(stm);

/* the "stm" device class; stm devices register under it with the above groups */
static struct class stm_class = {
	.name = "stm",
	.dev_groups = stm_groups,
};
91
92 /**
93 * stm_find_device() - find stm device by name
94 * @buf: character buffer containing the name
95 *
96 * This is called when either policy gets assigned to an stm device or an
97 * stm_source device gets linked to an stm device.
98 *
99 * This grabs device's reference (get_device()) and module reference, both
100 * of which the calling path needs to make sure to drop with stm_put_device().
101 *
102 * Return: stm device pointer or null if lookup failed.
103 */
stm_find_device(const char * buf)104 struct stm_device *stm_find_device(const char *buf)
105 {
106 struct stm_device *stm;
107 struct device *dev;
108
109 if (!stm_core_up)
110 return NULL;
111
112 dev = class_find_device_by_name(&stm_class, buf);
113 if (!dev)
114 return NULL;
115
116 stm = to_stm_device(dev);
117 if (!try_module_get(stm->owner)) {
118 /* matches class_find_device() above */
119 put_device(dev);
120 return NULL;
121 }
122
123 return stm;
124 }
125
/**
 * stm_put_device() - drop references on the stm device
 * @stm: stm device, previously acquired by stm_find_device()
 *
 * This drops the module reference and device reference taken by
 * stm_find_device() or stm_char_open().
 */
void stm_put_device(struct stm_device *stm)
{
	/* read stm->owner before put_device(), which may free @stm */
	module_put(stm->owner);
	put_device(&stm->dev);
}
138
139 /*
140 * Internally we only care about software-writable masters here, that is the
141 * ones in the range [stm_data->sw_start..stm_data..sw_end], however we need
142 * original master numbers to be visible externally, since they are the ones
143 * that will appear in the STP stream. Thus, the internal bookkeeping uses
144 * $master - stm_data->sw_start to reference master descriptors and such.
145 */
146
147 #define __stm_master(_s, _m) \
148 ((_s)->masters[(_m) - (_s)->data->sw_start])
149
150 static inline struct stp_master *
stm_master(struct stm_device * stm,unsigned int idx)151 stm_master(struct stm_device *stm, unsigned int idx)
152 {
153 if (idx < stm->data->sw_start || idx > stm->data->sw_end)
154 return NULL;
155
156 return __stm_master(stm, idx);
157 }
158
/*
 * Allocate the bookkeeping structure for master @idx, with a trailing
 * channel bitmap sized for sw_nchannels channels.  GFP_ATOMIC because
 * this runs under stm::mc_lock (see stm_output_assign() ->
 * stm_find_master_chan()).
 */
static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
{
	struct stp_master *master;

	master = kzalloc_flex(*master, chan_map,
			      BITS_TO_LONGS(stm->data->sw_nchannels),
			      GFP_ATOMIC);
	if (!master)
		return -ENOMEM;

	/* all channels start out free */
	master->nr_free = stm->data->sw_nchannels;
	__stm_master(stm, idx) = master;

	return 0;
}
174
stp_master_free(struct stm_device * stm,unsigned int idx)175 static void stp_master_free(struct stm_device *stm, unsigned int idx)
176 {
177 struct stp_master *master = stm_master(stm, idx);
178
179 if (!master)
180 return;
181
182 __stm_master(stm, idx) = NULL;
183 kfree(master);
184 }
185
/*
 * Mark @output's [channel..channel+nr_chans) range busy in its master's
 * channel bitmap and account for the consumed channels.  Caller holds
 * both stm::mc_lock and output::lock.
 */
static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
{
	struct stp_master *master = stm_master(stm, output->master);

	lockdep_assert_held(&stm->mc_lock);
	lockdep_assert_held(&output->lock);

	/* free-channel accounting must never go negative */
	if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
		return;

	/* bitmap regions are power-of-2 sized, hence ilog2() */
	bitmap_allocate_region(&master->chan_map[0], output->channel,
			       ilog2(output->nr_chans));

	master->nr_free -= output->nr_chans;
}
201
/*
 * Undo stm_output_claim(): release @output's channel range back to its
 * master and reset nr_chans so the output reads as unassigned.  Caller
 * holds both stm::mc_lock and output::lock.
 */
static void
stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
{
	struct stp_master *master = stm_master(stm, output->master);

	lockdep_assert_held(&stm->mc_lock);
	lockdep_assert_held(&output->lock);

	/* region size must match what stm_output_claim() allocated */
	bitmap_release_region(&master->chan_map[0], output->channel,
			      ilog2(output->nr_chans));

	master->nr_free += output->nr_chans;
	output->nr_chans = 0;
}
216
/*
 * This is like bitmap_find_free_region(), except it can ignore @start bits
 * at the beginning.
 *
 * Looks for @width consecutive zero bits in [@start..@end], aligned to
 * @width (assumed to be a power of 2, matching the bitmap region API).
 * Returns the bit position on success, -1 if no such run exists.
 */
static int find_free_channels(unsigned long *bitmap, unsigned int start,
			      unsigned int end, unsigned int width)
{
	unsigned int pos;
	int i;

	/* the loop increment realigns pos to the next @width boundary */
	for (pos = start; pos < end + 1; pos = ALIGN(pos, width)) {
		pos = find_next_zero_bit(bitmap, end + 1, pos);
		/* not enough room left for a @width-wide region */
		if (pos + width > end + 1)
			break;

		/* free bit found at a misaligned position: realign and retry */
		if (pos & (width - 1))
			continue;

		/* verify that all @width bits starting at pos are clear */
		for (i = 1; i < width && !test_bit(pos + i, bitmap); i++)
			;
		if (i == width)
			return pos;

		/* step over [pos..pos+i) to continue search */
		pos += i;
	}

	return -1;
}
246
/*
 * Find the first master in [*mstart..mend] with a free, @width-wide
 * channel range within [*cstart..cend], allocating master descriptors
 * on demand.  On success, *mstart/*cstart are updated to the chosen
 * master/channel.  Runs under stm::mc_lock (see stm_output_assign()).
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENOSPC if no
 * suitable range exists.
 */
static int
stm_find_master_chan(struct stm_device *stm, unsigned int width,
		     unsigned int *mstart, unsigned int mend,
		     unsigned int *cstart, unsigned int cend)
{
	struct stp_master *master;
	unsigned int midx;
	int pos, err;

	for (midx = *mstart; midx <= mend; midx++) {
		/* lazily allocate the master's bookkeeping structure */
		if (!stm_master(stm, midx)) {
			err = stp_master_alloc(stm, midx);
			if (err)
				return err;
		}

		master = stm_master(stm, midx);

		if (!master->nr_free)
			continue;

		pos = find_free_channels(master->chan_map, *cstart, cend,
					 width);
		if (pos < 0)
			continue;

		*mstart = midx;
		*cstart = pos;
		return 0;
	}

	return -ENOSPC;
}
280
stm_output_assign(struct stm_device * stm,unsigned int width,struct stp_policy_node * policy_node,struct stm_output * output)281 static int stm_output_assign(struct stm_device *stm, unsigned int width,
282 struct stp_policy_node *policy_node,
283 struct stm_output *output)
284 {
285 unsigned int midx, cidx, mend, cend;
286 int ret = -EINVAL;
287
288 if (width > stm->data->sw_nchannels)
289 return -EINVAL;
290
291 /* We no longer accept policy_node==NULL here */
292 if (WARN_ON_ONCE(!policy_node))
293 return -EINVAL;
294
295 /*
296 * Also, the caller holds reference to policy_node, so it won't
297 * disappear on us.
298 */
299 stp_policy_node_get_ranges(policy_node, &midx, &mend, &cidx, &cend);
300
301 spin_lock(&stm->mc_lock);
302 spin_lock(&output->lock);
303 /* output is already assigned -- shouldn't happen */
304 if (WARN_ON_ONCE(output->nr_chans))
305 goto unlock;
306
307 ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
308 if (ret < 0)
309 goto unlock;
310
311 output->master = midx;
312 output->channel = cidx;
313 output->nr_chans = width;
314 if (stm->pdrv->output_open) {
315 void *priv = stp_policy_node_priv(policy_node);
316
317 if (WARN_ON_ONCE(!priv))
318 goto unlock;
319
320 /* configfs subsys mutex is held by the caller */
321 ret = stm->pdrv->output_open(priv, output);
322 if (ret)
323 goto unlock;
324 }
325
326 stm_output_claim(stm, output);
327 dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width);
328
329 ret = 0;
330 unlock:
331 if (ret)
332 output->nr_chans = 0;
333
334 spin_unlock(&output->lock);
335 spin_unlock(&stm->mc_lock);
336
337 return ret;
338 }
339
/*
 * Release @output's channel range (if assigned) and give the protocol
 * driver a chance to clean up its per-output state.
 */
static void stm_output_free(struct stm_device *stm, struct stm_output *output)
{
	spin_lock(&stm->mc_lock);
	spin_lock(&output->lock);
	/* only disclaim if stm_output_assign() actually claimed channels */
	if (output->nr_chans)
		stm_output_disclaim(stm, output);
	if (stm->pdrv && stm->pdrv->output_close)
		stm->pdrv->output_close(output);
	spin_unlock(&output->lock);
	spin_unlock(&stm->mc_lock);
}
351
/* One-time initialization of an stm_output's lock. */
static void stm_output_init(struct stm_output *output)
{
	spin_lock_init(&output->lock);
}
356
major_match(struct device * dev,const void * data)357 static int major_match(struct device *dev, const void *data)
358 {
359 unsigned int major = *(unsigned int *)data;
360
361 return MAJOR(dev->devt) == major;
362 }
363
/*
 * Framing protocol management
 * Modules can implement STM protocol drivers and (un-)register them
 * with the STM class framework.
 */
static struct list_head stm_pdrv_head;
static struct mutex stm_pdrv_mutex;

/* one registered protocol driver; lives on stm_pdrv_head under stm_pdrv_mutex */
struct stm_pdrv_entry {
	struct list_head entry;
	const struct stm_protocol_driver *pdrv;
	/* configfs node type built from pdrv->policy_attr, if any */
	const struct config_item_type *node_type;
};
377
378 static const struct stm_pdrv_entry *
__stm_lookup_protocol(const char * name)379 __stm_lookup_protocol(const char *name)
380 {
381 struct stm_pdrv_entry *pe;
382
383 /*
384 * If no name is given (NULL or ""), fall back to "p_basic".
385 */
386 if (!name || !*name)
387 name = "p_basic";
388
389 list_for_each_entry(pe, &stm_pdrv_head, entry) {
390 if (!strcmp(name, pe->pdrv->name))
391 return pe;
392 }
393
394 return NULL;
395 }
396
/*
 * Register an STM framing protocol driver.  Fails with -EEXIST if a
 * driver of the same name is already registered.  If the driver has
 * policy attributes, a matching configfs node type is obtained for it.
 */
int stm_register_protocol(const struct stm_protocol_driver *pdrv)
{
	struct stm_pdrv_entry *pe = NULL;
	int ret = -ENOMEM;

	mutex_lock(&stm_pdrv_mutex);

	if (__stm_lookup_protocol(pdrv->name)) {
		ret = -EEXIST;
		goto unlock;
	}

	pe = kzalloc_obj(*pe);
	if (!pe)
		goto unlock;

	if (pdrv->policy_attr) {
		pe->node_type = get_policy_node_type(pdrv->policy_attr);
		/* ret is still -ENOMEM here */
		if (!pe->node_type)
			goto unlock;
	}

	list_add_tail(&pe->entry, &stm_pdrv_head);
	pe->pdrv = pdrv;

	ret = 0;
unlock:
	mutex_unlock(&stm_pdrv_mutex);

	/* on any failure, discard the (possibly unallocated) entry */
	if (ret)
		kfree(pe);

	return ret;
}
EXPORT_SYMBOL_GPL(stm_register_protocol);
432
stm_unregister_protocol(const struct stm_protocol_driver * pdrv)433 void stm_unregister_protocol(const struct stm_protocol_driver *pdrv)
434 {
435 struct stm_pdrv_entry *pe, *iter;
436
437 mutex_lock(&stm_pdrv_mutex);
438
439 list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
440 if (pe->pdrv == pdrv) {
441 list_del(&pe->entry);
442
443 if (pe->node_type) {
444 kfree(pe->node_type->ct_attrs);
445 kfree(pe->node_type);
446 }
447 kfree(pe);
448 break;
449 }
450 }
451
452 mutex_unlock(&stm_pdrv_mutex);
453 }
454 EXPORT_SYMBOL_GPL(stm_unregister_protocol);
455
/* Pin the protocol driver's module; false if it is being unloaded. */
static bool stm_get_protocol(const struct stm_protocol_driver *pdrv)
{
	return try_module_get(pdrv->owner);
}
460
/* Drop the module reference taken by stm_get_protocol(). */
void stm_put_protocol(const struct stm_protocol_driver *pdrv)
{
	module_put(pdrv->owner);
}
465
stm_lookup_protocol(const char * name,const struct stm_protocol_driver ** pdrv,const struct config_item_type ** node_type)466 int stm_lookup_protocol(const char *name,
467 const struct stm_protocol_driver **pdrv,
468 const struct config_item_type **node_type)
469 {
470 const struct stm_pdrv_entry *pe;
471
472 mutex_lock(&stm_pdrv_mutex);
473
474 pe = __stm_lookup_protocol(name);
475 if (pe && pe->pdrv && stm_get_protocol(pe->pdrv)) {
476 *pdrv = pe->pdrv;
477 *node_type = pe->node_type;
478 }
479
480 mutex_unlock(&stm_pdrv_mutex);
481
482 return pe ? 0 : -ENOENT;
483 }
484
stm_char_open(struct inode * inode,struct file * file)485 static int stm_char_open(struct inode *inode, struct file *file)
486 {
487 struct stm_file *stmf;
488 struct device *dev;
489 unsigned int major = imajor(inode);
490 int err = -ENOMEM;
491
492 dev = class_find_device(&stm_class, NULL, &major, major_match);
493 if (!dev)
494 return -ENODEV;
495
496 stmf = kzalloc_obj(*stmf);
497 if (!stmf)
498 goto err_put_device;
499
500 err = -ENODEV;
501 stm_output_init(&stmf->output);
502 stmf->stm = to_stm_device(dev);
503
504 if (!try_module_get(stmf->stm->owner))
505 goto err_free;
506
507 file->private_data = stmf;
508
509 return nonseekable_open(inode, file);
510
511 err_free:
512 kfree(stmf);
513 err_put_device:
514 /* matches class_find_device() above */
515 put_device(dev);
516
517 return err;
518 }
519
/*
 * Tear down a character device writer: notify the hardware driver that
 * the master/channel is going away, free the output and drop the
 * references taken at open time.
 */
static int stm_char_release(struct inode *inode, struct file *file)
{
	struct stm_file *stmf = file->private_data;
	struct stm_device *stm = stmf->stm;

	/* driver unlink callback runs before the channels are released */
	if (stm->data->unlink)
		stm->data->unlink(stm->data, stmf->output.master,
				  stmf->output.channel);

	stm_output_free(stm, &stmf->output);

	/*
	 * matches the stm_char_open()'s
	 * class_find_device() + try_module_get()
	 */
	stm_put_device(stm);
	kfree(stmf);

	return 0;
}
540
541 static int
stm_assign_first_policy(struct stm_device * stm,struct stm_output * output,char ** ids,unsigned int width)542 stm_assign_first_policy(struct stm_device *stm, struct stm_output *output,
543 char **ids, unsigned int width)
544 {
545 struct stp_policy_node *pn;
546 int err, n;
547
548 /*
549 * On success, stp_policy_node_lookup() will return holding the
550 * configfs subsystem mutex, which is then released in
551 * stp_policy_node_put(). This allows the pdrv->output_open() in
552 * stm_output_assign() to serialize against the attribute accessors.
553 */
554 for (n = 0, pn = NULL; ids[n] && !pn; n++)
555 pn = stp_policy_node_lookup(stm, ids[n]);
556
557 if (!pn)
558 return -EINVAL;
559
560 err = stm_output_assign(stm, width, pn, output);
561
562 stp_policy_node_put(pn);
563
564 return err;
565 }
566
/**
 * stm_data_write() - send the given payload as data packets
 * @data: stm driver's data
 * @m: STP master
 * @c: STP channel
 * @ts_first: timestamp the first packet
 * @buf: data payload buffer
 * @count: data payload size
 *
 * Splits @buf into packets of at most 8 bytes; only the first packet
 * carries STP_PACKET_TIMESTAMPED when @ts_first is set.
 *
 * Return: number of bytes sent, or a negative error from ->packet().
 */
ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m,
			       unsigned int c, bool ts_first, const void *buf,
			       size_t count)
{
	unsigned int flags = ts_first ? STP_PACKET_TIMESTAMPED : 0;
	ssize_t sz;
	size_t pos;

	for (pos = 0, sz = 0; pos < count; pos += sz) {
		sz = min_t(unsigned int, count - pos, 8);
		sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz,
				  &((u8 *)buf)[pos]);
		/* short or failed write: stop and report below */
		if (sz <= 0)
			break;

		/* only the first packet is timestamped */
		if (ts_first) {
			flags = 0;
			ts_first = false;
		}
	}

	return sz < 0 ? sz : pos;
}
EXPORT_SYMBOL_GPL(stm_data_write);
600
601 static ssize_t notrace
stm_write(struct stm_device * stm,struct stm_output * output,unsigned int chan,const char * buf,size_t count,struct stm_source_data * source)602 stm_write(struct stm_device *stm, struct stm_output *output,
603 unsigned int chan, const char *buf, size_t count, struct stm_source_data *source)
604 {
605 int err;
606
607 /* stm->pdrv is serialized against policy_mutex */
608 if (!stm->pdrv)
609 return -ENODEV;
610
611 err = stm->pdrv->write(stm->data, output, chan, buf, count, source);
612 if (err < 0)
613 return err;
614
615 return err;
616 }
617
/*
 * write() on an stm character device: lazily assign a master/channel on
 * first write, copy the (at most PAGE_SIZE-1 byte) payload into the
 * kernel and push it through the protocol driver.
 */
static ssize_t stm_char_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct stm_file *stmf = file->private_data;
	struct stm_device *stm = stmf->stm;
	char *kbuf;
	int err;

	/* silently truncate oversized writes to fit the bounce buffer */
	if (count + 1 > PAGE_SIZE)
		count = PAGE_SIZE - 1;

	/*
	 * If no m/c have been assigned to this writer up to this
	 * point, try to use the task name and "default" policy entries.
	 */
	if (!stmf->output.nr_chans) {
		char comm[sizeof(current->comm)];
		char *ids[] = { comm, "default", NULL };

		get_task_comm(comm, current);

		err = stm_assign_first_policy(stmf->stm, &stmf->output, ids, 1);
		/*
		 * EBUSY means that somebody else just assigned this
		 * output, which is just fine for write()
		 *
		 * NOTE(review): despite the comment above, -EBUSY is
		 * returned to the caller here like any other error —
		 * confirm whether it was meant to be tolerated.
		 */
		if (err)
			return err;
	}

	kbuf = kmalloc(count + 1, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	err = copy_from_user(kbuf, buf, count);
	if (err) {
		kfree(kbuf);
		return -EFAULT;
	}

	/* keep the device powered for the duration of the write */
	pm_runtime_get_sync(&stm->dev);

	count = stm_write(stm, &stmf->output, 0, kbuf, count, NULL);

	pm_runtime_mark_last_busy(&stm->dev);
	pm_runtime_put_autosuspend(&stm->dev);
	kfree(kbuf);

	return count;
}
668
/*
 * vm_ops->mapped: take a pm_runtime reference for the newly established
 * mapping; stm_mmap_close() drops it when the VMA goes away.
 * The range/pgoff arguments are unused here.
 */
static int stm_mmap_mapped(unsigned long start, unsigned long end, pgoff_t pgoff,
			   const struct file *file, void **vm_private_data)
{
	struct stm_file *stmf = file->private_data;
	struct stm_device *stm = stmf->stm;

	pm_runtime_get_sync(&stm->dev);
	return 0;
}
678
stm_mmap_open(struct vm_area_struct * vma)679 static void stm_mmap_open(struct vm_area_struct *vma)
680 {
681 struct stm_file *stmf = vma->vm_file->private_data;
682 struct stm_device *stm = stmf->stm;
683
684 pm_runtime_get(&stm->dev);
685 }
686
stm_mmap_close(struct vm_area_struct * vma)687 static void stm_mmap_close(struct vm_area_struct *vma)
688 {
689 struct stm_file *stmf = vma->vm_file->private_data;
690 struct stm_device *stm = stmf->stm;
691
692 pm_runtime_mark_last_busy(&stm->dev);
693 pm_runtime_put_autosuspend(&stm->dev);
694 }
695
/* PM-refcounting callbacks for channel MMIO mappings */
static const struct vm_operations_struct stm_mmap_vmops = {
	.mapped = stm_mmap_mapped,
	.open = stm_mmap_open,
	.close = stm_mmap_close,
};
701
/*
 * mmap_prepare: map the device's channel MMIO window for this writer's
 * assigned master/channel range into userspace as uncached I/O memory.
 *
 * Requires the driver to implement ->mmio_addr(); the mapping must be
 * exactly nr_chans * sw_mmiosz bytes at file offset 0.
 */
static int stm_char_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;
	struct stm_file *stmf = file->private_data;
	struct stm_device *stm = stmf->stm;
	unsigned long size, phys;

	/* no MMIO write window on this device */
	if (!stm->data->mmio_addr)
		return -EOPNOTSUPP;

	/* only offset-0 mappings are meaningful */
	if (desc->pgoff)
		return -EINVAL;

	size = vma_desc_size(desc);

	/* the mapping must cover the assigned channels exactly */
	if (stmf->output.nr_chans * stm->data->sw_mmiosz != size)
		return -EINVAL;

	phys = stm->data->mmio_addr(stm->data, stmf->output.master,
				    stmf->output.channel,
				    stmf->output.nr_chans);

	if (!phys)
		return -EINVAL;

	desc->page_prot = pgprot_noncached(desc->page_prot);
	vma_desc_set_flags(desc, VMA_IO_BIT, VMA_DONTEXPAND_BIT,
			   VMA_DONTDUMP_BIT);
	desc->vm_ops = &stm_mmap_vmops;

	mmap_action_simple_ioremap(desc, phys, size);
	return 0;
}
735
/*
 * STP_POLICY_ID_SET: assign this writer a master/channel range according
 * to the user-supplied policy id and width, then notify the driver via
 * ->link().  Fails with -EBUSY if the writer is already assigned.
 */
static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
	struct stm_device *stm = stmf->stm;
	struct stp_policy_id *id;
	char *ids[] = { NULL, NULL };
	int ret = -EINVAL, wlimit = 1;
	u32 size;

	if (stmf->output.nr_chans)
		return -EBUSY;

	/* first read just the size field to learn the full structure size */
	if (copy_from_user(&size, arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(*id) || size >= PATH_MAX + sizeof(*id))
		return -EINVAL;

	/*
	 * size + 1 to make sure the .id string at the bottom is terminated,
	 * which is also why memdup_user() is not useful here
	 */
	id = kzalloc(size + 1, GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	if (copy_from_user(id, arg, size)) {
		ret = -EFAULT;
		goto err_free;
	}

	/* reserved fields must be zero */
	if (id->__reserved_0 || id->__reserved_1)
		goto err_free;

	/* for MMIO-capable devices, width is limited by the page size */
	if (stm->data->sw_mmiosz)
		wlimit = PAGE_SIZE / stm->data->sw_mmiosz;

	if (id->width < 1 || id->width > wlimit)
		goto err_free;

	ids[0] = id->id;
	ret = stm_assign_first_policy(stmf->stm, &stmf->output, ids,
				      id->width);
	if (ret)
		goto err_free;

	if (stm->data->link)
		ret = stm->data->link(stm->data, stmf->output.master,
				      stmf->output.channel);

	/* undo the assignment if the driver refused the link */
	if (ret)
		stm_output_free(stmf->stm, &stmf->output);

err_free:
	kfree(id);

	return ret;
}
793
/*
 * STP_POLICY_ID_GET: report this writer's current master/channel/width
 * assignment back to userspace.
 */
static int stm_char_policy_get_ioctl(struct stm_file *stmf, void __user *arg)
{
	struct stp_policy_id id = {
		.size = sizeof(id),
		.master = stmf->output.master,
		.channel = stmf->output.channel,
		.width = stmf->output.nr_chans,
		/* remaining (reserved) fields are zero-initialized */
	};

	if (copy_to_user(arg, &id, id.size))
		return -EFAULT;

	return 0;
}
807
/*
 * ioctl dispatch for stm character devices:
 *  STP_POLICY_ID_SET - assign by policy id, then report the assignment;
 *  STP_POLICY_ID_GET - report the current assignment;
 *  STP_SET_OPTIONS   - pass device-specific options to the driver.
 */
static long
stm_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct stm_file *stmf = file->private_data;
	struct stm_data *stm_data = stmf->stm->data;
	int err = -ENOTTY;
	u64 options;

	switch (cmd) {
	case STP_POLICY_ID_SET:
		err = stm_char_policy_set_ioctl(stmf, (void __user *)arg);
		if (err)
			return err;

		/* on success, report the resulting assignment back */
		return stm_char_policy_get_ioctl(stmf, (void __user *)arg);

	case STP_POLICY_ID_GET:
		return stm_char_policy_get_ioctl(stmf, (void __user *)arg);

	case STP_SET_OPTIONS:
		if (copy_from_user(&options, (u64 __user *)arg, sizeof(u64)))
			return -EFAULT;

		/* -ENOTTY is kept if the driver has no set_options() */
		if (stm_data->set_options)
			err = stm_data->set_options(stm_data,
						    stmf->output.master,
						    stmf->output.channel,
						    stmf->output.nr_chans,
						    options);

		break;
	default:
		break;
	}

	return err;
}
845
/* character device interface: one stm_file (and output) per open descriptor */
static const struct file_operations stm_fops = {
	.open = stm_char_open,
	.release = stm_char_release,
	.write = stm_char_write,
	.mmap_prepare = stm_char_mmap_prepare,
	.unlocked_ioctl = stm_char_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
854
/* device release callback: stm devices are vzalloc()-ed in stm_register_device() */
static void stm_device_release(struct device *dev)
{
	vfree(to_stm_device(dev));
}
861
/*
 * Register an stm device with the class: allocate the stm_device (with a
 * per-master pointer array), grab a character major, add the device and
 * enable autosuspending runtime PM.
 *
 * Return: 0 on success; -EPROBE_DEFER if the core isn't up yet,
 * -EINVAL for a malformed stm_data, or a negative error otherwise.
 */
int stm_register_device(struct device *parent, struct stm_data *stm_data,
			struct module *owner)
{
	struct stm_device *stm;
	unsigned int nmasters;
	int err = -ENOMEM;

	if (!stm_core_up)
		return -EPROBE_DEFER;

	/* a packet callback and at least one channel are mandatory */
	if (!stm_data->packet || !stm_data->sw_nchannels)
		return -EINVAL;

	nmasters = stm_data->sw_end - stm_data->sw_start + 1;
	/* trailing array of per-master descriptor pointers */
	stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
	if (!stm)
		return -ENOMEM;

	stm->major = register_chrdev(0, stm_data->name, &stm_fops);
	if (stm->major < 0) {
		err = stm->major;
		vfree(stm);
		return err;
	}

	device_initialize(&stm->dev);
	stm->dev.devt = MKDEV(stm->major, 0);
	stm->dev.class = &stm_class;
	stm->dev.parent = parent;
	stm->dev.release = stm_device_release;

	mutex_init(&stm->link_mutex);
	spin_lock_init(&stm->link_lock);
	INIT_LIST_HEAD(&stm->link_list);

	/* initialize the object before it is accessible via sysfs */
	spin_lock_init(&stm->mc_lock);
	mutex_init(&stm->policy_mutex);
	stm->sw_nmasters = nmasters;
	stm->owner = owner;
	stm->data = stm_data;
	stm_data->stm = stm;

	err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
	if (err)
		goto err_device;

	err = device_add(&stm->dev);
	if (err)
		goto err_device;

	/*
	 * Use delayed autosuspend to avoid bouncing back and forth
	 * on recurring character device writes, with the initial
	 * delay time of 2 seconds.
	 */
	pm_runtime_no_callbacks(&stm->dev);
	pm_runtime_use_autosuspend(&stm->dev);
	pm_runtime_set_autosuspend_delay(&stm->dev, 2000);
	pm_runtime_set_suspended(&stm->dev);
	pm_runtime_enable(&stm->dev);

	return 0;

err_device:
	unregister_chrdev(stm->major, stm_data->name);

	/* calls stm_device_release() */
	put_device(&stm->dev);

	return err;
}
EXPORT_SYMBOL_GPL(stm_register_device);
935
static int __stm_source_link_drop(struct stm_source_device *src,
				  struct stm_device *stm);

/*
 * Unregister an stm device: detach all linked stm_source devices, wait
 * out in-flight SRCU readers, release the char major, unbind the policy
 * and free all master descriptors before removing the device.
 */
void stm_unregister_device(struct stm_data *stm_data)
{
	struct stm_device *stm = stm_data->stm;
	struct stm_source_device *src, *iter;
	int i, ret;

	pm_runtime_dont_use_autosuspend(&stm->dev);
	pm_runtime_disable(&stm->dev);

	mutex_lock(&stm->link_mutex);
	list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
		ret = __stm_source_link_drop(src, stm);
		/*
		 * src <-> stm link must not change under the same
		 * stm::link_mutex, so complain loudly if it has;
		 * also in this situation ret!=0 means this src is
		 * not connected to this stm and it should be otherwise
		 * safe to proceed with the tear-down of stm.
		 */
		WARN_ON_ONCE(ret);
	}
	mutex_unlock(&stm->link_mutex);

	/* make sure no stm_source_write() is still using this device */
	synchronize_srcu(&stm_source_srcu);

	unregister_chrdev(stm->major, stm_data->name);

	mutex_lock(&stm->policy_mutex);
	if (stm->policy)
		stp_policy_unbind(stm->policy);
	mutex_unlock(&stm->policy_mutex);

	for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
		stp_master_free(stm, i);

	device_unregister(&stm->dev);
	stm_data->stm = NULL;
}
EXPORT_SYMBOL_GPL(stm_unregister_device);
978
979 /*
980 * stm::link_list access serialization uses a spinlock and a mutex; holding
981 * either of them guarantees that the list is stable; modification requires
982 * holding both of them.
983 *
984 * Lock ordering is as follows:
985 * stm::link_mutex
986 * stm::link_lock
987 * src::link_lock
988 */
989
/**
 * stm_source_link_add() - connect an stm_source device to an stm device
 * @src: stm_source device
 * @stm: stm device
 *
 * This function establishes a link from stm_source to an stm device so that
 * the former can send out trace data to the latter.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int stm_source_link_add(struct stm_source_device *src,
			       struct stm_device *stm)
{
	/* policy entries tried in order: the source's own name, then "default" */
	char *ids[] = { NULL, "default", NULL };
	int err = -ENOMEM;

	/* lock ordering: link_mutex, then stm then src link_lock (see above) */
	mutex_lock(&stm->link_mutex);
	spin_lock(&stm->link_lock);
	spin_lock(&src->link_lock);

	/* src->link is dereferenced under stm_source_srcu but not the list */
	rcu_assign_pointer(src->link, stm);
	list_add_tail(&src->link_entry, &stm->link_list);

	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);
	mutex_unlock(&stm->link_mutex);

	ids[0] = kstrdup(src->data->name, GFP_KERNEL);
	if (!ids[0])
		goto fail_detach;

	err = stm_assign_first_policy(stm, &src->output, ids,
				      src->data->nr_chans);
	kfree(ids[0]);

	if (err)
		goto fail_detach;

	/* this is to notify the STM device that a new link has been made */
	if (stm->data->link)
		err = stm->data->link(stm->data, src->output.master,
				      src->output.channel);

	if (err)
		goto fail_free_output;

	/* this is to let the source carry out all necessary preparations */
	if (src->data->link)
		src->data->link(src->data);

	return 0;

fail_free_output:
	stm_output_free(stm, &src->output);

fail_detach:
	/* undo the link established at the top, under the same locks */
	mutex_lock(&stm->link_mutex);
	spin_lock(&stm->link_lock);
	spin_lock(&src->link_lock);

	rcu_assign_pointer(src->link, NULL);
	list_del_init(&src->link_entry);

	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);
	mutex_unlock(&stm->link_mutex);

	return err;
}
1060
/**
 * __stm_source_link_drop() - detach stm_source from an stm device
 * @src: stm_source device
 * @stm: stm device
 *
 * If @stm is @src::link, disconnect them from one another and put the
 * reference on the @stm device.
 *
 * Caller must hold stm::link_mutex.
 *
 * Return: 0 on success, -EAGAIN if @src turned out to be linked to a
 * different stm device in the meantime (caller should retry).
 */
static int __stm_source_link_drop(struct stm_source_device *src,
				  struct stm_device *stm)
{
	struct stm_device *link;
	int ret = 0;

	lockdep_assert_held(&stm->link_mutex);

	/* for stm::link_list modification, we hold both mutex and spinlock */
	spin_lock(&stm->link_lock);
	spin_lock(&src->link_lock);
	link = srcu_dereference_check(src->link, &stm_source_srcu, 1);

	/*
	 * The linked device may have changed since we last looked, because
	 * we weren't holding the src::link_lock back then; if this is the
	 * case, tell the caller to retry.
	 */
	if (link != stm) {
		ret = -EAGAIN;
		goto unlock;
	}

	stm_output_free(link, &src->output);
	list_del_init(&src->link_entry);
	pm_runtime_mark_last_busy(&link->dev);
	pm_runtime_put_autosuspend(&link->dev);
	/* matches stm_find_device() from stm_source_link_store() */
	stm_put_device(link);
	rcu_assign_pointer(src->link, NULL);

unlock:
	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);

	/*
	 * Call the unlink callbacks for both source and stm, when we know
	 * that we have actually performed the unlinking.
	 */
	if (!ret) {
		if (src->data->unlink)
			src->data->unlink(src->data);

		if (stm->data->unlink)
			stm->data->unlink(stm->data, src->output.master,
					  src->output.channel);
	}

	return ret;
}
1121
/**
 * stm_source_link_drop() - detach stm_source from its stm device
 * @src: stm_source device
 *
 * Unlinking means disconnecting from source's STM device; after this
 * writes will be unsuccessful until it is linked to a new STM device.
 *
 * This will happen on "stm_source_link" sysfs attribute write to undo
 * the existing link (if any), or on linked STM device's de-registration.
 */
static void stm_source_link_drop(struct stm_source_device *src)
{
	struct stm_device *stm;
	int idx, ret;

retry:
	idx = srcu_read_lock(&stm_source_srcu);
	/*
	 * The stm device will be valid for the duration of this
	 * read section, but the link may change before we grab
	 * the src::link_lock in __stm_source_link_drop().
	 */
	stm = srcu_dereference(src->link, &stm_source_srcu);

	ret = 0;
	if (stm) {
		mutex_lock(&stm->link_mutex);
		ret = __stm_source_link_drop(src, stm);
		mutex_unlock(&stm->link_mutex);
	}

	srcu_read_unlock(&stm_source_srcu, idx);

	/* if it did change, retry */
	if (ret == -EAGAIN)
		goto retry;
}
1159
stm_source_link_show(struct device * dev,struct device_attribute * attr,char * buf)1160 static ssize_t stm_source_link_show(struct device *dev,
1161 struct device_attribute *attr,
1162 char *buf)
1163 {
1164 struct stm_source_device *src = to_stm_source_device(dev);
1165 struct stm_device *stm;
1166 int idx, ret;
1167
1168 idx = srcu_read_lock(&stm_source_srcu);
1169 stm = srcu_dereference(src->link, &stm_source_srcu);
1170 ret = sprintf(buf, "%s\n",
1171 stm ? dev_name(&stm->dev) : "<none>");
1172 srcu_read_unlock(&stm_source_srcu, idx);
1173
1174 return ret;
1175 }
1176
stm_source_link_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1177 static ssize_t stm_source_link_store(struct device *dev,
1178 struct device_attribute *attr,
1179 const char *buf, size_t count)
1180 {
1181 struct stm_source_device *src = to_stm_source_device(dev);
1182 struct stm_device *link;
1183 int err;
1184
1185 stm_source_link_drop(src);
1186
1187 link = stm_find_device(buf);
1188 if (!link)
1189 return -EINVAL;
1190
1191 pm_runtime_get(&link->dev);
1192
1193 err = stm_source_link_add(src, link);
1194 if (err) {
1195 pm_runtime_put_autosuspend(&link->dev);
1196 /* matches the stm_find_device() above */
1197 stm_put_device(link);
1198 }
1199
1200 return err ? : count;
1201 }
1202
static DEVICE_ATTR_RW(stm_source_link);

/* sysfs attributes present on every stm_source device */
static struct attribute *stm_source_attrs[] = {
	&dev_attr_stm_source_link.attr,
	NULL,
};

ATTRIBUTE_GROUPS(stm_source);

/* device class for stm_source devices; attributes attached via dev_groups */
static struct class stm_source_class = {
	.name		= "stm_source",
	.dev_groups	= stm_source_groups,
};
1216
/* Final teardown, called when the embedded device's last reference drops. */
static void stm_source_device_release(struct device *dev)
{
	kfree(to_stm_source_device(dev));
}
1223
1224 /**
1225 * stm_source_register_device() - register an stm_source device
1226 * @parent: parent device
1227 * @data: device description structure
1228 *
1229 * This will create a device of stm_source class that can write
1230 * data to an stm device once linked.
1231 *
1232 * Return: 0 on success, -errno otherwise.
1233 */
stm_source_register_device(struct device * parent,struct stm_source_data * data)1234 int stm_source_register_device(struct device *parent,
1235 struct stm_source_data *data)
1236 {
1237 struct stm_source_device *src;
1238 int err;
1239
1240 if (!stm_core_up)
1241 return -EPROBE_DEFER;
1242
1243 src = kzalloc_obj(*src);
1244 if (!src)
1245 return -ENOMEM;
1246
1247 device_initialize(&src->dev);
1248 src->dev.class = &stm_source_class;
1249 src->dev.parent = parent;
1250 src->dev.release = stm_source_device_release;
1251
1252 err = kobject_set_name(&src->dev.kobj, "%s", data->name);
1253 if (err)
1254 goto err;
1255
1256 pm_runtime_no_callbacks(&src->dev);
1257 pm_runtime_forbid(&src->dev);
1258
1259 err = device_add(&src->dev);
1260 if (err)
1261 goto err;
1262
1263 stm_output_init(&src->output);
1264 spin_lock_init(&src->link_lock);
1265 INIT_LIST_HEAD(&src->link_entry);
1266 src->data = data;
1267 data->src = src;
1268
1269 return 0;
1270
1271 err:
1272 put_device(&src->dev);
1273
1274 return err;
1275 }
1276 EXPORT_SYMBOL_GPL(stm_source_register_device);
1277
1278 /**
1279 * stm_source_unregister_device() - unregister an stm_source device
1280 * @data: device description that was used to register the device
1281 *
1282 * This will remove a previously created stm_source device from the system.
1283 */
stm_source_unregister_device(struct stm_source_data * data)1284 void stm_source_unregister_device(struct stm_source_data *data)
1285 {
1286 struct stm_source_device *src = data->src;
1287
1288 stm_source_link_drop(src);
1289
1290 device_unregister(&src->dev);
1291 }
1292 EXPORT_SYMBOL_GPL(stm_source_unregister_device);
1293
stm_source_write(struct stm_source_data * data,unsigned int chan,const char * buf,size_t count)1294 int notrace stm_source_write(struct stm_source_data *data,
1295 unsigned int chan,
1296 const char *buf, size_t count)
1297 {
1298 struct stm_source_device *src = data->src;
1299 struct stm_device *stm;
1300 int idx;
1301
1302 if (!src->output.nr_chans)
1303 return -ENODEV;
1304
1305 if (chan >= src->output.nr_chans)
1306 return -EINVAL;
1307
1308 idx = srcu_read_lock(&stm_source_srcu);
1309
1310 stm = srcu_dereference(src->link, &stm_source_srcu);
1311 if (stm)
1312 count = stm_write(stm, &src->output, chan, buf, count, data);
1313 else
1314 count = -ENODEV;
1315
1316 srcu_read_unlock(&stm_source_srcu, idx);
1317
1318 return count;
1319 }
1320 EXPORT_SYMBOL_GPL(stm_source_write);
1321
stm_core_init(void)1322 static int __init stm_core_init(void)
1323 {
1324 int err;
1325
1326 err = class_register(&stm_class);
1327 if (err)
1328 return err;
1329
1330 err = class_register(&stm_source_class);
1331 if (err)
1332 goto err_stm;
1333
1334 err = stp_configfs_init();
1335 if (err)
1336 goto err_src;
1337
1338 init_srcu_struct(&stm_source_srcu);
1339 INIT_LIST_HEAD(&stm_pdrv_head);
1340 mutex_init(&stm_pdrv_mutex);
1341
1342 /*
1343 * So as to not confuse existing users with a requirement
1344 * to load yet another module, do it here.
1345 */
1346 if (IS_ENABLED(CONFIG_STM_PROTO_BASIC))
1347 (void)request_module_nowait("stm_p_basic");
1348 stm_core_up++;
1349
1350 return 0;
1351
1352 err_src:
1353 class_unregister(&stm_source_class);
1354 err_stm:
1355 class_unregister(&stm_class);
1356
1357 return err;
1358 }
1359
1360 module_init(stm_core_init);
1361
/*
 * Module exit: undo stm_core_init(). cleanup_srcu_struct() is done first
 * so that outstanding SRCU read sections (e.g. stm_source_write() callers)
 * are flushed before the classes and the configfs subsystem go away.
 */
static void __exit stm_core_exit(void)
{
	cleanup_srcu_struct(&stm_source_srcu);
	class_unregister(&stm_source_class);
	class_unregister(&stm_class);
	stp_configfs_exit();
}

module_exit(stm_core_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("System Trace Module device class");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
1375