1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
4 * Intel Management Engine Interface (Intel MEI) Linux driver
5 */
6
7 #include <linux/sched/signal.h>
8 #include <linux/wait.h>
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/dma-mapping.h>
13
14 #include <linux/mei.h>
15
16 #include "mei_dev.h"
17 #include "hbm.h"
18 #include "client.h"
19
20 /**
21 * mei_me_cl_init - initialize me client
22 *
23 * @me_cl: me client
24 */
25 void mei_me_cl_init(struct mei_me_client *me_cl)
26 {
27 INIT_LIST_HEAD(&me_cl->list);
28 kref_init(&me_cl->refcnt);
29 }
30
31 /**
32 * mei_me_cl_get - increases me client refcount
33 *
34 * @me_cl: me client
35 *
36 * Locking: called under "dev->device_lock" lock
37 *
38 * Return: me client or NULL
39 */
40 struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
41 {
42 if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
43 return me_cl;
44
45 return NULL;
46 }
47
48 /**
49 * mei_me_cl_release - free me client
50 *
51 * @ref: me_client refcount
52 *
53 * Locking: called under "dev->device_lock" lock
54 */
55 static void mei_me_cl_release(struct kref *ref)
56 {
57 struct mei_me_client *me_cl =
58 container_of(ref, struct mei_me_client, refcnt);
59
60 kfree(me_cl);
61 }
62
63 /**
64 * mei_me_cl_put - decrease me client refcount and free client if necessary
65 *
66 * @me_cl: me client
67 *
68 * Locking: called under "dev->device_lock" lock
69 */
70 void mei_me_cl_put(struct mei_me_client *me_cl)
71 {
72 if (me_cl)
73 kref_put(&me_cl->refcnt, mei_me_cl_release);
74 }
75
76 /**
77 * __mei_me_cl_del - delete me client from the list and decrease
78 * reference counter
79 *
80 * @dev: mei device
81 * @me_cl: me client
82 *
83 * Locking: dev->me_clients_rwsem
84 */
85 static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
86 {
87 if (!me_cl)
88 return;
89
90 list_del_init(&me_cl->list);
91 mei_me_cl_put(me_cl);
92 }
93
94 /**
95 * mei_me_cl_del - delete me client from the list and decrease
96 * reference counter
97 *
98 * @dev: mei device
99 * @me_cl: me client
100 */
101 void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
102 {
103 down_write(&dev->me_clients_rwsem);
104 __mei_me_cl_del(dev, me_cl);
105 up_write(&dev->me_clients_rwsem);
106 }
107
108 /**
109 * mei_me_cl_add - add me client to the list
110 *
111 * @dev: mei device
112 * @me_cl: me client
113 */
114 void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
115 {
116 down_write(&dev->me_clients_rwsem);
117 list_add(&me_cl->list, &dev->me_clients);
118 up_write(&dev->me_clients_rwsem);
119 }
120
121 /**
122 * __mei_me_cl_by_uuid - locate me client by uuid
123 * increases ref count
124 *
125 * @dev: mei device
126 * @uuid: me client uuid
127 *
128 * Return: me client or NULL if not found
129 *
130 * Locking: dev->me_clients_rwsem
131 */
132 static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
133 const uuid_le *uuid)
134 {
135 struct mei_me_client *me_cl;
136 const uuid_le *pn;
137
138 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
139
140 list_for_each_entry(me_cl, &dev->me_clients, list) {
141 pn = &me_cl->props.protocol_name;
142 if (uuid_le_cmp(*uuid, *pn) == 0)
143 return mei_me_cl_get(me_cl);
144 }
145
146 return NULL;
147 }
148
149 /**
150 * mei_me_cl_by_uuid - locate me client by uuid
151 * increases ref count
152 *
153 * @dev: mei device
154 * @uuid: me client uuid
155 *
156 * Return: me client or NULL if not found
157 *
158 * Locking: dev->me_clients_rwsem
159 */
160 struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
161 const uuid_le *uuid)
162 {
163 struct mei_me_client *me_cl;
164
165 down_read(&dev->me_clients_rwsem);
166 me_cl = __mei_me_cl_by_uuid(dev, uuid);
167 up_read(&dev->me_clients_rwsem);
168
169 return me_cl;
170 }
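/*
 * Usage sketch: the lookup helpers here return a referenced me client, so the
 * caller must drop the reference with mei_me_cl_put() once done, e.g.:
 *
 *	me_cl = mei_me_cl_by_uuid(dev, uuid);
 *	if (me_cl) {
 *		... use me_cl->props ...
 *		mei_me_cl_put(me_cl);
 *	}
 */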
171
172 /**
173 * mei_me_cl_by_id - locate me client by client id
174 * increases ref count
175 *
176 * @dev: the device structure
177 * @client_id: me client id
178 *
179 * Return: me client or NULL if not found
180 *
181 * Locking: dev->me_clients_rwsem
182 */
183 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
184 {
185
186 struct mei_me_client *__me_cl, *me_cl = NULL;
187
188 down_read(&dev->me_clients_rwsem);
189 list_for_each_entry(__me_cl, &dev->me_clients, list) {
190 if (__me_cl->client_id == client_id) {
191 me_cl = mei_me_cl_get(__me_cl);
192 break;
193 }
194 }
195 up_read(&dev->me_clients_rwsem);
196
197 return me_cl;
198 }
199
200 /**
201 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
202 * increases ref count
203 *
204 * @dev: the device structure
205 * @uuid: me client uuid
206 * @client_id: me client id
207 *
208 * Return: me client or NULL if not found
209 *
210 * Locking: dev->me_clients_rwsem
211 */
212 static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
213 const uuid_le *uuid, u8 client_id)
214 {
215 struct mei_me_client *me_cl;
216 const uuid_le *pn;
217
218 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
219
220 list_for_each_entry(me_cl, &dev->me_clients, list) {
221 pn = &me_cl->props.protocol_name;
222 if (uuid_le_cmp(*uuid, *pn) == 0 &&
223 me_cl->client_id == client_id)
224 return mei_me_cl_get(me_cl);
225 }
226
227 return NULL;
228 }
229
230
231 /**
232 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
233 * increases ref count
234 *
235 * @dev: the device structure
236 * @uuid: me client uuid
237 * @client_id: me client id
238 *
239 * Return: me client or NULL if not found
240 */
241 struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
242 const uuid_le *uuid, u8 client_id)
243 {
244 struct mei_me_client *me_cl;
245
246 down_read(&dev->me_clients_rwsem);
247 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
248 up_read(&dev->me_clients_rwsem);
249
250 return me_cl;
251 }
252
253 /**
254 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
255 *
256 * @dev: the device structure
257 * @uuid: me client uuid
258 *
259 * Locking: called under "dev->device_lock" lock
260 */
261 void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
262 {
263 struct mei_me_client *me_cl;
264
265 dev_dbg(dev->dev, "remove %pUl\n", uuid);
266
267 down_write(&dev->me_clients_rwsem);
268 me_cl = __mei_me_cl_by_uuid(dev, uuid);
269 __mei_me_cl_del(dev, me_cl);
270 mei_me_cl_put(me_cl);
271 up_write(&dev->me_clients_rwsem);
272 }
273
274 /**
275 * mei_me_cl_rm_all - remove all me clients
276 *
277 * @dev: the device structure
278 *
279 * Locking: called under "dev->device_lock" lock
280 */
281 void mei_me_cl_rm_all(struct mei_device *dev)
282 {
283 struct mei_me_client *me_cl, *next;
284
285 down_write(&dev->me_clients_rwsem);
286 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
287 __mei_me_cl_del(dev, me_cl);
288 up_write(&dev->me_clients_rwsem);
289 }
290
291 /**
292 * mei_io_cb_free - free mei_cb_private related memory
293 *
294 * @cb: mei callback struct
295 */
296 void mei_io_cb_free(struct mei_cl_cb *cb)
297 {
298 if (cb == NULL)
299 return;
300
301 list_del(&cb->list);
302 kvfree(cb->buf.data);
303 kfree(cb->ext_hdr);
304 kfree(cb);
305 }
306
307 /**
308 * mei_tx_cb_enqueue - queue tx callback
309 *
310 * @cb: mei callback struct
311 * @head: an instance of list to queue on
312 *
313 * Locking: called under "dev->device_lock" lock
314 */
315 static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
316 struct list_head *head)
317 {
318 list_add_tail(&cb->list, head);
319 cb->cl->tx_cb_queued++;
320 }
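/*
 * Note: cl->tx_cb_queued tracks how many tx callbacks are currently queued for
 * the client; callers may compare it against a per-client tx queue limit before
 * queuing further writes (the limit itself is defined outside this file).
 */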
321
322 /**
323 * mei_tx_cb_dequeue - dequeue tx callback
324 *
325 * @cb: mei callback struct to dequeue and free
326 *
327 * Locking: called under "dev->device_lock" lock
328 */
329 static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
330 {
331 if (!WARN_ON(cb->cl->tx_cb_queued == 0))
332 cb->cl->tx_cb_queued--;
333
334 mei_io_cb_free(cb);
335 }
336
337 /**
338 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
339 *
340 * @cl: mei client
341 * @fp: pointer to file structure
342 *
343 * Locking: called under "dev->device_lock" lock
344 */
345 static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
346 const struct file *fp)
347 {
348 struct mei_cl_vtag *cl_vtag;
349
350 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
351 if (cl_vtag->fp == fp) {
352 cl_vtag->pending_read = true;
353 return;
354 }
355 }
356 }
357
358 /**
359 * mei_io_cb_init - allocate and initialize io callback
360 *
361 * @cl: mei client
362 * @type: operation type
363 * @fp: pointer to file structure
364 *
365 * Return: mei_cl_cb pointer or NULL;
366 */
367 static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
368 enum mei_cb_file_ops type,
369 const struct file *fp)
370 {
371 struct mei_cl_cb *cb;
372
373 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
374 if (!cb)
375 return NULL;
376
377 INIT_LIST_HEAD(&cb->list);
378 cb->fp = fp;
379 cb->cl = cl;
380 cb->buf_idx = 0;
381 cb->fop_type = type;
382 cb->vtag = 0;
383 cb->ext_hdr = NULL;
384
385 return cb;
386 }
387
388 /**
389 * mei_io_list_flush_cl - removes cbs belonging to the cl.
390 *
391 * @head: an instance of our list structure
392 * @cl: host client
393 */
394 static void mei_io_list_flush_cl(struct list_head *head,
395 const struct mei_cl *cl)
396 {
397 struct mei_cl_cb *cb, *next;
398
399 list_for_each_entry_safe(cb, next, head, list) {
400 if (cl == cb->cl) {
401 list_del_init(&cb->list);
402 if (cb->fop_type == MEI_FOP_READ)
403 mei_io_cb_free(cb);
404 }
405 }
406 }
407
408 /**
409 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
410 *
411 * @head: An instance of our list structure
412 * @cl: host client
413 * @fp: file pointer (matching cb file object), may be NULL
414 */
415 static void mei_io_tx_list_free_cl(struct list_head *head,
416 const struct mei_cl *cl,
417 const struct file *fp)
418 {
419 struct mei_cl_cb *cb, *next;
420
421 list_for_each_entry_safe(cb, next, head, list) {
422 if (cl == cb->cl && (!fp || fp == cb->fp))
423 mei_tx_cb_dequeue(cb);
424 }
425 }
426
427 /**
428 * mei_io_list_free_fp - free cb from a list that matches file pointer
429 *
430 * @head: io list
431 * @fp: file pointer (matching cb file object), may be NULL
432 */
433 static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
434 {
435 struct mei_cl_cb *cb, *next;
436
437 list_for_each_entry_safe(cb, next, head, list)
438 if (!fp || fp == cb->fp)
439 mei_io_cb_free(cb);
440 }
441
442 /**
443 * mei_cl_free_pending - free pending cb
444 *
445 * @cl: host client
446 */
447 static void mei_cl_free_pending(struct mei_cl *cl)
448 {
449 struct mei_cl_cb *cb;
450
451 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
452 mei_io_cb_free(cb);
453 }
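/*
 * Only the first entry on rd_pending is freed above; with the current flow
 * control scheme at most one read can be pending per client (see
 * mei_cl_read_start()), so this is sufficient.
 */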
454
455 /**
456 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
457 *
458 * @cl: host client
459 * @length: size of the buffer
460 * @fop_type: operation type
461 * @fp: associated file pointer (might be NULL)
462 *
463 * Return: cb on success and NULL on failure
464 */
465 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
466 enum mei_cb_file_ops fop_type,
467 const struct file *fp)
468 {
469 struct mei_cl_cb *cb;
470
471 cb = mei_io_cb_init(cl, fop_type, fp);
472 if (!cb)
473 return NULL;
474
475 if (length == 0)
476 return cb;
477
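	/* the buffer is padded to MEI_SLOT_SIZE granularity so it can hold whole slots */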
478 cb->buf.data = kvmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
479 if (!cb->buf.data) {
480 mei_io_cb_free(cb);
481 return NULL;
482 }
483 cb->buf.size = length;
484
485 return cb;
486 }
487
488 /**
489 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
490 * and enqueuing of the control commands cb
491 *
492 * @cl: host client
493 * @length: size of the buffer
494 * @fop_type: operation type
495 * @fp: associated file pointer (might be NULL)
496 *
497 * Return: cb on success and NULL on failure
498 * Locking: called under "dev->device_lock" lock
499 */
500 struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
501 enum mei_cb_file_ops fop_type,
502 const struct file *fp)
503 {
504 struct mei_cl_cb *cb;
505
506 /* for RX always allocate at least client's mtu */
507 if (length)
508 length = max_t(size_t, length, mei_cl_mtu(cl));
509
510 cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
511 if (!cb)
512 return NULL;
513
514 list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
515 return cb;
516 }
517
518 /**
519 * mei_cl_read_cb - find this cl's callback in the read list
520 * for a specific file
521 *
522 * @cl: host client
523 * @fp: file pointer (matching cb file object), may be NULL
524 *
525 * Return: cb on success, NULL if cb is not found
526 */
527 struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
528 {
529 struct mei_cl_cb *cb;
530 struct mei_cl_cb *ret_cb = NULL;
531
532 spin_lock(&cl->rd_completed_lock);
533 list_for_each_entry(cb, &cl->rd_completed, list)
534 if (!fp || fp == cb->fp) {
535 ret_cb = cb;
536 break;
537 }
538 spin_unlock(&cl->rd_completed_lock);
539 return ret_cb;
540 }
541
542 /**
543 * mei_cl_flush_queues - flushes queue lists belonging to cl.
544 *
545 * @cl: host client
546 * @fp: file pointer (matching cb file object), may be NULL
547 *
548 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
549 */
550 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
551 {
552 struct mei_device *dev;
553
554 if (WARN_ON(!cl || !cl->dev))
555 return -EINVAL;
556
557 dev = cl->dev;
558
559 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
560 mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
561 mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
562 /* free pending and control cb only in final flush */
563 if (!fp) {
564 mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
565 mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
566 mei_cl_free_pending(cl);
567 }
568 spin_lock(&cl->rd_completed_lock);
569 mei_io_list_free_fp(&cl->rd_completed, fp);
570 spin_unlock(&cl->rd_completed_lock);
571
572 return 0;
573 }
574
575 /**
576 * mei_cl_init - initializes cl.
577 *
578 * @cl: host client to be initialized
579 * @dev: mei device
580 */
581 static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
582 {
583 memset(cl, 0, sizeof(*cl));
584 init_waitqueue_head(&cl->wait);
585 init_waitqueue_head(&cl->rx_wait);
586 init_waitqueue_head(&cl->tx_wait);
587 init_waitqueue_head(&cl->ev_wait);
588 INIT_LIST_HEAD(&cl->vtag_map);
589 spin_lock_init(&cl->rd_completed_lock);
590 INIT_LIST_HEAD(&cl->rd_completed);
591 INIT_LIST_HEAD(&cl->rd_pending);
592 INIT_LIST_HEAD(&cl->link);
593 cl->writing_state = MEI_IDLE;
594 cl->state = MEI_FILE_UNINITIALIZED;
595 cl->dev = dev;
596 }
597
598 /**
599 * mei_cl_allocate - allocates cl structure and sets it up.
600 *
601 * @dev: mei device
602 * Return: The allocated client or NULL on failure
603 */
604 struct mei_cl *mei_cl_allocate(struct mei_device *dev)
605 {
606 struct mei_cl *cl;
607
608 cl = kmalloc(sizeof(*cl), GFP_KERNEL);
609 if (!cl)
610 return NULL;
611
612 mei_cl_init(cl, dev);
613
614 return cl;
615 }
616
617 /**
618 * mei_cl_link - allocate host id in the host map
619 *
620 * @cl: host client
621 *
622 * Return: 0 on success
623 * -EINVAL on incorrect values
624 * -EMFILE if open count exceeded.
625 */
626 int mei_cl_link(struct mei_cl *cl)
627 {
628 struct mei_device *dev;
629 int id;
630
631 if (WARN_ON(!cl || !cl->dev))
632 return -EINVAL;
633
634 dev = cl->dev;
635
636 id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
637 if (id >= MEI_CLIENTS_MAX) {
638 dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
639 return -EMFILE;
640 }
641
642 if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
643 dev_err(dev->dev, "open_handle_count exceeded %d",
644 MEI_MAX_OPEN_HANDLE_COUNT);
645 return -EMFILE;
646 }
647
648 dev->open_handle_count++;
649
650 cl->host_client_id = id;
651 list_add_tail(&cl->link, &dev->file_list);
652
653 set_bit(id, dev->host_clients_map);
654
655 cl->state = MEI_FILE_INITIALIZING;
656
657 cl_dbg(dev, cl, "link cl\n");
658 return 0;
659 }
660
661 /**
662 * mei_cl_unlink - remove host client from the list
663 *
664 * @cl: host client
665 *
666 * Return: always 0
667 */
668 int mei_cl_unlink(struct mei_cl *cl)
669 {
670 struct mei_device *dev;
671
672 /* don't shout on error exit path */
673 if (!cl)
674 return 0;
675
676 if (WARN_ON(!cl->dev))
677 return 0;
678
679 dev = cl->dev;
680
681 cl_dbg(dev, cl, "unlink client");
682
683 if (cl->state == MEI_FILE_UNINITIALIZED)
684 return 0;
685
686 if (dev->open_handle_count > 0)
687 dev->open_handle_count--;
688
689 /* never clear the 0 bit */
690 if (cl->host_client_id)
691 clear_bit(cl->host_client_id, dev->host_clients_map);
692
693 list_del_init(&cl->link);
694
695 cl->state = MEI_FILE_UNINITIALIZED;
696 cl->writing_state = MEI_IDLE;
697
698 WARN_ON(!list_empty(&cl->rd_completed) ||
699 !list_empty(&cl->rd_pending) ||
700 !list_empty(&cl->link));
701
702 return 0;
703 }
704
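/**
 * mei_host_client_init - mark the device enabled, schedule a rescan of
 *	the clients on the bus and allow runtime autosuspend
 *
 * @dev: the device structure
 */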
705 void mei_host_client_init(struct mei_device *dev)
706 {
707 mei_set_devstate(dev, MEI_DEV_ENABLED);
708 dev->reset_count = 0;
709
710 schedule_work(&dev->bus_rescan_work);
711
712 pm_runtime_mark_last_busy(dev->dev);
713 dev_dbg(dev->dev, "rpm: autosuspend\n");
714 pm_request_autosuspend(dev->dev);
715 }
716
717 /**
718 * mei_hbuf_acquire - try to acquire host buffer
719 *
720 * @dev: the device structure
721 * Return: true if host buffer was acquired
722 */
723 bool mei_hbuf_acquire(struct mei_device *dev)
724 {
725 if (mei_pg_state(dev) == MEI_PG_ON ||
726 mei_pg_in_transition(dev)) {
727 dev_dbg(dev->dev, "device is in pg\n");
728 return false;
729 }
730
731 if (!dev->hbuf_is_ready) {
732 dev_dbg(dev->dev, "hbuf is not ready\n");
733 return false;
734 }
735
736 dev->hbuf_is_ready = false;
737
738 return true;
739 }
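/*
 * Once acquired, the host buffer stays marked busy (hbuf_is_ready == false)
 * until the interrupt path marks it ready again, typically after the hardware
 * has consumed the written message.
 */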
740
741 /**
742 * mei_cl_wake_all - wake up readers, writers and event waiters so
743 * they can be interrupted
744 *
745 * @cl: host client
746 */
747 static void mei_cl_wake_all(struct mei_cl *cl)
748 {
749 struct mei_device *dev = cl->dev;
750
751 /* synchronized under device mutex */
752 if (waitqueue_active(&cl->rx_wait)) {
753 cl_dbg(dev, cl, "Waking up reading client!\n");
754 wake_up_interruptible(&cl->rx_wait);
755 }
756 /* synchronized under device mutex */
757 if (waitqueue_active(&cl->tx_wait)) {
758 cl_dbg(dev, cl, "Waking up writing client!\n");
759 wake_up_interruptible(&cl->tx_wait);
760 }
761 /* synchronized under device mutex */
762 if (waitqueue_active(&cl->ev_wait)) {
763 cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
764 wake_up_interruptible(&cl->ev_wait);
765 }
766 /* synchronized under device mutex */
767 if (waitqueue_active(&cl->wait)) {
768 cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
769 wake_up(&cl->wait);
770 }
771 }
772
773 /**
774 * mei_cl_set_disconnected - set disconnected state and clear
775 * associated states and resources
776 *
777 * @cl: host client
778 */
779 static void mei_cl_set_disconnected(struct mei_cl *cl)
780 {
781 struct mei_device *dev = cl->dev;
782
783 if (cl->state == MEI_FILE_DISCONNECTED ||
784 cl->state <= MEI_FILE_INITIALIZING)
785 return;
786
787 cl->state = MEI_FILE_DISCONNECTED;
788 mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
789 mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
790 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
791 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
792 mei_cl_wake_all(cl);
793 cl->rx_flow_ctrl_creds = 0;
794 cl->tx_flow_ctrl_creds = 0;
795 cl->timer_count = 0;
796
797 if (!cl->me_cl)
798 return;
799
800 if (!WARN_ON(cl->me_cl->connect_count == 0))
801 cl->me_cl->connect_count--;
802
803 if (cl->me_cl->connect_count == 0)
804 cl->me_cl->tx_flow_ctrl_creds = 0;
805
806 mei_me_cl_put(cl->me_cl);
807 cl->me_cl = NULL;
808 }
809
810 static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
811 {
812 if (!mei_me_cl_get(me_cl))
813 return -ENOENT;
814
815 /* only one connection is allowed for fixed address clients */
816 if (me_cl->props.fixed_address) {
817 if (me_cl->connect_count) {
818 mei_me_cl_put(me_cl);
819 return -EBUSY;
820 }
821 }
822
823 cl->me_cl = me_cl;
824 cl->state = MEI_FILE_CONNECTING;
825 cl->me_cl->connect_count++;
826
827 return 0;
828 }
829
830 /*
831 * mei_cl_send_disconnect - send disconnect request
832 *
833 * @cl: host client
834 * @cb: callback block
835 *
836 * Return: 0, OK; otherwise, error.
837 */
838 static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
839 {
840 struct mei_device *dev;
841 int ret;
842
843 dev = cl->dev;
844
845 ret = mei_hbm_cl_disconnect_req(dev, cl);
846 cl->status = ret;
847 if (ret) {
848 cl->state = MEI_FILE_DISCONNECT_REPLY;
849 return ret;
850 }
851
852 list_move_tail(&cb->list, &dev->ctrl_rd_list);
853 cl->timer_count = dev->timeouts.connect;
854 mei_schedule_stall_timer(dev);
855
856 return 0;
857 }
858
859 /**
860 * mei_cl_irq_disconnect - processes close related operation from
861 * interrupt thread context - send disconnect request
862 *
863 * @cl: client
864 * @cb: callback block.
865 * @cmpl_list: complete list.
866 *
867 * Return: 0, OK; otherwise, error.
868 */
869 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
870 struct list_head *cmpl_list)
871 {
872 struct mei_device *dev = cl->dev;
873 u32 msg_slots;
874 int slots;
875 int ret;
876
877 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
878 slots = mei_hbuf_empty_slots(dev);
879 if (slots < 0)
880 return -EOVERFLOW;
881
882 if ((u32)slots < msg_slots)
883 return -EMSGSIZE;
884
885 ret = mei_cl_send_disconnect(cl, cb);
886 if (ret)
887 list_move_tail(&cb->list, cmpl_list);
888
889 return ret;
890 }
891
892 /**
893 * __mei_cl_disconnect - disconnect host client from the me one
894 * internal function; runtime pm must already be acquired
895 *
896 * @cl: host client
897 *
898 * Return: 0 on success, <0 on failure.
899 */
900 static int __mei_cl_disconnect(struct mei_cl *cl)
901 {
902 struct mei_device *dev;
903 struct mei_cl_cb *cb;
904 int rets;
905
906 dev = cl->dev;
907
908 cl->state = MEI_FILE_DISCONNECTING;
909
910 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
911 if (!cb) {
912 rets = -ENOMEM;
913 goto out;
914 }
915
916 if (mei_hbuf_acquire(dev)) {
917 rets = mei_cl_send_disconnect(cl, cb);
918 if (rets) {
919 cl_err(dev, cl, "failed to disconnect.\n");
920 goto out;
921 }
922 }
923
924 mutex_unlock(&dev->device_lock);
925 wait_event_timeout(cl->wait,
926 cl->state == MEI_FILE_DISCONNECT_REPLY ||
927 cl->state == MEI_FILE_DISCONNECTED,
928 dev->timeouts.cl_connect);
929 mutex_lock(&dev->device_lock);
930
931 rets = cl->status;
932 if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
933 cl->state != MEI_FILE_DISCONNECTED) {
934 cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
935 rets = -ETIME;
936 }
937
938 out:
939 /* we disconnect also on error */
940 mei_cl_set_disconnected(cl);
941 if (!rets)
942 cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
943
944 mei_io_cb_free(cb);
945 return rets;
946 }
947
948 /**
949 * mei_cl_disconnect - disconnect host client from the me one
950 *
951 * @cl: host client
952 *
953 * Locking: called under "dev->device_lock" lock
954 *
955 * Return: 0 on success, <0 on failure.
956 */
957 int mei_cl_disconnect(struct mei_cl *cl)
958 {
959 struct mei_device *dev;
960 int rets;
961
962 if (WARN_ON(!cl || !cl->dev))
963 return -ENODEV;
964
965 dev = cl->dev;
966
967 cl_dbg(dev, cl, "disconnecting");
968
969 if (!mei_cl_is_connected(cl))
970 return 0;
971
972 if (mei_cl_is_fixed_address(cl)) {
973 mei_cl_set_disconnected(cl);
974 return 0;
975 }
976
977 if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
978 dev->dev_state == MEI_DEV_POWER_DOWN) {
979 cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
980 mei_cl_set_disconnected(cl);
981 return 0;
982 }
983
984 rets = pm_runtime_get(dev->dev);
985 if (rets < 0 && rets != -EINPROGRESS) {
986 pm_runtime_put_noidle(dev->dev);
987 cl_err(dev, cl, "rpm: get failed %d\n", rets);
988 return rets;
989 }
990
991 rets = __mei_cl_disconnect(cl);
992
993 cl_dbg(dev, cl, "rpm: autosuspend\n");
994 pm_runtime_mark_last_busy(dev->dev);
995 pm_runtime_put_autosuspend(dev->dev);
996
997 return rets;
998 }
999
1000
1001 /**
1002 * mei_cl_is_other_connecting - checks if other
1003 * client with the same me client id is connecting
1004 *
1005 * @cl: private data of the file object
1006 *
1007 * Return: true if another client with the same me client id is connecting, false otherwise.
1008 */
1009 static bool mei_cl_is_other_connecting(struct mei_cl *cl)
1010 {
1011 struct mei_device *dev;
1012 struct mei_cl_cb *cb;
1013
1014 dev = cl->dev;
1015
1016 list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
1017 if (cb->fop_type == MEI_FOP_CONNECT &&
1018 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
1019 return true;
1020 }
1021
1022 return false;
1023 }
1024
1025 /**
1026 * mei_cl_send_connect - send connect request
1027 *
1028 * @cl: host client
1029 * @cb: callback block
1030 *
1031 * Return: 0, OK; otherwise, error.
1032 */
1033 static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
1034 {
1035 struct mei_device *dev;
1036 int ret;
1037
1038 dev = cl->dev;
1039
1040 ret = mei_hbm_cl_connect_req(dev, cl);
1041 cl->status = ret;
1042 if (ret) {
1043 cl->state = MEI_FILE_DISCONNECT_REPLY;
1044 return ret;
1045 }
1046
1047 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1048 cl->timer_count = dev->timeouts.connect;
1049 mei_schedule_stall_timer(dev);
1050 return 0;
1051 }
1052
1053 /**
1054 * mei_cl_irq_connect - send connect request in irq_thread context
1055 *
1056 * @cl: host client
1057 * @cb: callback block
1058 * @cmpl_list: complete list
1059 *
1060 * Return: 0, OK; otherwise, error.
1061 */
1062 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1063 struct list_head *cmpl_list)
1064 {
1065 struct mei_device *dev = cl->dev;
1066 u32 msg_slots;
1067 int slots;
1068 int rets;
1069
1070 if (mei_cl_is_other_connecting(cl))
1071 return 0;
1072
1073 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1074 slots = mei_hbuf_empty_slots(dev);
1075 if (slots < 0)
1076 return -EOVERFLOW;
1077
1078 if ((u32)slots < msg_slots)
1079 return -EMSGSIZE;
1080
1081 rets = mei_cl_send_connect(cl, cb);
1082 if (rets)
1083 list_move_tail(&cb->list, cmpl_list);
1084
1085 return rets;
1086 }
1087
1088 /**
1089 * mei_cl_connect - connect host client to the me one
1090 *
1091 * @cl: host client
1092 * @me_cl: me client
1093 * @fp: pointer to file structure
1094 *
1095 * Locking: called under "dev->device_lock" lock
1096 *
1097 * Return: 0 on success, <0 on failure.
1098 */
1099 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1100 const struct file *fp)
1101 {
1102 struct mei_device *dev;
1103 struct mei_cl_cb *cb;
1104 int rets;
1105
1106 if (WARN_ON(!cl || !cl->dev || !me_cl))
1107 return -ENODEV;
1108
1109 dev = cl->dev;
1110
1111 rets = mei_cl_set_connecting(cl, me_cl);
1112 if (rets)
1113 goto nortpm;
1114
1115 if (mei_cl_is_fixed_address(cl)) {
1116 cl->state = MEI_FILE_CONNECTED;
1117 rets = 0;
1118 goto nortpm;
1119 }
1120
1121 rets = pm_runtime_get(dev->dev);
1122 if (rets < 0 && rets != -EINPROGRESS) {
1123 pm_runtime_put_noidle(dev->dev);
1124 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1125 goto nortpm;
1126 }
1127
1128 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
1129 if (!cb) {
1130 rets = -ENOMEM;
1131 goto out;
1132 }
1133
1134 /* run hbuf acquire last so we don't have to undo */
1135 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
1136 rets = mei_cl_send_connect(cl, cb);
1137 if (rets)
1138 goto out;
1139 }
1140
1141 mutex_unlock(&dev->device_lock);
1142 wait_event_timeout(cl->wait,
1143 (cl->state == MEI_FILE_CONNECTED ||
1144 cl->state == MEI_FILE_DISCONNECTED ||
1145 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
1146 cl->state == MEI_FILE_DISCONNECT_REPLY),
1147 dev->timeouts.cl_connect);
1148 mutex_lock(&dev->device_lock);
1149
1150 if (!mei_cl_is_connected(cl)) {
1151 if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
1152 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
1153 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
1154 /* ignore disconnect return value;
1155 * in case of failure reset will be invoked
1156 */
1157 __mei_cl_disconnect(cl);
1158 rets = -EFAULT;
1159 goto out;
1160 }
1161
1162 /* timeout or something went really wrong */
1163 if (!cl->status)
1164 cl->status = -EFAULT;
1165 }
1166
1167 rets = cl->status;
1168 out:
1169 cl_dbg(dev, cl, "rpm: autosuspend\n");
1170 pm_runtime_mark_last_busy(dev->dev);
1171 pm_runtime_put_autosuspend(dev->dev);
1172
1173 mei_io_cb_free(cb);
1174
1175 nortpm:
1176 if (!mei_cl_is_connected(cl))
1177 mei_cl_set_disconnected(cl);
1178
1179 return rets;
1180 }
1181
1182 /**
1183 * mei_cl_alloc_linked - allocate and link host client
1184 *
1185 * @dev: the device structure
1186 *
1187 * Return: cl on success, ERR_PTR on failure
1188 */
1189 struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
1190 {
1191 struct mei_cl *cl;
1192 int ret;
1193
1194 cl = mei_cl_allocate(dev);
1195 if (!cl) {
1196 ret = -ENOMEM;
1197 goto err;
1198 }
1199
1200 ret = mei_cl_link(cl);
1201 if (ret)
1202 goto err;
1203
1204 return cl;
1205 err:
1206 kfree(cl);
1207 return ERR_PTR(ret);
1208 }
1209
1210 /**
1211 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
1212 *
1213 * @cl: host client
1214 *
1215 * Return: 1 if tx_flow_ctrl_creds > 0 or flow control is not required, 0 otherwise.
1216 */
1217 static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
1218 {
1219 if (WARN_ON(!cl || !cl->me_cl))
1220 return -EINVAL;
1221
1222 if (cl->tx_flow_ctrl_creds > 0)
1223 return 1;
1224
1225 if (mei_cl_is_fixed_address(cl))
1226 return 1;
1227
1228 if (mei_cl_is_single_recv_buf(cl)) {
1229 if (cl->me_cl->tx_flow_ctrl_creds > 0)
1230 return 1;
1231 }
1232 return 0;
1233 }
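/*
 * Credit model used above and below: fixed address clients are not flow
 * controlled at all; clients with a single receive buffer share the credit
 * pool on the me client (me_cl->tx_flow_ctrl_creds); all other clients consume
 * their per-connection cl->tx_flow_ctrl_creds.
 */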
1234
1235 /**
1236 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1237 * for a client
1238 *
1239 * @cl: host client
1240 *
1241 * Return:
1242 * 0 on success
1243 * -EINVAL when ctrl credits are <= 0
1244 */
1245 static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
1246 {
1247 if (WARN_ON(!cl || !cl->me_cl))
1248 return -EINVAL;
1249
1250 if (mei_cl_is_fixed_address(cl))
1251 return 0;
1252
1253 if (mei_cl_is_single_recv_buf(cl)) {
1254 if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
1255 return -EINVAL;
1256 cl->me_cl->tx_flow_ctrl_creds--;
1257 } else {
1258 if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
1259 return -EINVAL;
1260 cl->tx_flow_ctrl_creds--;
1261 }
1262 return 0;
1263 }
1264
1265 /**
1266 * mei_cl_vtag_alloc - allocate and fill the vtag structure
1267 *
1268 * @fp: pointer to file structure
1269 * @vtag: vm tag
1270 *
1271 * Return:
1272 * * Pointer to allocated struct - on success
1273 * * ERR_PTR(-ENOMEM) on memory allocation failure
1274 */
1275 struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
1276 {
1277 struct mei_cl_vtag *cl_vtag;
1278
1279 cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
1280 if (!cl_vtag)
1281 return ERR_PTR(-ENOMEM);
1282
1283 INIT_LIST_HEAD(&cl_vtag->list);
1284 cl_vtag->vtag = vtag;
1285 cl_vtag->fp = fp;
1286
1287 return cl_vtag;
1288 }
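/*
 * Intended usage (sketch): callers allocate an entry with mei_cl_vtag_alloc()
 * and add it to cl->vtag_map under the device lock; lookups then go through
 * mei_cl_fp_by_vtag() and the vtag helpers below.
 */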
1289
1290 /**
1291 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
1292 *
1293 * @cl: host client
1294 * @vtag: virtual tag
1295 *
1296 * Return:
1297 * * A file pointer - on success
1298 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
1299 */
1300 const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
1301 {
1302 struct mei_cl_vtag *vtag_l;
1303
1304 list_for_each_entry(vtag_l, &cl->vtag_map, list)
1305 /* The client on bus has one fixed fp */
1306 if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
1307 vtag_l->vtag == vtag)
1308 return vtag_l->fp;
1309
1310 return ERR_PTR(-ENOENT);
1311 }
1312
1313 /**
1314 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
1315 *
1316 * @cl: host client
1317 * @vtag: vm tag
1318 */
1319 static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
1320 {
1321 struct mei_cl_vtag *vtag_l;
1322
1323 list_for_each_entry(vtag_l, &cl->vtag_map, list) {
1324 /* The client on bus has one fixed vtag map */
1325 if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
1326 vtag_l->vtag == vtag) {
1327 vtag_l->pending_read = false;
1328 break;
1329 }
1330 }
1331 }
1332
1333 /**
1334 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
1335 * in the vtag list
1336 *
1337 * @cl: host client
1338 */
1339 static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
1340 {
1341 struct mei_cl_vtag *cl_vtag;
1342
1343 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
1344 if (cl_vtag->pending_read) {
1345 if (mei_cl_enqueue_ctrl_wr_cb(cl,
1346 mei_cl_mtu(cl),
1347 MEI_FOP_READ,
1348 cl_vtag->fp))
1349 cl->rx_flow_ctrl_creds++;
1350 break;
1351 }
1352 }
1353 }
1354
1355 /**
1356 * mei_cl_vt_support_check - check if client support vtags
1357 *
1358 * @cl: host client
1359 *
1360 * Return:
1361 * * 0 - supported, or not connected at all
1362 * * -EOPNOTSUPP - vtags are not supported by client
1363 */
1364 int mei_cl_vt_support_check(const struct mei_cl *cl)
1365 {
1366 struct mei_device *dev = cl->dev;
1367
1368 if (!dev->hbm_f_vt_supported)
1369 return -EOPNOTSUPP;
1370
1371 if (!cl->me_cl)
1372 return 0;
1373
1374 return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
1375 }
1376
1377 /**
1378 * mei_cl_add_rd_completed - add read completed callback to list with lock
1379 * and vtag check
1380 *
1381 * @cl: host client
1382 * @cb: callback block
1383 *
1384 */
1385 void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
1386 {
1387 const struct file *fp;
1388
1389 if (!mei_cl_vt_support_check(cl)) {
1390 fp = mei_cl_fp_by_vtag(cl, cb->vtag);
1391 if (IS_ERR(fp)) {
1392 /* client already disconnected, discarding */
1393 mei_io_cb_free(cb);
1394 return;
1395 }
1396 cb->fp = fp;
1397 mei_cl_reset_read_by_vtag(cl, cb->vtag);
1398 mei_cl_read_vtag_add_fc(cl);
1399 }
1400
1401 spin_lock(&cl->rd_completed_lock);
1402 list_add_tail(&cb->list, &cl->rd_completed);
1403 spin_unlock(&cl->rd_completed_lock);
1404 }
1405
1406 /**
1407 * mei_cl_del_rd_completed - free read completed callback with lock
1408 *
1409 * @cl: host client
1410 * @cb: callback block
1411 *
1412 */
1413 void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
1414 {
1415 spin_lock(&cl->rd_completed_lock);
1416 mei_io_cb_free(cb);
1417 spin_unlock(&cl->rd_completed_lock);
1418 }
1419
1420 /**
1421 * mei_cl_notify_fop2req - convert fop to proper request
1422 *
1423 * @fop: client notification start response command
1424 *
1425 * Return: MEI_HBM_NOTIFICATION_START/STOP
1426 */
1427 u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1428 {
1429 if (fop == MEI_FOP_NOTIFY_START)
1430 return MEI_HBM_NOTIFICATION_START;
1431 else
1432 return MEI_HBM_NOTIFICATION_STOP;
1433 }
1434
1435 /**
1436 * mei_cl_notify_req2fop - convert notification request to file operation type
1437 *
1438 * @req: hbm notification request type
1439 *
1440 * Return: MEI_FOP_NOTIFY_START/STOP
1441 */
1442 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1443 {
1444 if (req == MEI_HBM_NOTIFICATION_START)
1445 return MEI_FOP_NOTIFY_START;
1446 else
1447 return MEI_FOP_NOTIFY_STOP;
1448 }
1449
1450 /**
1451 * mei_cl_irq_notify - send notification request in irq_thread context
1452 *
1453 * @cl: client
1454 * @cb: callback block.
1455 * @cmpl_list: complete list.
1456 *
1457 * Return: 0 on success and error otherwise.
1458 */
1459 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1460 struct list_head *cmpl_list)
1461 {
1462 struct mei_device *dev = cl->dev;
1463 u32 msg_slots;
1464 int slots;
1465 int ret;
1466 bool request;
1467
1468 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1469 slots = mei_hbuf_empty_slots(dev);
1470 if (slots < 0)
1471 return -EOVERFLOW;
1472
1473 if ((u32)slots < msg_slots)
1474 return -EMSGSIZE;
1475
1476 request = mei_cl_notify_fop2req(cb->fop_type);
1477 ret = mei_hbm_cl_notify_req(dev, cl, request);
1478 if (ret) {
1479 cl->status = ret;
1480 list_move_tail(&cb->list, cmpl_list);
1481 return ret;
1482 }
1483
1484 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1485 return 0;
1486 }
1487
1488 /**
1489 * mei_cl_notify_request - send notification stop/start request
1490 *
1491 * @cl: host client
1492 * @fp: associate request with file
1493 * @request: 1 for start or 0 for stop
1494 *
1495 * Locking: called under "dev->device_lock" lock
1496 *
1497 * Return: 0 on success and error otherwise.
1498 */
1499 int mei_cl_notify_request(struct mei_cl *cl,
1500 const struct file *fp, u8 request)
1501 {
1502 struct mei_device *dev;
1503 struct mei_cl_cb *cb;
1504 enum mei_cb_file_ops fop_type;
1505 int rets;
1506
1507 if (WARN_ON(!cl || !cl->dev))
1508 return -ENODEV;
1509
1510 dev = cl->dev;
1511
1512 if (!dev->hbm_f_ev_supported) {
1513 cl_dbg(dev, cl, "notifications not supported\n");
1514 return -EOPNOTSUPP;
1515 }
1516
1517 if (!mei_cl_is_connected(cl))
1518 return -ENODEV;
1519
1520 rets = pm_runtime_get(dev->dev);
1521 if (rets < 0 && rets != -EINPROGRESS) {
1522 pm_runtime_put_noidle(dev->dev);
1523 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1524 return rets;
1525 }
1526
1527 fop_type = mei_cl_notify_req2fop(request);
1528 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
1529 if (!cb) {
1530 rets = -ENOMEM;
1531 goto out;
1532 }
1533
1534 if (mei_hbuf_acquire(dev)) {
1535 if (mei_hbm_cl_notify_req(dev, cl, request)) {
1536 rets = -ENODEV;
1537 goto out;
1538 }
1539 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1540 }
1541
1542 mutex_unlock(&dev->device_lock);
1543 wait_event_timeout(cl->wait,
1544 cl->notify_en == request ||
1545 cl->status ||
1546 !mei_cl_is_connected(cl),
1547 dev->timeouts.cl_connect);
1548 mutex_lock(&dev->device_lock);
1549
1550 if (cl->notify_en != request && !cl->status)
1551 cl->status = -EFAULT;
1552
1553 rets = cl->status;
1554
1555 out:
1556 cl_dbg(dev, cl, "rpm: autosuspend\n");
1557 pm_runtime_mark_last_busy(dev->dev);
1558 pm_runtime_put_autosuspend(dev->dev);
1559
1560 mei_io_cb_free(cb);
1561 return rets;
1562 }
1563
1564 /**
1565 * mei_cl_notify - raise notification
1566 *
1567 * @cl: host client
1568 *
1569 * Locking: called under "dev->device_lock" lock
1570 */
1571 void mei_cl_notify(struct mei_cl *cl)
1572 {
1573 struct mei_device *dev;
1574
1575 if (!cl || !cl->dev)
1576 return;
1577
1578 dev = cl->dev;
1579
1580 if (!cl->notify_en)
1581 return;
1582
1583 cl_dbg(dev, cl, "notify event");
1584 cl->notify_ev = true;
1585 if (!mei_cl_bus_notify_event(cl))
1586 wake_up_interruptible(&cl->ev_wait);
1587
1588 if (cl->ev_async)
1589 kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1590
1591 }
1592
1593 /**
1594 * mei_cl_notify_get - get or wait for notification event
1595 *
1596 * @cl: host client
1597 * @block: this request is blocking
1598 * @notify_ev: true if notification event was received
1599 *
1600 * Locking: called under "dev->device_lock" lock
1601 *
1602 * Return: 0 on success and error otherwise.
1603 */
1604 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1605 {
1606 struct mei_device *dev;
1607 int rets;
1608
1609 *notify_ev = false;
1610
1611 if (WARN_ON(!cl || !cl->dev))
1612 return -ENODEV;
1613
1614 dev = cl->dev;
1615
1616 if (!dev->hbm_f_ev_supported) {
1617 cl_dbg(dev, cl, "notifications not supported\n");
1618 return -EOPNOTSUPP;
1619 }
1620
1621 if (!mei_cl_is_connected(cl))
1622 return -ENODEV;
1623
1624 if (cl->notify_ev)
1625 goto out;
1626
1627 if (!block)
1628 return -EAGAIN;
1629
1630 mutex_unlock(&dev->device_lock);
1631 rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1632 mutex_lock(&dev->device_lock);
1633
1634 if (rets < 0)
1635 return rets;
1636
1637 out:
1638 *notify_ev = cl->notify_ev;
1639 cl->notify_ev = false;
1640 return 0;
1641 }
1642
1643 /**
1644 * mei_cl_read_start - the start read client message function.
1645 *
1646 * @cl: host client
1647 * @length: number of bytes to read
1648 * @fp: pointer to file structure
1649 *
1650 * Return: 0 on success, <0 on failure.
1651 */
1652 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
1653 {
1654 struct mei_device *dev;
1655 struct mei_cl_cb *cb;
1656 int rets;
1657
1658 if (WARN_ON(!cl || !cl->dev))
1659 return -ENODEV;
1660
1661 dev = cl->dev;
1662
1663 if (!mei_cl_is_connected(cl))
1664 return -ENODEV;
1665
1666 if (!mei_me_cl_is_active(cl->me_cl)) {
1667 cl_err(dev, cl, "no such me client\n");
1668 return -ENOTTY;
1669 }
1670
1671 if (mei_cl_is_fixed_address(cl))
1672 return 0;
1673
1674 /* HW currently supports only one pending read */
1675 if (cl->rx_flow_ctrl_creds) {
1676 mei_cl_set_read_by_fp(cl, fp);
1677 return -EBUSY;
1678 }
1679
1680 cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
1681 if (!cb)
1682 return -ENOMEM;
1683
1684 mei_cl_set_read_by_fp(cl, fp);
1685
1686 rets = pm_runtime_get(dev->dev);
1687 if (rets < 0 && rets != -EINPROGRESS) {
1688 pm_runtime_put_noidle(dev->dev);
1689 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1690 goto nortpm;
1691 }
1692
1693 rets = 0;
1694 if (mei_hbuf_acquire(dev)) {
1695 rets = mei_hbm_cl_flow_control_req(dev, cl);
1696 if (rets < 0)
1697 goto out;
1698
1699 list_move_tail(&cb->list, &cl->rd_pending);
1700 }
1701 cl->rx_flow_ctrl_creds++;
1702
1703 out:
1704 cl_dbg(dev, cl, "rpm: autosuspend\n");
1705 pm_runtime_mark_last_busy(dev->dev);
1706 pm_runtime_put_autosuspend(dev->dev);
1707 nortpm:
1708 if (rets)
1709 mei_io_cb_free(cb);
1710
1711 return rets;
1712 }
1713
1714 static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
1715 {
1716 struct mei_ext_hdr_vtag *vtag_hdr = ext;
1717
1718 vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
1719 vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
1720 vtag_hdr->vtag = vtag;
1721 vtag_hdr->reserved = 0;
1722 return vtag_hdr->hdr.length;
1723 }
1724
1725 static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext)
1726 {
1727 return ext && ext->type == MEI_EXT_HDR_GSC;
1728 }
1729
1730 static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr)
1731 {
1732 memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr));
1733 return ext->length;
1734 }
1735
1736 /**
1737 * mei_msg_hdr_init - allocate and initialize mei message header
1738 *
1739 * @cb: message callback structure
1740 *
1741 * Return: a pointer to initialized header or ERR_PTR on failure
1742 */
1743 static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
1744 {
1745 size_t hdr_len;
1746 struct mei_ext_meta_hdr *meta;
1747 struct mei_msg_hdr *mei_hdr;
1748 bool is_ext, is_hbm, is_gsc, is_vtag;
1749 struct mei_ext_hdr *next_ext;
1750
1751 if (!cb)
1752 return ERR_PTR(-EINVAL);
1753
1754 /* Extended header for vtag is attached only on the first fragment */
1755 is_vtag = (cb->vtag && cb->buf_idx == 0);
1756 is_hbm = cb->cl->me_cl->client_id == 0;
1757 is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
1758 is_ext = is_vtag || is_gsc;
1759
1760 /* Compute extended header size */
1761 hdr_len = sizeof(*mei_hdr);
1762
1763 if (!is_ext)
1764 goto setup_hdr;
1765
1766 hdr_len += sizeof(*meta);
1767 if (is_vtag)
1768 hdr_len += sizeof(struct mei_ext_hdr_vtag);
1769
1770 if (is_gsc)
1771 hdr_len += mei_ext_hdr_len(cb->ext_hdr);
1772
1773 setup_hdr:
1774 mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
1775 if (!mei_hdr)
1776 return ERR_PTR(-ENOMEM);
1777
1778 mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1779 mei_hdr->me_addr = mei_cl_me_id(cb->cl);
1780 mei_hdr->internal = cb->internal;
1781 mei_hdr->extended = is_ext;
1782
1783 if (!is_ext)
1784 goto out;
1785
1786 meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
1787 meta->size = 0;
1788 next_ext = (struct mei_ext_hdr *)meta->hdrs;
1789 if (is_vtag) {
1790 meta->count++;
1791 meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
1792 next_ext = mei_ext_next(next_ext);
1793 }
1794
1795 if (is_gsc) {
1796 meta->count++;
1797 meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
1798 next_ext = mei_ext_next(next_ext);
1799 }
1800
1801 out:
1802 mei_hdr->length = hdr_len - sizeof(*mei_hdr);
1803 return mei_hdr;
1804 }
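/*
 * Layout produced above when extended headers are present:
 *	struct mei_msg_hdr | struct mei_ext_meta_hdr | [vtag ext] | [gsc ext]
 * At this point mei_hdr->length covers only the extension area; the payload
 * length is added later by the senders.
 */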
1805
1806 /**
1807 * mei_cl_irq_write - write a message to device
1808 * from the interrupt thread context
1809 *
1810 * @cl: client
1811 * @cb: callback block.
1812 * @cmpl_list: complete list.
1813 *
1814 * Return: 0, OK; otherwise error.
1815 */
1816 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1817 struct list_head *cmpl_list)
1818 {
1819 struct mei_device *dev;
1820 struct mei_msg_data *buf;
1821 struct mei_msg_hdr *mei_hdr = NULL;
1822 size_t hdr_len;
1823 size_t hbuf_len, dr_len;
1824 size_t buf_len = 0;
1825 size_t data_len;
1826 int hbuf_slots;
1827 u32 dr_slots;
1828 u32 dma_len;
1829 int rets;
1830 bool first_chunk;
1831 const void *data = NULL;
1832
1833 if (WARN_ON(!cl || !cl->dev))
1834 return -ENODEV;
1835
1836 dev = cl->dev;
1837
1838 buf = &cb->buf;
1839
1840 first_chunk = cb->buf_idx == 0;
1841
1842 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
1843 if (rets < 0)
1844 goto err;
1845
1846 if (rets == 0) {
1847 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1848 return 0;
1849 }
1850
1851 if (buf->data) {
1852 buf_len = buf->size - cb->buf_idx;
1853 data = buf->data + cb->buf_idx;
1854 }
1855 hbuf_slots = mei_hbuf_empty_slots(dev);
1856 if (hbuf_slots < 0) {
1857 rets = -EOVERFLOW;
1858 goto err;
1859 }
1860
1861 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
1862 dr_slots = mei_dma_ring_empty_slots(dev);
1863 dr_len = mei_slots2data(dr_slots);
1864
1865 mei_hdr = mei_msg_hdr_init(cb);
1866 if (IS_ERR(mei_hdr)) {
1867 rets = PTR_ERR(mei_hdr);
1868 mei_hdr = NULL;
1869 goto err;
1870 }
1871
1872 hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
1873
1874 /**
1875 * Split the message only if we can write the whole host buffer
1876 * otherwise wait for next time the host buffer is empty.
1877 */
1878 if (hdr_len + buf_len <= hbuf_len) {
1879 data_len = buf_len;
1880 mei_hdr->msg_complete = 1;
1881 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1882 mei_hdr->dma_ring = 1;
1883 if (buf_len > dr_len)
1884 buf_len = dr_len;
1885 else
1886 mei_hdr->msg_complete = 1;
1887
1888 data_len = sizeof(dma_len);
1889 dma_len = buf_len;
1890 data = &dma_len;
1891 } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
1892 buf_len = hbuf_len - hdr_len;
1893 data_len = buf_len;
1894 } else {
1895 kfree(mei_hdr);
1896 return 0;
1897 }
1898 mei_hdr->length += data_len;
1899
1900 if (mei_hdr->dma_ring && buf->data)
1901 mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
1902 rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
1903
1904 if (rets)
1905 goto err;
1906
1907 cl->status = 0;
1908 cl->writing_state = MEI_WRITING;
1909 cb->buf_idx += buf_len;
1910
1911 if (first_chunk) {
1912 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1913 rets = -EIO;
1914 goto err;
1915 }
1916 }
1917
1918 if (mei_hdr->msg_complete)
1919 list_move_tail(&cb->list, &dev->write_waiting_list);
1920
1921 kfree(mei_hdr);
1922 return 0;
1923
1924 err:
1925 kfree(mei_hdr);
1926 cl->status = rets;
1927 list_move_tail(&cb->list, cmpl_list);
1928 return rets;
1929 }
1930
1931 /**
1932 * mei_cl_write - submit a write cb to mei device
1933 * assumes device_lock is locked
1934 *
1935 * @cl: host client
1936 * @cb: write callback with filled data
1937 * @timeout: send timeout in milliseconds.
1938 * effective only for blocking writes, i.e. when cb->blocking is set;
1939 * set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
1940 *
1941 * Return: number of bytes sent on success, <0 on failure.
1942 */
1943 ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
1944 {
1945 struct mei_device *dev;
1946 struct mei_msg_data *buf;
1947 struct mei_msg_hdr *mei_hdr = NULL;
1948 size_t hdr_len;
1949 size_t hbuf_len, dr_len;
1950 size_t buf_len;
1951 size_t data_len;
1952 int hbuf_slots;
1953 u32 dr_slots;
1954 u32 dma_len;
1955 ssize_t rets;
1956 bool blocking;
1957 const void *data;
1958
1959 if (WARN_ON(!cl || !cl->dev))
1960 return -ENODEV;
1961
1962 if (WARN_ON(!cb))
1963 return -EINVAL;
1964
1965 dev = cl->dev;
1966
1967 buf = &cb->buf;
1968 buf_len = buf->size;
1969
1970 cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
1971
1972 blocking = cb->blocking;
1973 data = buf->data;
1974
1975 rets = pm_runtime_get(dev->dev);
1976 if (rets < 0 && rets != -EINPROGRESS) {
1977 pm_runtime_put_noidle(dev->dev);
1978 cl_err(dev, cl, "rpm: get failed %zd\n", rets);
1979 goto free;
1980 }
1981
1982 cb->buf_idx = 0;
1983 cl->writing_state = MEI_IDLE;
1984
1985
1986 rets = mei_cl_tx_flow_ctrl_creds(cl);
1987 if (rets < 0)
1988 goto err;
1989
1990 mei_hdr = mei_msg_hdr_init(cb);
1991 if (IS_ERR(mei_hdr)) {
1992 rets = PTR_ERR(mei_hdr);
1993 mei_hdr = NULL;
1994 goto err;
1995 }
1996
1997 hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
1998
1999 if (rets == 0) {
2000 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
2001 rets = buf_len;
2002 goto out;
2003 }
2004
2005 if (!mei_hbuf_acquire(dev)) {
2006 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
2007 rets = buf_len;
2008 goto out;
2009 }
2010
2011 hbuf_slots = mei_hbuf_empty_slots(dev);
2012 if (hbuf_slots < 0) {
2013 buf_len = -EOVERFLOW;
2014 goto out;
2015 }
2016
2017 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
2018 dr_slots = mei_dma_ring_empty_slots(dev);
2019 dr_len = mei_slots2data(dr_slots);
2020
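	/*
	 * As in mei_cl_irq_write(): send in a single chunk if it fits the
	 * host buffer, fall back to the DMA ring when one is available,
	 * otherwise send only the first fragment that fills the host buffer.
	 */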
2021 if (hdr_len + buf_len <= hbuf_len) {
2022 data_len = buf_len;
2023 mei_hdr->msg_complete = 1;
2024 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
2025 mei_hdr->dma_ring = 1;
2026 if (buf_len > dr_len)
2027 buf_len = dr_len;
2028 else
2029 mei_hdr->msg_complete = 1;
2030
2031 data_len = sizeof(dma_len);
2032 dma_len = buf_len;
2033 data = &dma_len;
2034 } else {
2035 buf_len = hbuf_len - hdr_len;
2036 data_len = buf_len;
2037 }
2038
2039 mei_hdr->length += data_len;
2040
2041 if (mei_hdr->dma_ring && buf->data)
2042 mei_dma_ring_write(dev, buf->data, buf_len);
2043 rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
2044
2045 if (rets)
2046 goto err;
2047
2048 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
2049 if (rets)
2050 goto err;
2051
2052 cl->writing_state = MEI_WRITING;
2053 cb->buf_idx = buf_len;
2054 /* restore return value */
2055 buf_len = buf->size;
2056
2057 out:
2058 if (mei_hdr->msg_complete)
2059 mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
2060 else
2061 mei_tx_cb_enqueue(cb, &dev->write_list);
2062
2063 cb = NULL;
2064 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
2065
2066 mutex_unlock(&dev->device_lock);
2067 rets = wait_event_interruptible_timeout(cl->tx_wait,
2068 cl->writing_state == MEI_WRITE_COMPLETE ||
2069 (!mei_cl_is_connected(cl)),
2070 msecs_to_jiffies(timeout));
2071 mutex_lock(&dev->device_lock);
2072 /* clean all queues on timeout as something fatal happened */
2073 if (rets == 0) {
2074 rets = -ETIME;
2075 mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
2076 mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
2077 }
2078 /* wait_event_interruptible_timeout returns -ERESTARTSYS on a signal */
2079 if (rets > 0)
2080 rets = 0;
2081 if (rets) {
2082 if (signal_pending(current))
2083 rets = -EINTR;
2084 goto err;
2085 }
2086 if (cl->writing_state != MEI_WRITE_COMPLETE) {
2087 rets = -EFAULT;
2088 goto err;
2089 }
2090 }
2091
2092 rets = buf_len;
2093 err:
2094 cl_dbg(dev, cl, "rpm: autosuspend\n");
2095 pm_runtime_mark_last_busy(dev->dev);
2096 pm_runtime_put_autosuspend(dev->dev);
2097 free:
2098 mei_io_cb_free(cb);
2099
2100 kfree(mei_hdr);
2101
2102 return rets;
2103 }
2104
2105 /**
2106 * mei_cl_complete - processes completed operation for a client
2107 *
2108 * @cl: private data of the file object.
2109 * @cb: callback block.
2110 */
2111 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
2112 {
2113 struct mei_device *dev = cl->dev;
2114
2115 switch (cb->fop_type) {
2116 case MEI_FOP_WRITE:
2117 mei_tx_cb_dequeue(cb);
2118 cl->writing_state = MEI_WRITE_COMPLETE;
2119 if (waitqueue_active(&cl->tx_wait)) {
2120 wake_up_interruptible(&cl->tx_wait);
2121 } else {
2122 pm_runtime_mark_last_busy(dev->dev);
2123 pm_request_autosuspend(dev->dev);
2124 }
2125 break;
2126
	case MEI_FOP_READ:
		mei_cl_add_rd_completed(cl, cb);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
	case MEI_FOP_DMA_MAP:
	case MEI_FOP_DMA_UNMAP:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG_ON(0);
	}
}


/**
 * mei_cl_all_disconnect - forcefully disconnect all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}
EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);

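/**
 * mei_cl_dma_map_find - find a client by dma mapped buffer id
 *
 * @dev: mei device
 * @buffer_id: id of the mapped buffer
 *
 * Return: the host client owning the buffer or NULL if not found
 */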
static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		if (cl->dma.buffer_id == buffer_id)
			return cl;
	return NULL;
}

/**
 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success, error code otherwise.
 */
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_map_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success, error code otherwise.
 */
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
			 struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_unmap_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

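/**
 * mei_cl_dma_alloc - allocate a DMA coherent buffer for client use
 *
 * @cl: host client
 * @buf_id: id of the buffer to be mapped
 * @size: size of the buffer
 *
 * The allocation is device-managed (dmam_*), so it is also released
 * automatically when the device is unbound.
 *
 * Return: 0 on success, -ENOMEM if the allocation fails
 */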
static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
					    &cl->dma.daddr, GFP_KERNEL);
	if (!cl->dma.vaddr)
		return -ENOMEM;

	cl->dma.buffer_id = buf_id;
	cl->dma.size = size;

	return 0;
}

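/**
 * mei_cl_dma_free - release the client DMA buffer and reset its bookkeeping
 *
 * @cl: host client
 */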
static void mei_cl_dma_free(struct mei_cl *cl)
{
	cl->dma.buffer_id = 0;
	dmam_free_coherent(cl->dev->dev,
			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
	cl->dma.size = 0;
	cl->dma.vaddr = NULL;
	cl->dma.daddr = 0;
}

/**
 * mei_cl_dma_alloc_and_map - send client dma map request
 *
 * @cl: host client
 * @fp: pointer to file structure
 * @buffer_id: id of the mapped buffer
 * @size: size of the buffer
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return:
 * * 0 on success
 * * -ENODEV
 * * -EINVAL
 * * -EOPNOTSUPP
 * * -EPROTO
 * * -ENOMEM
 */
int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
			     u8 buffer_id, size_t size)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	if (buffer_id == 0)
		return -EINVAL;

	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (cl->dma_mapped)
		return -EPROTO;

	if (mei_cl_dma_map_find(dev, buffer_id)) {
		cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
		       buffer_id);
		return -EPROTO;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = mei_cl_dma_alloc(cl, buffer_id, size);
	if (rets) {
		pm_runtime_put_noidle(dev->dev);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

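	/*
	 * If the host buffer is free, send the map request right away;
	 * otherwise the cb stays on the ctrl_wr_list and the request is sent
	 * later from the interrupt thread via mei_cl_irq_dma_map().
	 */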
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_map_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->dma_mapped || cl->status,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (!cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	if (rets)
		mei_cl_dma_free(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
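
/*
 * The firmware's reply to the map request is processed in the HBM layer,
 * which sets cl->dma_mapped (or cl->status on failure) and wakes cl->wait;
 * mei_cl_dma_unmap() below performs the reverse operation.
 */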

/**
 * mei_cl_dma_unmap - send client dma unmap request
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, error code otherwise.
 */
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	/* do not allow unmap for connected client */
	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (!cl->dma_mapped)
		return -EPROTO;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

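	/*
	 * If the host buffer is free, send the unmap request right away;
	 * otherwise the cb stays on the ctrl_wr_list and the request is sent
	 * later from the interrupt thread via mei_cl_irq_dma_unmap().
	 */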
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   !cl->dma_mapped || cl->status,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	if (!rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
