1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4 *
5 */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/interrupt.h>
12 #include <linux/list.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/wait.h>
17 #include "internal.h"
18 #include "trace.h"
19
20 /*
21 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
22 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
23 * transition to a new state only if we're allowed to.
24 *
25 * Priority increases as we go down. For instance, from any state in L0, the
26 * transition can be made to states in L1, L2 and L3. A notable exception to
27 * this rule is state DISABLE. From DISABLE state we can only transition to
28 * POR state. Also, while in the L2 state, the user cannot jump back to the
29 * previous L1 or L0 states.
30 *
31 * Valid transitions:
32 * L0: DISABLE <--> POR
33 * POR <--> POR
34 * POR -> M0 -> M2 --> M0
35 * POR -> FW_DL_ERR
36 * FW_DL_ERR <--> FW_DL_ERR
37 * M0 <--> M0
38 * M0 -> FW_DL_ERR
39 * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
40 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
41 * SYS_ERR_PROCESS -> SYS_ERR_FAIL
42 * SYS_ERR_FAIL -> SYS_ERR_DETECT
43 * SYS_ERR_PROCESS --> POR
44 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
45 * SHUTDOWN_PROCESS -> DISABLE
46 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
47 * LD_ERR_FATAL_DETECT -> DISABLE
48 */
49 static const struct mhi_pm_transitions dev_state_transitions[] = {
50 /* L0 States */
51 {
52 MHI_PM_DISABLE,
53 MHI_PM_POR
54 },
55 {
56 MHI_PM_POR,
57 MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
58 MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
59 MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
60 },
61 {
62 MHI_PM_M0,
63 MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
64 MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
65 MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
66 },
67 {
68 MHI_PM_M2,
69 MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
70 MHI_PM_LD_ERR_FATAL_DETECT
71 },
72 {
73 MHI_PM_M3_ENTER,
74 MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
75 MHI_PM_LD_ERR_FATAL_DETECT
76 },
77 {
78 MHI_PM_M3,
79 MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
80 MHI_PM_LD_ERR_FATAL_DETECT
81 },
82 {
83 MHI_PM_M3_EXIT,
84 MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
85 MHI_PM_LD_ERR_FATAL_DETECT
86 },
87 {
88 MHI_PM_FW_DL_ERR,
89 MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
90 MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
91 },
92 /* L1 States */
93 {
94 MHI_PM_SYS_ERR_DETECT,
95 MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
96 MHI_PM_LD_ERR_FATAL_DETECT
97 },
98 {
99 MHI_PM_SYS_ERR_PROCESS,
100 MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
101 MHI_PM_LD_ERR_FATAL_DETECT
102 },
103 {
104 MHI_PM_SYS_ERR_FAIL,
105 MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
106 MHI_PM_LD_ERR_FATAL_DETECT
107 },
108 /* L2 States */
109 {
110 MHI_PM_SHUTDOWN_PROCESS,
111 MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
112 },
113 /* L3 States */
114 {
115 MHI_PM_LD_ERR_FATAL_DETECT,
116 MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
117 },
118 };
119
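/*
 * Example: with pm_state == MHI_PM_M0, a request for MHI_PM_M3_ENTER is
 * accepted (the bit is set in M0's to_states mask above), whereas a
 * request for MHI_PM_POR is rejected and the current state is returned.
 */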
120 enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
121 enum mhi_pm_state state)
122 {
123 unsigned long cur_state = mhi_cntrl->pm_state;
124 int index = find_last_bit(&cur_state, 32);
125
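/*
 * pm_state is a single-bit value; find_last_bit() turns it into an
 * index into dev_state_transitions[]. Unknown or mismatched states
 * fall through and the current state is returned unchanged.
 */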
126 if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
127 return cur_state;
128
129 if (unlikely(dev_state_transitions[index].from_state != cur_state))
130 return cur_state;
131
132 if (unlikely(!(dev_state_transitions[index].to_states & state)))
133 return cur_state;
134
135 trace_mhi_tryset_pm_state(mhi_cntrl, state);
136 mhi_cntrl->pm_state = state;
137 return mhi_cntrl->pm_state;
138 }
139
140 void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
141 {
142 struct device *dev = &mhi_cntrl->mhi_dev->dev;
143 int ret;
144
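/*
 * MHI RESET is requested through its own bit in MHICTRL; every other
 * state is written to the MHISTATE field of the same register.
 */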
145 if (state == MHI_STATE_RESET) {
146 ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
147 MHICTRL_RESET_MASK, 1);
148 } else {
149 ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
150 MHICTRL_MHISTATE_MASK, state);
151 }
152
153 if (ret)
154 dev_err(dev, "Failed to set MHI state to: %s\n",
155 mhi_state_str(state));
156 }
157
158 /* NOP for backward compatibility, host allowed to ring DB in M2 state */
159 static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
160 {
161 }
162
163 static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
164 {
165 mhi_cntrl->wake_get(mhi_cntrl, false);
166 mhi_cntrl->wake_put(mhi_cntrl, true);
167 }
168
169 /* Handle device ready state transition */
170 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
171 {
172 struct mhi_event *mhi_event;
173 enum mhi_pm_state cur_state;
174 struct device *dev = &mhi_cntrl->mhi_dev->dev;
175 u32 interval_us = 25000; /* poll register field every 25 milliseconds */
176 u32 timeout_ms;
177 int ret, i;
178
179 /* Check if device entered error state */
180 if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
181 dev_err(dev, "Device link is not accessible\n");
182 return -EIO;
183 }
184
185 /* Wait for RESET to be cleared and READY bit to be set by the device */
186 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
187 MHICTRL_RESET_MASK, 0, interval_us,
188 mhi_cntrl->timeout_ms);
189 if (ret) {
190 dev_err(dev, "Device failed to clear MHI Reset\n");
191 return ret;
192 }
193
194 timeout_ms = mhi_cntrl->ready_timeout_ms ?
195 mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
196 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
197 MHISTATUS_READY_MASK, 1, interval_us,
198 timeout_ms);
199 if (ret) {
200 dev_err(dev, "Device failed to enter MHI Ready\n");
201 return ret;
202 }
203
204 dev_dbg(dev, "Device in READY State\n");
205 write_lock_irq(&mhi_cntrl->pm_lock);
206 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
207 mhi_cntrl->dev_state = MHI_STATE_READY;
208 write_unlock_irq(&mhi_cntrl->pm_lock);
209
210 if (cur_state != MHI_PM_POR) {
211 dev_err(dev, "Error moving to state %s from %s\n",
212 to_mhi_pm_state_str(MHI_PM_POR),
213 to_mhi_pm_state_str(cur_state));
214 return -EIO;
215 }
216
217 read_lock_bh(&mhi_cntrl->pm_lock);
218 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
219 dev_err(dev, "Device registers not accessible\n");
220 goto error_mmio;
221 }
222
223 /* Configure MMIO registers */
224 ret = mhi_init_mmio(mhi_cntrl);
225 if (ret) {
226 dev_err(dev, "Error configuring MMIO registers\n");
227 goto error_mmio;
228 }
229
230 /* Add elements to all SW event rings */
231 mhi_event = mhi_cntrl->mhi_event;
232 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
233 struct mhi_ring *ring = &mhi_event->ring;
234
235 /* Skip if this is an offload or HW event */
236 if (mhi_event->offload_ev || mhi_event->hw_ring)
237 continue;
238
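/*
 * Point the write pointer at the last element so the entire event
 * ring is available to the device, then publish the update before
 * ringing the doorbell.
 */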
239 ring->wp = ring->base + ring->len - ring->el_size;
240 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
241 /* Update all cores */
242 smp_wmb();
243
244 /* Ring the event ring db */
245 spin_lock_irq(&mhi_event->lock);
246 mhi_ring_er_db(mhi_event);
247 spin_unlock_irq(&mhi_event->lock);
248 }
249
250 /* Set MHI to M0 state */
251 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
252 read_unlock_bh(&mhi_cntrl->pm_lock);
253
254 return 0;
255
256 error_mmio:
257 read_unlock_bh(&mhi_cntrl->pm_lock);
258
259 return -EIO;
260 }
261
262 int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
263 {
264 enum mhi_pm_state cur_state;
265 struct mhi_chan *mhi_chan;
266 struct device *dev = &mhi_cntrl->mhi_dev->dev;
267 int i;
268
269 write_lock_irq(&mhi_cntrl->pm_lock);
270 mhi_cntrl->dev_state = MHI_STATE_M0;
271 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
272 write_unlock_irq(&mhi_cntrl->pm_lock);
273 if (unlikely(cur_state != MHI_PM_M0)) {
274 dev_err(dev, "Unable to transition to M0 state\n");
275 return -EIO;
276 }
277 mhi_cntrl->M0++;
278
279 /* Wake up the device */
280 read_lock_bh(&mhi_cntrl->pm_lock);
281 mhi_cntrl->wake_get(mhi_cntrl, true);
282
283 /* Ring all event rings and CMD ring only if we're in mission mode */
284 if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
285 struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
286 struct mhi_cmd *mhi_cmd =
287 &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
288
289 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
290 if (mhi_event->offload_ev)
291 continue;
292
293 spin_lock_irq(&mhi_event->lock);
294 mhi_ring_er_db(mhi_event);
295 spin_unlock_irq(&mhi_event->lock);
296 }
297
298 /* Only ring primary cmd ring if ring is not empty */
299 spin_lock_irq(&mhi_cmd->lock);
300 if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
301 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
302 spin_unlock_irq(&mhi_cmd->lock);
303 }
304
305 /* Ring channel DB registers */
306 mhi_chan = mhi_cntrl->mhi_chan;
307 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
308 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
309
310 if (mhi_chan->db_cfg.reset_req) {
311 write_lock_irq(&mhi_chan->lock);
312 mhi_chan->db_cfg.db_mode = true;
313 write_unlock_irq(&mhi_chan->lock);
314 }
315
316 read_lock_irq(&mhi_chan->lock);
317
318 /* Only ring DB if ring is not empty */
319 if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
320 mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
321 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
322 read_unlock_irq(&mhi_chan->lock);
323 }
324
325 mhi_cntrl->wake_put(mhi_cntrl, false);
326 read_unlock_bh(&mhi_cntrl->pm_lock);
327 wake_up_all(&mhi_cntrl->state_event);
328
329 return 0;
330 }
331
332 /*
333 * After receiving the MHI state change event from the device indicating the
334 * transition to M1 state, the host can transition the device to M2 state
335 * for keeping it in low power state.
336 */
337 void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
338 {
339 enum mhi_pm_state state;
340 struct device *dev = &mhi_cntrl->mhi_dev->dev;
341
342 write_lock_irq(&mhi_cntrl->pm_lock);
343 state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
344 if (state == MHI_PM_M2) {
345 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
346 mhi_cntrl->dev_state = MHI_STATE_M2;
347
348 write_unlock_irq(&mhi_cntrl->pm_lock);
349
350 mhi_cntrl->M2++;
351 wake_up_all(&mhi_cntrl->state_event);
352
353 /* If there are any pending resources, exit M2 immediately */
354 if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
355 atomic_read(&mhi_cntrl->dev_wake))) {
356 dev_dbg(dev,
357 "Exiting M2, pending_pkts: %d dev_wake: %d\n",
358 atomic_read(&mhi_cntrl->pending_pkts),
359 atomic_read(&mhi_cntrl->dev_wake));
360 read_lock_bh(&mhi_cntrl->pm_lock);
361 mhi_cntrl->wake_get(mhi_cntrl, true);
362 mhi_cntrl->wake_put(mhi_cntrl, true);
363 read_unlock_bh(&mhi_cntrl->pm_lock);
364 } else {
365 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
366 }
367 } else {
368 write_unlock_irq(&mhi_cntrl->pm_lock);
369 }
370 }
371
372 /* MHI M3 completion handler */
373 int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
374 {
375 enum mhi_pm_state state;
376 struct device *dev = &mhi_cntrl->mhi_dev->dev;
377
378 write_lock_irq(&mhi_cntrl->pm_lock);
379 mhi_cntrl->dev_state = MHI_STATE_M3;
380 state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
381 write_unlock_irq(&mhi_cntrl->pm_lock);
382 if (state != MHI_PM_M3) {
383 dev_err(dev, "Unable to transition to M3 state\n");
384 return -EIO;
385 }
386
387 mhi_cntrl->M3++;
388 wake_up_all(&mhi_cntrl->state_event);
389
390 return 0;
391 }
392
393 /* Handle device Mission Mode transition */
394 static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
395 {
396 struct mhi_event *mhi_event;
397 struct device *dev = &mhi_cntrl->mhi_dev->dev;
398 enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
399 int i, ret;
400
401 dev_dbg(dev, "Processing Mission Mode transition\n");
402
403 write_lock_irq(&mhi_cntrl->pm_lock);
404 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
405 ee = mhi_get_exec_env(mhi_cntrl);
406
407 if (!MHI_IN_MISSION_MODE(ee)) {
408 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
409 write_unlock_irq(&mhi_cntrl->pm_lock);
410 wake_up_all(&mhi_cntrl->state_event);
411 return -EIO;
412 }
413 mhi_cntrl->ee = ee;
414 write_unlock_irq(&mhi_cntrl->pm_lock);
415
416 wake_up_all(&mhi_cntrl->state_event);
417
418 device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
419 mhi_destroy_device);
420 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
421 mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
422
423 /* Force MHI to be in M0 state before continuing */
424 ret = __mhi_device_get_sync(mhi_cntrl);
425 if (ret)
426 return ret;
427
428 read_lock_bh(&mhi_cntrl->pm_lock);
429
430 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
431 ret = -EIO;
432 goto error_mission_mode;
433 }
434
435 /* Add elements to all HW event rings */
436 mhi_event = mhi_cntrl->mhi_event;
437 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
438 struct mhi_ring *ring = &mhi_event->ring;
439
440 if (mhi_event->offload_ev || !mhi_event->hw_ring)
441 continue;
442
443 ring->wp = ring->base + ring->len - ring->el_size;
444 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
445 /* Update to all cores */
446 smp_wmb();
447
448 spin_lock_irq(&mhi_event->lock);
449 if (MHI_DB_ACCESS_VALID(mhi_cntrl))
450 mhi_ring_er_db(mhi_event);
451 spin_unlock_irq(&mhi_event->lock);
452 }
453
454 read_unlock_bh(&mhi_cntrl->pm_lock);
455
456 /*
457 * The MHI devices are only created when the client device switches its
458 * Execution Environment (EE) to either SBL or AMSS states
459 */
460 mhi_create_devices(mhi_cntrl);
461
462 read_lock_bh(&mhi_cntrl->pm_lock);
463
464 error_mission_mode:
465 mhi_cntrl->wake_put(mhi_cntrl, false);
466 read_unlock_bh(&mhi_cntrl->pm_lock);
467
468 return ret;
469 }
470
471 /* Handle shutdown transitions */
472 static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
473 bool destroy_device)
474 {
475 enum mhi_pm_state cur_state;
476 struct mhi_event *mhi_event;
477 struct mhi_cmd_ctxt *cmd_ctxt;
478 struct mhi_cmd *mhi_cmd;
479 struct mhi_event_ctxt *er_ctxt;
480 struct device *dev = &mhi_cntrl->mhi_dev->dev;
481 int ret, i;
482
483 dev_dbg(dev, "Processing disable transition with PM state: %s\n",
484 to_mhi_pm_state_str(mhi_cntrl->pm_state));
485
486 mutex_lock(&mhi_cntrl->pm_mutex);
487
488 /* Trigger MHI RESET so that the device will not access host memory */
489 if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
490 /* Skip MHI RESET if in RDDM state */
491 if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
492 goto skip_mhi_reset;
493
494 dev_dbg(dev, "Triggering MHI Reset in device\n");
495 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
496
497 /* Wait for the reset bit to be cleared by the device */
498 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
499 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
500 if (ret)
501 dev_err(dev, "Device failed to clear MHI Reset\n");
502
503 /*
504 * Device will clear BHI_INTVEC as a part of RESET processing,
505 * hence re-program it
506 */
507 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
508
509 if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
510 /* wait for ready to be set */
511 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
512 MHISTATUS, MHISTATUS_READY_MASK,
513 1, 25000, mhi_cntrl->timeout_ms);
514 if (ret)
515 dev_err(dev, "Device failed to enter READY state\n");
516 }
517 }
518
519 skip_mhi_reset:
520 dev_dbg(dev,
521 "Waiting for all pending event ring processing to complete\n");
522 mhi_event = mhi_cntrl->mhi_event;
523 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
524 if (mhi_event->offload_ev)
525 continue;
526 disable_irq(mhi_cntrl->irq[mhi_event->irq]);
527 tasklet_kill(&mhi_event->task);
528 }
529
530 /* Release lock and wait for all pending threads to complete */
531 mutex_unlock(&mhi_cntrl->pm_mutex);
532 dev_dbg(dev, "Waiting for all pending threads to complete\n");
533 wake_up_all(&mhi_cntrl->state_event);
534
535 /*
536 * Only destroy the 'struct device' for channels if indicated by the
537 * 'destroy_device' flag, because during system suspend or hibernation
538 * there is no need to destroy the 'struct device': the endpoint device
539 * is still physically attached to the machine.
540 */
541 if (destroy_device) {
542 dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
543 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
544 }
545
546 mutex_lock(&mhi_cntrl->pm_mutex);
547
548 WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
549 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
550
551 /* Reset the ev rings and cmd rings */
552 dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
553 mhi_cmd = mhi_cntrl->mhi_cmd;
554 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
555 for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
556 struct mhi_ring *ring = &mhi_cmd->ring;
557
558 ring->rp = ring->base;
559 ring->wp = ring->base;
560 cmd_ctxt->rp = cmd_ctxt->rbase;
561 cmd_ctxt->wp = cmd_ctxt->rbase;
562 }
563
564 mhi_event = mhi_cntrl->mhi_event;
565 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
566 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
567 mhi_event++) {
568 struct mhi_ring *ring = &mhi_event->ring;
569
570 /* Skip offload events */
571 if (mhi_event->offload_ev)
572 continue;
573
574 ring->rp = ring->base;
575 ring->wp = ring->base;
576 er_ctxt->rp = er_ctxt->rbase;
577 er_ctxt->wp = er_ctxt->rbase;
578 }
579
580 /* Move to disable state */
581 write_lock_irq(&mhi_cntrl->pm_lock);
582 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
583 write_unlock_irq(&mhi_cntrl->pm_lock);
584 if (unlikely(cur_state != MHI_PM_DISABLE))
585 dev_err(dev, "Error moving from PM state: %s to: %s\n",
586 to_mhi_pm_state_str(cur_state),
587 to_mhi_pm_state_str(MHI_PM_DISABLE));
588
589 dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
590 to_mhi_pm_state_str(mhi_cntrl->pm_state),
591 mhi_state_str(mhi_cntrl->dev_state));
592
593 mutex_unlock(&mhi_cntrl->pm_mutex);
594 }
595
596 /* Handle system error transitions */
597 static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
598 {
599 enum mhi_pm_state cur_state, prev_state;
600 enum dev_st_transition next_state;
601 struct mhi_event *mhi_event;
602 struct mhi_cmd_ctxt *cmd_ctxt;
603 struct mhi_cmd *mhi_cmd;
604 struct mhi_event_ctxt *er_ctxt;
605 struct device *dev = &mhi_cntrl->mhi_dev->dev;
606 bool reset_device = false;
607 int ret, i;
608
609 dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
610 to_mhi_pm_state_str(mhi_cntrl->pm_state),
611 to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
612
613 /* We must notify MHI control driver so it can clean up first */
614 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
615
616 mutex_lock(&mhi_cntrl->pm_mutex);
617 write_lock_irq(&mhi_cntrl->pm_lock);
618 prev_state = mhi_cntrl->pm_state;
619 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
620 write_unlock_irq(&mhi_cntrl->pm_lock);
621
622 if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
623 dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
624 to_mhi_pm_state_str(cur_state),
625 to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
626 goto exit_sys_error_transition;
627 }
628
629 mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
630 mhi_cntrl->dev_state = MHI_STATE_RESET;
631
632 /* Wake up threads waiting for state transition */
633 wake_up_all(&mhi_cntrl->state_event);
634
635 mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
636
637 if (MHI_REG_ACCESS_VALID(prev_state)) {
638 /*
639 * If the device is in PBL or SBL, it will only respond to
640 * RESET if the device is in SYSERR state. SYSERR might
641 * already be cleared at this point.
642 */
643 enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl);
644 enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl);
645
646 if (cur_state == MHI_STATE_SYS_ERR)
647 reset_device = true;
648 else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL)
649 reset_device = true;
650 }
651
652 /* Trigger MHI RESET so that the device will not access host memory */
653 if (reset_device) {
654 u32 in_reset = -1;
655 unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
656
657 dev_dbg(dev, "Triggering MHI Reset in device\n");
658 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
659
660 /* Wait for the reset bit to be cleared by the device */
661 ret = wait_event_timeout(mhi_cntrl->state_event,
662 mhi_read_reg_field(mhi_cntrl,
663 mhi_cntrl->regs,
664 MHICTRL,
665 MHICTRL_RESET_MASK,
666 &in_reset) ||
667 !in_reset, timeout);
668 if (!ret || in_reset) {
669 dev_err(dev, "Device failed to exit MHI Reset state\n");
670 write_lock_irq(&mhi_cntrl->pm_lock);
671 cur_state = mhi_tryset_pm_state(mhi_cntrl,
672 MHI_PM_SYS_ERR_FAIL);
673 write_unlock_irq(&mhi_cntrl->pm_lock);
674 /* Shutdown may have occurred, otherwise cleanup now */
675 if (cur_state != MHI_PM_SYS_ERR_FAIL)
676 goto exit_sys_error_transition;
677 }
678
679 /*
680 * Device will clear BHI_INTVEC as a part of RESET processing,
681 * hence re-program it
682 */
683 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
684 }
685
686 dev_dbg(dev,
687 "Waiting for all pending event ring processing to complete\n");
688 mhi_event = mhi_cntrl->mhi_event;
689 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
690 if (mhi_event->offload_ev)
691 continue;
692 tasklet_kill(&mhi_event->task);
693 }
694
695 /* Release lock and wait for all pending threads to complete */
696 mutex_unlock(&mhi_cntrl->pm_mutex);
697 dev_dbg(dev, "Waiting for all pending threads to complete\n");
698 wake_up_all(&mhi_cntrl->state_event);
699
700 dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
701 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
702
703 mutex_lock(&mhi_cntrl->pm_mutex);
704
705 WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
706 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
707
708 /* Reset the ev rings and cmd rings */
709 dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
710 mhi_cmd = mhi_cntrl->mhi_cmd;
711 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
712 for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
713 struct mhi_ring *ring = &mhi_cmd->ring;
714
715 ring->rp = ring->base;
716 ring->wp = ring->base;
717 cmd_ctxt->rp = cmd_ctxt->rbase;
718 cmd_ctxt->wp = cmd_ctxt->rbase;
719 }
720
721 mhi_event = mhi_cntrl->mhi_event;
722 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
723 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
724 mhi_event++) {
725 struct mhi_ring *ring = &mhi_event->ring;
726
727 /* Skip offload events */
728 if (mhi_event->offload_ev)
729 continue;
730
731 ring->rp = ring->base;
732 ring->wp = ring->base;
733 er_ctxt->rp = er_ctxt->rbase;
734 er_ctxt->wp = er_ctxt->rbase;
735 }
736
737 /* Transition to next state */
738 if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
739 write_lock_irq(&mhi_cntrl->pm_lock);
740 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
741 write_unlock_irq(&mhi_cntrl->pm_lock);
742 if (cur_state != MHI_PM_POR) {
743 dev_err(dev, "Error moving to state %s from %s\n",
744 to_mhi_pm_state_str(MHI_PM_POR),
745 to_mhi_pm_state_str(cur_state));
746 goto exit_sys_error_transition;
747 }
748 next_state = DEV_ST_TRANSITION_PBL;
749 } else {
750 next_state = DEV_ST_TRANSITION_READY;
751 }
752
753 mhi_queue_state_transition(mhi_cntrl, next_state);
754
755 exit_sys_error_transition:
756 dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
757 to_mhi_pm_state_str(mhi_cntrl->pm_state),
758 mhi_state_str(mhi_cntrl->dev_state));
759
760 mutex_unlock(&mhi_cntrl->pm_mutex);
761 }
762
763 /* Queue a new work item and schedule work */
764 int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
765 enum dev_st_transition state)
766 {
767 struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
768 unsigned long flags;
769
770 if (!item)
771 return -ENOMEM;
772
773 item->state = state;
774 spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
775 list_add_tail(&item->node, &mhi_cntrl->transition_list);
776 spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
777
778 queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);
779
780 return 0;
781 }
782
783 /* SYS_ERR worker */
784 void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
785 {
786 struct device *dev = &mhi_cntrl->mhi_dev->dev;
787
788 /* skip if controller supports RDDM */
789 if (mhi_cntrl->rddm_image) {
790 dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
791 return;
792 }
793
794 mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
795 }
796
797 /* Device State Transition worker */
798 void mhi_pm_st_worker(struct work_struct *work)
799 {
800 struct state_transition *itr, *tmp;
801 LIST_HEAD(head);
802 struct mhi_controller *mhi_cntrl = container_of(work,
803 struct mhi_controller,
804 st_worker);
805
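/*
 * Move all pending transitions to a local list under the lock, then
 * process them without holding it.
 */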
806 spin_lock_irq(&mhi_cntrl->transition_lock);
807 list_splice_tail_init(&mhi_cntrl->transition_list, &head);
808 spin_unlock_irq(&mhi_cntrl->transition_lock);
809
810 list_for_each_entry_safe(itr, tmp, &head, node) {
811 list_del(&itr->node);
812
813 trace_mhi_pm_st_transition(mhi_cntrl, itr->state);
814
815 switch (itr->state) {
816 case DEV_ST_TRANSITION_PBL:
817 write_lock_irq(&mhi_cntrl->pm_lock);
818 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
819 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
820 write_unlock_irq(&mhi_cntrl->pm_lock);
821 mhi_fw_load_handler(mhi_cntrl);
822 break;
823 case DEV_ST_TRANSITION_SBL:
824 write_lock_irq(&mhi_cntrl->pm_lock);
825 mhi_cntrl->ee = MHI_EE_SBL;
826 write_unlock_irq(&mhi_cntrl->pm_lock);
827 /*
828 * The MHI devices are only created when the client
829 * device switches its Execution Environment (EE) to
830 * either SBL or AMSS states
831 */
832 mhi_create_devices(mhi_cntrl);
833 if (mhi_cntrl->fbc_download)
834 mhi_download_amss_image(mhi_cntrl);
835
836 mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
837 break;
838 case DEV_ST_TRANSITION_MISSION_MODE:
839 mhi_pm_mission_mode_transition(mhi_cntrl);
840 break;
841 case DEV_ST_TRANSITION_FP:
842 write_lock_irq(&mhi_cntrl->pm_lock);
843 mhi_cntrl->ee = MHI_EE_FP;
844 write_unlock_irq(&mhi_cntrl->pm_lock);
845 mhi_create_devices(mhi_cntrl);
846 mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
847 break;
848 case DEV_ST_TRANSITION_READY:
849 mhi_ready_state_transition(mhi_cntrl);
850 break;
851 case DEV_ST_TRANSITION_SYS_ERR:
852 mhi_pm_sys_error_transition(mhi_cntrl);
853 break;
854 case DEV_ST_TRANSITION_DISABLE:
855 mhi_pm_disable_transition(mhi_cntrl, false);
856 break;
857 case DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE:
858 mhi_pm_disable_transition(mhi_cntrl, true);
859 break;
860 default:
861 break;
862 }
863 kfree(itr);
864 }
865 }
866
867 int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
868 {
869 struct mhi_chan *itr, *tmp;
870 struct device *dev = &mhi_cntrl->mhi_dev->dev;
871 enum mhi_pm_state new_state;
872 int ret;
873
874 if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
875 return -EINVAL;
876
877 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
878 return -EIO;
879
880 /* Return busy if there are any pending resources */
881 if (atomic_read(&mhi_cntrl->dev_wake) ||
882 atomic_read(&mhi_cntrl->pending_pkts))
883 return -EBUSY;
884
885 /* Take MHI out of M2 state */
886 read_lock_bh(&mhi_cntrl->pm_lock);
887 mhi_cntrl->wake_get(mhi_cntrl, false);
888 read_unlock_bh(&mhi_cntrl->pm_lock);
889
890 ret = wait_event_timeout(mhi_cntrl->state_event,
891 mhi_cntrl->dev_state == MHI_STATE_M0 ||
892 mhi_cntrl->dev_state == MHI_STATE_M1 ||
893 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
894 msecs_to_jiffies(mhi_cntrl->timeout_ms));
895
896 read_lock_bh(&mhi_cntrl->pm_lock);
897 mhi_cntrl->wake_put(mhi_cntrl, false);
898 read_unlock_bh(&mhi_cntrl->pm_lock);
899
900 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
901 dev_err(dev,
902 "Could not enter M0/M1 state");
903 return -EIO;
904 }
905
906 write_lock_irq(&mhi_cntrl->pm_lock);
907
908 if (atomic_read(&mhi_cntrl->dev_wake) ||
909 atomic_read(&mhi_cntrl->pending_pkts)) {
910 write_unlock_irq(&mhi_cntrl->pm_lock);
911 return -EBUSY;
912 }
913
914 dev_dbg(dev, "Allowing M3 transition\n");
915 new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
916 if (new_state != MHI_PM_M3_ENTER) {
917 write_unlock_irq(&mhi_cntrl->pm_lock);
918 dev_err(dev,
919 "Error setting to PM state: %s from: %s\n",
920 to_mhi_pm_state_str(MHI_PM_M3_ENTER),
921 to_mhi_pm_state_str(mhi_cntrl->pm_state));
922 return -EIO;
923 }
924
925 /* Set MHI to M3 and wait for completion */
926 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
927 write_unlock_irq(&mhi_cntrl->pm_lock);
928 dev_dbg(dev, "Waiting for M3 completion\n");
929
930 ret = wait_event_timeout(mhi_cntrl->state_event,
931 mhi_cntrl->dev_state == MHI_STATE_M3 ||
932 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
933 msecs_to_jiffies(mhi_cntrl->timeout_ms));
934
935 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
936 dev_err(dev,
937 "Did not enter M3 state, MHI state: %s, PM state: %s\n",
938 mhi_state_str(mhi_cntrl->dev_state),
939 to_mhi_pm_state_str(mhi_cntrl->pm_state));
940 return -EIO;
941 }
942
943 /* Notify clients about entering LPM */
944 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
945 mutex_lock(&itr->mutex);
946 if (itr->mhi_dev)
947 mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
948 mutex_unlock(&itr->mutex);
949 }
950
951 return 0;
952 }
953 EXPORT_SYMBOL_GPL(mhi_pm_suspend);
954
955 static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
956 {
957 struct mhi_chan *itr, *tmp;
958 struct device *dev = &mhi_cntrl->mhi_dev->dev;
959 enum mhi_pm_state cur_state;
960 int ret;
961
962 dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
963 to_mhi_pm_state_str(mhi_cntrl->pm_state),
964 mhi_state_str(mhi_cntrl->dev_state));
965
966 if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
967 return 0;
968
969 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
970 return -EIO;
971
972 if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
973 dev_warn(dev, "Resuming from non M3 state (%s)\n",
974 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
975 if (!force)
976 return -EINVAL;
977 }
978
979 /* Notify clients about exiting LPM */
980 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
981 mutex_lock(&itr->mutex);
982 if (itr->mhi_dev)
983 mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
984 mutex_unlock(&itr->mutex);
985 }
986
987 write_lock_irq(&mhi_cntrl->pm_lock);
988 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
989 if (cur_state != MHI_PM_M3_EXIT) {
990 write_unlock_irq(&mhi_cntrl->pm_lock);
991 dev_info(dev,
992 "Error setting to PM state: %s from: %s\n",
993 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
994 to_mhi_pm_state_str(mhi_cntrl->pm_state));
995 return -EIO;
996 }
997
998 /* Set MHI to M0 and wait for completion */
999 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
1000 write_unlock_irq(&mhi_cntrl->pm_lock);
1001
1002 ret = wait_event_timeout(mhi_cntrl->state_event,
1003 mhi_cntrl->dev_state == MHI_STATE_M0 ||
1004 mhi_cntrl->dev_state == MHI_STATE_M2 ||
1005 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
1006 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1007
1008 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1009 dev_err(dev,
1010 "Did not enter M0 state, MHI state: %s, PM state: %s\n",
1011 mhi_state_str(mhi_cntrl->dev_state),
1012 to_mhi_pm_state_str(mhi_cntrl->pm_state));
1013 return -EIO;
1014 }
1015
1016 return 0;
1017 }
1018
1019 int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
1020 {
1021 return __mhi_pm_resume(mhi_cntrl, false);
1022 }
1023 EXPORT_SYMBOL_GPL(mhi_pm_resume);
1024
1025 int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
1026 {
1027 return __mhi_pm_resume(mhi_cntrl, true);
1028 }
1029 EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
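
/*
 * Sketch of how a controller driver might wire these into its system PM
 * callbacks (the foo_* names are hypothetical):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_ctrl *foo = dev_get_drvdata(dev);
 *
 *		return mhi_pm_suspend(foo->mhi_cntrl);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_ctrl *foo = dev_get_drvdata(dev);
 *
 *		return mhi_pm_resume(foo->mhi_cntrl);
 *	}
 */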
1030
1031 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
1032 {
1033 int ret;
1034
1035 /* Wake up the device */
1036 read_lock_bh(&mhi_cntrl->pm_lock);
1037 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1038 read_unlock_bh(&mhi_cntrl->pm_lock);
1039 return -EIO;
1040 }
1041 mhi_cntrl->wake_get(mhi_cntrl, true);
1042 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1043 mhi_trigger_resume(mhi_cntrl);
1044 read_unlock_bh(&mhi_cntrl->pm_lock);
1045
1046 ret = wait_event_timeout(mhi_cntrl->state_event,
1047 mhi_cntrl->pm_state == MHI_PM_M0 ||
1048 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
1049 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1050
1051 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1052 read_lock_bh(&mhi_cntrl->pm_lock);
1053 mhi_cntrl->wake_put(mhi_cntrl, false);
1054 read_unlock_bh(&mhi_cntrl->pm_lock);
1055 return -EIO;
1056 }
1057
1058 return 0;
1059 }
1060
1061 /* Assert device wake db */
1062 static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
1063 {
1064 unsigned long flags;
1065
1066 /*
1067 * If force flag is set, then increment the wake count value and
1068 * ring wake db
1069 */
1070 if (unlikely(force)) {
1071 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
1072 atomic_inc(&mhi_cntrl->dev_wake);
1073 if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
1074 !mhi_cntrl->wake_set) {
1075 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
1076 mhi_cntrl->wake_set = true;
1077 }
1078 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
1079 } else {
1080 /*
1081 * If resources are already requested, then just increment
1082 * the wake count value and return
1083 */
1084 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
1085 return;
1086
1087 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
1088 if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
1089 MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
1090 !mhi_cntrl->wake_set) {
1091 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
1092 mhi_cntrl->wake_set = true;
1093 }
1094 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
1095 }
1096 }
1097
1098 /* De-assert device wake db */
1099 static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
1100 bool override)
1101 {
1102 unsigned long flags;
1103
1104 /*
1105 * Only continue if there is a single resource, else just decrement
1106 * and return
1107 */
1108 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
1109 return;
1110
1111 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
1112 if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
1113 MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
1114 mhi_cntrl->wake_set) {
1115 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
1116 mhi_cntrl->wake_set = false;
1117 }
1118 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
1119 }
1120
1121 int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
1122 {
1123 struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
1124 enum mhi_state state;
1125 enum mhi_ee_type current_ee;
1126 enum dev_st_transition next_state;
1127 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1128 u32 interval_us = 25000; /* poll register field every 25 milliseconds */
1129 int ret, i;
1130
1131 dev_info(dev, "Requested to power ON\n");
1132
1133 /* Supply default wake routines if not provided by controller driver */
1134 if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
1135 !mhi_cntrl->wake_toggle) {
1136 mhi_cntrl->wake_get = mhi_assert_dev_wake;
1137 mhi_cntrl->wake_put = mhi_deassert_dev_wake;
1138 mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
1139 mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
1140 }
1141
1142 mutex_lock(&mhi_cntrl->pm_mutex);
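/*
 * Start from DISABLE: per dev_state_transitions[], POR is the only
 * state reachable from it.
 */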
1143 mhi_cntrl->pm_state = MHI_PM_DISABLE;
1144
1145 /* Setup BHI INTVEC */
1146 write_lock_irq(&mhi_cntrl->pm_lock);
1147 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
1148 mhi_cntrl->pm_state = MHI_PM_POR;
1149 mhi_cntrl->ee = MHI_EE_MAX;
1150 current_ee = mhi_get_exec_env(mhi_cntrl);
1151 write_unlock_irq(&mhi_cntrl->pm_lock);
1152
1153 /* Confirm that the device is in valid exec env */
1154 if (!MHI_POWER_UP_CAPABLE(current_ee)) {
1155 dev_err(dev, "%s is not a valid EE for power on\n",
1156 TO_MHI_EXEC_STR(current_ee));
1157 ret = -EIO;
1158 goto error_exit;
1159 }
1160
1161 state = mhi_get_mhi_state(mhi_cntrl);
1162 dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
1163 TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));
1164
1165 if (state == MHI_STATE_SYS_ERR) {
1166 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
1167 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
1168 MHICTRL_RESET_MASK, 0, interval_us,
1169 mhi_cntrl->timeout_ms);
1170 if (ret) {
1171 dev_info(dev, "Failed to reset MHI due to syserr state\n");
1172 goto error_exit;
1173 }
1174
1175 /*
1176 * device clears INTVEC as part of RESET processing,
1177 * re-program it
1178 */
1179 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
1180 }
1181
1182 /* IRQs have been requested during probe, so we just need to enable them. */
1183 enable_irq(mhi_cntrl->irq[0]);
1184
1185 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
1186 if (mhi_event->offload_ev)
1187 continue;
1188
1189 enable_irq(mhi_cntrl->irq[mhi_event->irq]);
1190 }
1191
1192 /* Transition to next state */
1193 next_state = MHI_IN_PBL(current_ee) ?
1194 DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
1195
1196 mhi_queue_state_transition(mhi_cntrl, next_state);
1197
1198 mutex_unlock(&mhi_cntrl->pm_mutex);
1199
1200 dev_info(dev, "Power on setup success\n");
1201
1202 return 0;
1203
1204 error_exit:
1205 mhi_cntrl->pm_state = MHI_PM_DISABLE;
1206 mutex_unlock(&mhi_cntrl->pm_mutex);
1207
1208 return ret;
1209 }
1210 EXPORT_SYMBOL_GPL(mhi_async_power_up);
1211
1212 static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
1213 bool destroy_device)
1214 {
1215 enum mhi_pm_state cur_state, transition_state;
1216 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1217
1218 mutex_lock(&mhi_cntrl->pm_mutex);
1219 write_lock_irq(&mhi_cntrl->pm_lock);
1220 cur_state = mhi_cntrl->pm_state;
1221 if (cur_state == MHI_PM_DISABLE) {
1222 write_unlock_irq(&mhi_cntrl->pm_lock);
1223 mutex_unlock(&mhi_cntrl->pm_mutex);
1224 return; /* Already powered down */
1225 }
1226
1227 /* If it's not a graceful shutdown, force MHI to linkdown state */
1228 transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
1229 MHI_PM_LD_ERR_FATAL_DETECT;
1230
1231 cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
1232 if (cur_state != transition_state) {
1233 dev_err(dev, "Failed to move to state: %s from: %s\n",
1234 to_mhi_pm_state_str(transition_state),
1235 to_mhi_pm_state_str(mhi_cntrl->pm_state));
1236 /* Force link down or error fatal detected state */
1237 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
1238 }
1239
1240 /* mark device inactive to avoid any further host processing */
1241 mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
1242 mhi_cntrl->dev_state = MHI_STATE_RESET;
1243
1244 wake_up_all(&mhi_cntrl->state_event);
1245
1246 write_unlock_irq(&mhi_cntrl->pm_lock);
1247 mutex_unlock(&mhi_cntrl->pm_mutex);
1248
1249 mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
1250
1251 if (destroy_device)
1252 mhi_queue_state_transition(mhi_cntrl,
1253 DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
1254 else
1255 mhi_queue_state_transition(mhi_cntrl,
1256 DEV_ST_TRANSITION_DISABLE);
1257
1258 /* Wait for shutdown to complete */
1259 flush_work(&mhi_cntrl->st_worker);
1260
1261 disable_irq(mhi_cntrl->irq[0]);
1262 }
1263
1264 void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
1265 {
1266 __mhi_power_down(mhi_cntrl, graceful, true);
1267 }
1268 EXPORT_SYMBOL_GPL(mhi_power_down);
1269
1270 void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl,
1271 bool graceful)
1272 {
1273 __mhi_power_down(mhi_cntrl, graceful, false);
1274 }
1275 EXPORT_SYMBOL_GPL(mhi_power_down_keep_dev);
1276
1277 int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
1278 {
1279 int ret = mhi_async_power_up(mhi_cntrl);
1280 u32 timeout_ms;
1281
1282 if (ret)
1283 return ret;
1284
1285 /* Some devices need more time to set ready during power up */
1286 timeout_ms = mhi_cntrl->ready_timeout_ms ?
1287 mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
1288 wait_event_timeout(mhi_cntrl->state_event,
1289 MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
1290 MHI_PM_FATAL_ERROR(mhi_cntrl->pm_state),
1291 msecs_to_jiffies(timeout_ms));
1292
1293 ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
1294 if (ret)
1295 mhi_power_down(mhi_cntrl, false);
1296
1297 return ret;
1298 }
1299 EXPORT_SYMBOL(mhi_sync_power_up);
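
/*
 * Typical power sequencing from a controller driver (sketch; the foo_*
 * names are hypothetical and error handling is omitted):
 *
 *	ret = mhi_sync_power_up(foo->mhi_cntrl);	// power on, wait for mission mode
 *	...
 *	mhi_power_down(foo->mhi_cntrl, true);		// graceful shutdown
 */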
1300
1301 int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
1302 {
1303 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1304 int ret;
1305
1306 /* Check if device is already in RDDM */
1307 if (mhi_cntrl->ee == MHI_EE_RDDM)
1308 return 0;
1309
1310 dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
1311 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
1312
1313 /* Wait for RDDM event */
1314 ret = wait_event_timeout(mhi_cntrl->state_event,
1315 mhi_cntrl->ee == MHI_EE_RDDM,
1316 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1317 ret = ret ? 0 : -EIO;
1318
1319 return ret;
1320 }
1321 EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
1322
1323 int mhi_device_get_sync(struct mhi_device *mhi_dev)
1324 {
1325 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1326 int ret;
1327
1328 ret = __mhi_device_get_sync(mhi_cntrl);
1329 if (!ret)
1330 mhi_dev->dev_wake++;
1331
1332 return ret;
1333 }
1334 EXPORT_SYMBOL_GPL(mhi_device_get_sync);
1335
1336 void mhi_device_put(struct mhi_device *mhi_dev)
1337 {
1338 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1339
1340 mhi_dev->dev_wake--;
1341 read_lock_bh(&mhi_cntrl->pm_lock);
1342 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1343 mhi_trigger_resume(mhi_cntrl);
1344
1345 mhi_cntrl->wake_put(mhi_cntrl, false);
1346 read_unlock_bh(&mhi_cntrl->pm_lock);
1347 }
1348 EXPORT_SYMBOL_GPL(mhi_device_put);
1349
1350 void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee)
1351 {
1352 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1353 char *buf[2];
1354 int ret;
1355
1356 buf[0] = kasprintf(GFP_KERNEL, "EXEC_ENV=%s", TO_MHI_EXEC_STR(ee));
1357 buf[1] = NULL;
1358
1359 if (!buf[0])
1360 return;
1361
1362 ret = kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, buf);
1363 if (ret)
1364 dev_err(dev, "Failed to send %s uevent\n", TO_MHI_EXEC_STR(ee));
1365
1366 kfree(buf[0]);
1367 }
1368