Lines Matching full:ctl

52 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_fsm_notifier_register() local
55 spin_lock_irqsave(&ctl->notifier_lock, flags); in t7xx_fsm_notifier_register()
56 list_add_tail(&notifier->entry, &ctl->notifier_list); in t7xx_fsm_notifier_register()
57 spin_unlock_irqrestore(&ctl->notifier_lock, flags); in t7xx_fsm_notifier_register()
63 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_fsm_notifier_unregister() local
66 spin_lock_irqsave(&ctl->notifier_lock, flags); in t7xx_fsm_notifier_unregister()
67 list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) { in t7xx_fsm_notifier_unregister()
71 spin_unlock_irqrestore(&ctl->notifier_lock, flags); in t7xx_fsm_notifier_unregister()
76 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in fsm_state_notify() local
80 spin_lock_irqsave(&ctl->notifier_lock, flags); in fsm_state_notify()
81 list_for_each_entry(notifier, &ctl->notifier_list, entry) { in fsm_state_notify()
82 spin_unlock_irqrestore(&ctl->notifier_lock, flags); in fsm_state_notify()
86 spin_lock_irqsave(&ctl->notifier_lock, flags); in fsm_state_notify()
88 spin_unlock_irqrestore(&ctl->notifier_lock, flags); in fsm_state_notify()
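The notifier matches above (register, unregister, and fsm_state_notify) outline a small observer list guarded by ctl->notifier_lock. Below is a minimal sketch of that pattern, assuming only the ctl fields visible in the matches (notifier_lock, notifier_list); the callback and data fields on the notifier are assumptions, as is the callback signature. It keeps the listing's habit of releasing the lock around the callback so the callback is not invoked under the spinlock.

#include <linux/list.h>
#include <linux/spinlock.h>

struct t7xx_fsm_notifier {
	struct list_head entry;
	int (*notifier_fn)(enum md_state state, void *data);	/* assumed */
	void *data;						/* assumed */
};

static void sketch_notifier_register(struct t7xx_fsm_ctl *ctl,
				     struct t7xx_fsm_notifier *notifier)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

static void sketch_state_notify(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		/* Drop the lock while the callback runs, as the matches above do. */
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);
		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

Unregistering mirrors registration: walk the list with list_for_each_entry_safe() under the same lock and list_del() the matching entry, as the t7xx_fsm_notifier_unregister() matches suggest.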
91 void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) in t7xx_fsm_broadcast_state() argument
93 ctl->md_state = state; in t7xx_fsm_broadcast_state()
96 t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state); in t7xx_fsm_broadcast_state()
97 fsm_state_notify(ctl->md, state); in t7xx_fsm_broadcast_state()
100 static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result) in fsm_finish_command() argument
116 static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl) in fsm_flush_event_cmd_qs() argument
118 struct device *dev = &ctl->md->t7xx_dev->pdev->dev; in fsm_flush_event_cmd_qs()
123 spin_lock_irqsave(&ctl->command_lock, flags); in fsm_flush_event_cmd_qs()
124 list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) { in fsm_flush_event_cmd_qs()
127 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_flush_event_cmd_qs()
129 spin_unlock_irqrestore(&ctl->command_lock, flags); in fsm_flush_event_cmd_qs()
131 spin_lock_irqsave(&ctl->event_lock, flags); in fsm_flush_event_cmd_qs()
132 list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { in fsm_flush_event_cmd_qs()
136 spin_unlock_irqrestore(&ctl->event_lock, flags); in fsm_flush_event_cmd_qs()
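fsm_flush_event_cmd_qs() walks both queues with list_for_each_entry_safe() so entries can be unlinked while iterating, completing any pending command with -EINVAL. A sketch of the command-queue half; the explicit list_del() and the assumption that fsm_finish_command() completes the waiter and frees the entry are not part of the matches:

#include <linux/list.h>
#include <linux/spinlock.h>

static void sketch_flush_command_queue(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		list_del(&cmd->entry);
		/* Complete the waiter (if any) with -EINVAL; assumed to free cmd. */
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);
}

The event queue is flushed the same way under ctl->event_lock, with each event simply unlinked and freed.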
139 static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected, in fsm_wait_for_event() argument
153 spin_lock_irqsave(&ctl->event_lock, flags); in fsm_wait_for_event()
154 event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry); in fsm_wait_for_event()
162 spin_unlock_irqrestore(&ctl->event_lock, flags); in fsm_wait_for_event()
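fsm_wait_for_event() peeks at the head of the event queue with list_first_entry_or_null() under ctl->event_lock. A sketch of a single check, assuming the event struct exposes an event_id field and that the caller retries with a delay until the expected event arrives or a timeout expires (the retry/timeout policy is not visible in the matches):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static bool sketch_try_consume_event(struct t7xx_fsm_ctl *ctl,
				     enum t7xx_fsm_event_state expected)
{
	struct t7xx_fsm_event *event;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&ctl->event_lock, flags);
	event = list_first_entry_or_null(&ctl->event_queue,
					 struct t7xx_fsm_event, entry);
	if (event && event->event_id == expected) {	/* event_id is assumed */
		list_del(&event->entry);
		kfree(event);
		found = true;
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	return found;
}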
169 static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, in fsm_routine_exception() argument
172 struct device *dev = &ctl->md->t7xx_dev->pdev->dev; in fsm_routine_exception()
174 if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) { in fsm_routine_exception()
176 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_routine_exception()
181 ctl->curr_state = FSM_STATE_EXCEPTION; in fsm_routine_exception()
190 t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); in fsm_routine_exception()
191 t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev); in fsm_routine_exception()
192 t7xx_md_exception_handshake(ctl->md); in fsm_routine_exception()
194 fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, in fsm_routine_exception()
196 fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID, in fsm_routine_exception()
206 fsm_finish_command(ctl, cmd, 0); in fsm_routine_exception()
209 static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) in fsm_stopped_handler() argument
211 ctl->curr_state = FSM_STATE_STOPPED; in fsm_stopped_handler()
213 t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED); in fsm_stopped_handler()
214 return t7xx_md_reset(ctl->md->t7xx_dev); in fsm_stopped_handler()
217 static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) in fsm_routine_stopped() argument
219 if (ctl->curr_state == FSM_STATE_STOPPED) { in fsm_routine_stopped()
220 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_routine_stopped()
224 fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); in fsm_routine_stopped()
227 static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) in fsm_routine_stopping() argument
233 if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { in fsm_routine_stopping()
234 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_routine_stopping()
238 md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; in fsm_routine_stopping()
239 t7xx_dev = ctl->md->t7xx_dev; in fsm_routine_stopping()
241 ctl->curr_state = FSM_STATE_STOPPING; in fsm_routine_stopping()
242 t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); in fsm_routine_stopping()
245 if (!ctl->md->rgu_irq_asserted) { in fsm_routine_stopping()
255 fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); in fsm_routine_stopping()
258 static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl) in t7xx_fsm_broadcast_ready_state() argument
260 if (ctl->md_state != MD_STATE_WAITING_FOR_HS2) in t7xx_fsm_broadcast_ready_state()
263 ctl->md_state = MD_STATE_READY; in t7xx_fsm_broadcast_ready_state()
265 fsm_state_notify(ctl->md, MD_STATE_READY); in t7xx_fsm_broadcast_ready_state()
266 t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY); in t7xx_fsm_broadcast_ready_state()
269 static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) in fsm_routine_ready() argument
271 struct t7xx_modem *md = ctl->md; in fsm_routine_ready()
273 ctl->curr_state = FSM_STATE_READY; in fsm_routine_ready()
274 t7xx_fsm_broadcast_ready_state(ctl); in fsm_routine_ready()
278 static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) in fsm_routine_starting() argument
280 struct t7xx_modem *md = ctl->md; in fsm_routine_starting()
283 ctl->curr_state = FSM_STATE_STARTING; in fsm_routine_starting()
285 t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); in fsm_routine_starting()
288 wait_event_interruptible_timeout(ctl->async_hk_wq, in fsm_routine_starting()
290 ctl->exp_flg, HZ * 60); in fsm_routine_starting()
293 if (ctl->exp_flg) in fsm_routine_starting()
299 t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); in fsm_routine_starting()
301 fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); in fsm_routine_starting()
306 t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0); in fsm_routine_starting()
308 fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); in fsm_routine_starting()
313 fsm_routine_ready(ctl); in fsm_routine_starting()
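fsm_routine_starting() broadcasts MD_STATE_WAITING_FOR_HS1 and then blocks on ctl->async_hk_wq with wait_event_interruptible_timeout() for up to 60 seconds; if an exception was flagged or the handshake did not finish, it queues the HS2_EXIT events and raises EXCEPTION_HS_TIMEOUT, otherwise it moves on to fsm_routine_ready(). A condensed sketch of that wait, where hs_done stands in for whatever handshake-complete condition the driver actually checks alongside ctl->exp_flg, and the return codes are illustrative:

#include <linux/wait.h>

static int sketch_wait_for_handshake(struct t7xx_fsm_ctl *ctl, bool *hs_done)
{
	/* Wake-ups come from the interrupt path setting ctl->exp_flg or from
	 * the handshake work completing; give up after 60 seconds.
	 */
	wait_event_interruptible_timeout(ctl->async_hk_wq,
					 *hs_done || ctl->exp_flg, HZ * 60);

	if (ctl->exp_flg)
		return -EIO;		/* exception reported during start-up */

	if (!*hs_done) {
		/* Handshake timed out: unblock the handshake waiter and
		 * escalate, mirroring the EXCEPTION_HS_TIMEOUT path above.
		 */
		t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	return 0;
}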
317 static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) in fsm_routine_start() argument
319 struct t7xx_modem *md = ctl->md; in fsm_routine_start()
326 if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START && in fsm_routine_start()
327 ctl->curr_state != FSM_STATE_STOPPED) { in fsm_routine_start()
328 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_routine_start()
332 ctl->curr_state = FSM_STATE_PRE_START; in fsm_routine_start()
341 fsm_finish_command(ctl, cmd, -ETIMEDOUT); in fsm_routine_start()
348 fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); in fsm_routine_start()
353 struct t7xx_fsm_ctl *ctl = data; in fsm_main_thread() local
358 if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) || in fsm_main_thread()
365 spin_lock_irqsave(&ctl->command_lock, flags); in fsm_main_thread()
366 cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry); in fsm_main_thread()
368 spin_unlock_irqrestore(&ctl->command_lock, flags); in fsm_main_thread()
372 fsm_routine_start(ctl, cmd); in fsm_main_thread()
376 fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag)); in fsm_main_thread()
380 fsm_routine_stopping(ctl, cmd); in fsm_main_thread()
384 fsm_routine_stopped(ctl, cmd); in fsm_main_thread()
388 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_main_thread()
389 fsm_flush_event_cmd_qs(ctl); in fsm_main_thread()
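fsm_main_thread() is the consumer side: it sleeps on ctl->command_wq until the command queue is non-empty or kthread_should_stop() fires, pops the head command under ctl->command_lock, and dispatches on the command type (start, exception, stopping, stopped). A stripped-down sketch of that loop; the cmd_id field name, the list_del() on the popped entry, and the placement of the default/stop handling are assumptions beyond what the matches show:

#include <linux/bitfield.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static int sketch_fsm_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;

	while (!kthread_should_stop()) {
		struct t7xx_fsm_command *cmd;
		unsigned long flags;

		if (wait_event_interruptible(ctl->command_wq,
					     !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;	/* interrupted; re-check the condition */

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue,
				       struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {		/* field name is assumed */
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;
		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd,
					      FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;
		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

The stopping and stopped commands seen in the matches (fsm_routine_stopping(), fsm_routine_stopped()) would be dispatched from the same switch.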
397 int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag) in t7xx_fsm_append_cmd() argument
416 spin_lock_irqsave(&ctl->command_lock, flags); in t7xx_fsm_append_cmd()
417 list_add_tail(&cmd->entry, &ctl->command_queue); in t7xx_fsm_append_cmd()
418 spin_unlock_irqrestore(&ctl->command_lock, flags); in t7xx_fsm_append_cmd()
420 wake_up(&ctl->command_wq); in t7xx_fsm_append_cmd()
436 int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, in t7xx_fsm_append_event() argument
439 struct device *dev = &ctl->md->t7xx_dev->pdev->dev; in t7xx_fsm_append_event()
460 spin_lock_irqsave(&ctl->event_lock, flags); in t7xx_fsm_append_event()
461 list_add_tail(&event->entry, &ctl->event_queue); in t7xx_fsm_append_event()
462 spin_unlock_irqrestore(&ctl->event_lock, flags); in t7xx_fsm_append_event()
464 wake_up_all(&ctl->event_wq); in t7xx_fsm_append_event()
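t7xx_fsm_append_cmd() and t7xx_fsm_append_event() are the producer side of the two queues: allocate an entry, link it under the matching lock, then wake the consumer (the FSM thread for commands, fsm_wait_for_event() for events). A sketch of the command path; the struct layout, the allocation flags, and any wait-for-completion handling are not visible in the matches and are assumptions here:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static int sketch_append_cmd(struct t7xx_fsm_ctl *ctl,
			     enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	/* GFP_KERNEL assumes process context; an atomic caller (e.g. an IRQ
	 * path) would need GFP_ATOMIC instead.
	 */
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;	/* field names are assumed */
	cmd->flag = flag;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);
	return 0;
}

t7xx_fsm_append_event() mirrors this under ctl->event_lock and wakes every sleeper with wake_up_all(&ctl->event_wq).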
468 void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id) in t7xx_fsm_clr_event() argument
473 spin_lock_irqsave(&ctl->event_lock, flags); in t7xx_fsm_clr_event()
474 list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { in t7xx_fsm_clr_event()
478 spin_unlock_irqrestore(&ctl->event_lock, flags); in t7xx_fsm_clr_event()
481 enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl) in t7xx_fsm_get_md_state() argument
483 if (ctl) in t7xx_fsm_get_md_state()
484 return ctl->md_state; in t7xx_fsm_get_md_state()
489 unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl) in t7xx_fsm_get_ctl_state() argument
491 if (ctl) in t7xx_fsm_get_ctl_state()
492 return ctl->curr_state; in t7xx_fsm_get_ctl_state()
497 int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type) in t7xx_fsm_recv_md_intr() argument
502 return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags); in t7xx_fsm_recv_md_intr()
504 ctl->exp_flg = true; in t7xx_fsm_recv_md_intr()
505 wake_up(&ctl->async_hk_wq); in t7xx_fsm_recv_md_intr()
507 return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags); in t7xx_fsm_recv_md_intr()
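t7xx_fsm_recv_md_intr() turns a modem interrupt into FSM work: one interrupt type simply queues FSM_CMD_START, while the exception type first sets ctl->exp_flg and wakes ctl->async_hk_wq, so a start-up handshake blocked in fsm_routine_starting() notices immediately, before queuing FSM_CMD_EXCEPTION. A sketch of the exception half only; the interrupt-type names and cmd_flags value are not visible in the matches:

static int sketch_report_md_exception(struct t7xx_fsm_ctl *ctl, unsigned int cmd_flags)
{
	/* Unblock a start-up handshake waiting on async_hk_wq ... */
	ctl->exp_flg = true;
	wake_up(&ctl->async_hk_wq);

	/* ... then hand the exception to the FSM thread. */
	return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
}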
515 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_fsm_reset() local
517 fsm_flush_event_cmd_qs(ctl); in t7xx_fsm_reset()
518 ctl->curr_state = FSM_STATE_STOPPED; in t7xx_fsm_reset()
519 ctl->exp_flg = false; in t7xx_fsm_reset()
525 struct t7xx_fsm_ctl *ctl; in t7xx_fsm_init() local
527 ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL); in t7xx_fsm_init()
528 if (!ctl) in t7xx_fsm_init()
531 md->fsm_ctl = ctl; in t7xx_fsm_init()
532 ctl->md = md; in t7xx_fsm_init()
533 ctl->curr_state = FSM_STATE_INIT; in t7xx_fsm_init()
534 INIT_LIST_HEAD(&ctl->command_queue); in t7xx_fsm_init()
535 INIT_LIST_HEAD(&ctl->event_queue); in t7xx_fsm_init()
536 init_waitqueue_head(&ctl->async_hk_wq); in t7xx_fsm_init()
537 init_waitqueue_head(&ctl->event_wq); in t7xx_fsm_init()
538 INIT_LIST_HEAD(&ctl->notifier_list); in t7xx_fsm_init()
539 init_waitqueue_head(&ctl->command_wq); in t7xx_fsm_init()
540 spin_lock_init(&ctl->event_lock); in t7xx_fsm_init()
541 spin_lock_init(&ctl->command_lock); in t7xx_fsm_init()
542 ctl->exp_flg = false; in t7xx_fsm_init()
543 spin_lock_init(&ctl->notifier_lock); in t7xx_fsm_init()
545 ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm"); in t7xx_fsm_init()
546 return PTR_ERR_OR_ZERO(ctl->fsm_thread); in t7xx_fsm_init()
551 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_fsm_uninit() local
553 if (!ctl) in t7xx_fsm_uninit()
556 if (ctl->fsm_thread) in t7xx_fsm_uninit()
557 kthread_stop(ctl->fsm_thread); in t7xx_fsm_uninit()
559 fsm_flush_event_cmd_qs(ctl); in t7xx_fsm_uninit()
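Taken together, the init/uninit matches show the lifecycle: t7xx_fsm_init() allocates the control block with devm_kzalloc(), initialises every list, waitqueue, and lock before the worker exists, and only then starts the kthread, folding a kthread_run() failure into the return value with PTR_ERR_OR_ZERO(); t7xx_fsm_uninit() undoes only what devres will not, stopping the thread and draining the queues. A compressed sketch under those assumptions, with dev taken to be the device that owns md:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kthread.h>

static int sketch_fsm_init(struct device *dev, struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	init_waitqueue_head(&ctl->event_wq);
	init_waitqueue_head(&ctl->async_hk_wq);
	spin_lock_init(&ctl->command_lock);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

static void sketch_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}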