Lines Matching full:qca
(Hits are from the Qualcomm HCI UART Bluetooth driver, drivers/bluetooth/hci_qca.c; each entry gives the source line number, the matching line, and the enclosing function.)
196 * QCA Bluetooth chipset
211 * Platform data for the QCA Bluetooth power driver.
284 struct qca_data *qca = hu->priv; in serial_clock_vote() local
287 bool old_vote = (qca->tx_vote | qca->rx_vote); in serial_clock_vote()
292 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
295 qca->vote_off_ms += diff; in serial_clock_vote()
297 qca->vote_on_ms += diff; in serial_clock_vote()
301 qca->tx_vote = true; in serial_clock_vote()
302 qca->tx_votes_on++; in serial_clock_vote()
306 qca->rx_vote = true; in serial_clock_vote()
307 qca->rx_votes_on++; in serial_clock_vote()
311 qca->tx_vote = false; in serial_clock_vote()
312 qca->tx_votes_off++; in serial_clock_vote()
316 qca->rx_vote = false; in serial_clock_vote()
317 qca->rx_votes_off++; in serial_clock_vote()
325 new_vote = qca->rx_vote | qca->tx_vote; in serial_clock_vote()
336 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
339 qca->votes_on++; in serial_clock_vote()
340 qca->vote_off_ms += diff; in serial_clock_vote()
342 qca->votes_off++; in serial_clock_vote()
343 qca->vote_on_ms += diff; in serial_clock_vote()
345 qca->vote_last_jif = jiffies; in serial_clock_vote()
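
The hits above come from the UART clock-voting helper. As a rough illustration of that accounting pattern (not the driver code itself; the field names and millisecond timestamps are simplified stand-ins for the jiffies-based qca_data members), a self-contained sketch:

	/* Hedged sketch: two independent votes (tx, rx) are OR-ed into one
	 * "clock needed" line; elapsed time is charged to the on- or
	 * off-bucket for the state that just ended, and the transition
	 * counters only move when the combined vote actually flips.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct vote_stats {
		bool tx_vote, rx_vote;
		uint64_t votes_on, votes_off;
		uint64_t vote_on_ms, vote_off_ms;
		uint64_t vote_last_ms;		/* timestamp of the last flip */
	};

	static void vote(struct vote_stats *s, bool tx, bool rx, uint64_t now_ms)
	{
		bool old_vote = s->tx_vote || s->rx_vote;
		bool new_vote;

		s->tx_vote = tx;
		s->rx_vote = rx;
		new_vote = s->tx_vote || s->rx_vote;

		if (new_vote == old_vote)
			return;		/* combined vote unchanged: nothing to account */

		/* charge the interval that just ended to the matching bucket */
		if (new_vote) {
			s->votes_on++;
			s->vote_off_ms += now_ms - s->vote_last_ms;
		} else {
			s->votes_off++;
			s->vote_on_ms += now_ms - s->vote_last_ms;
		}
		s->vote_last_ms = now_ms;
	}

	int main(void)
	{
		struct vote_stats s = { 0 };

		vote(&s, true, false, 10);	/* tx wants the clock: off -> on */
		vote(&s, true, true, 20);	/* rx joins: no flip, no accounting */
		vote(&s, false, false, 50);	/* both release: on -> off */
		printf("on=%llu ms, off=%llu ms\n",
		       (unsigned long long)s.vote_on_ms,
		       (unsigned long long)s.vote_off_ms);
		return 0;
	}

Time is charged to the bucket for the state that just ended, so in this model the on/off counters partition the elapsed time between flips.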
356 struct qca_data *qca = hu->priv; in send_hci_ibs_cmd() local
369 skb_queue_tail(&qca->txq, skb); in send_hci_ibs_cmd()
376 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_device() local
378 struct hci_uart *hu = qca->hu; in qca_wq_awake_device()
387 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
393 qca->ibs_sent_wakes++; in qca_wq_awake_device()
396 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in qca_wq_awake_device()
397 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in qca_wq_awake_device()
399 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
407 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_rx() local
409 struct hci_uart *hu = qca->hu; in qca_wq_awake_rx()
416 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
417 qca->rx_ibs_state = HCI_IBS_RX_AWAKE; in qca_wq_awake_rx()
425 qca->ibs_sent_wacks++; in qca_wq_awake_rx()
427 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
435 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_rx_clock_vote_off() local
437 struct hci_uart *hu = qca->hu; in qca_wq_serial_rx_clock_vote_off()
446 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_tx_clock_vote_off() local
448 struct hci_uart *hu = qca->hu; in qca_wq_serial_tx_clock_vote_off()
463 struct qca_data *qca = from_timer(qca, t, tx_idle_timer); in hci_ibs_tx_idle_timeout() local
464 struct hci_uart *hu = qca->hu; in hci_ibs_tx_idle_timeout()
467 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
469 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_tx_idle_timeout()
472 switch (qca->tx_ibs_state) { in hci_ibs_tx_idle_timeout()
479 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in hci_ibs_tx_idle_timeout()
480 qca->ibs_sent_slps++; in hci_ibs_tx_idle_timeout()
481 queue_work(qca->workqueue, &qca->ws_tx_vote_off); in hci_ibs_tx_idle_timeout()
487 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
491 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_tx_idle_timeout()
496 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer); in hci_ibs_wake_retrans_timeout() local
497 struct hci_uart *hu = qca->hu; in hci_ibs_wake_retrans_timeout()
502 hu, qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
504 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_wake_retrans_timeout()
508 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in hci_ibs_wake_retrans_timeout()
509 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
513 switch (qca->tx_ibs_state) { in hci_ibs_wake_retrans_timeout()
521 qca->ibs_sent_wakes++; in hci_ibs_wake_retrans_timeout()
522 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in hci_ibs_wake_retrans_timeout()
523 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in hci_ibs_wake_retrans_timeout()
529 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
533 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
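
hci_ibs_tx_idle_timeout() and hci_ibs_wake_retrans_timeout() above are the two timer halves of the IBS (In-Band Sleep) TX machine. A minimal user-space model of the retransmit half, assuming illustrative enum and field names (the real driver re-arms a kernel timer with mod_timer()):

	/* Hedged sketch: while TX sits in WAKING (wake sent, no ack yet),
	 * every timer expiry re-sends the wake indication and asks to be
	 * re-armed; expiry in any other state is treated as spurious.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum tx_ibs_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

	struct ibs {
		enum tx_ibs_state tx_ibs_state;
		unsigned int wake_retrans_ms;	/* re-arm interval */
		unsigned int ibs_sent_wakes;
	};

	/* returns true if the caller should re-arm the timer */
	static bool wake_retrans_timeout(struct ibs *ibs)
	{
		switch (ibs->tx_ibs_state) {
		case TX_WAKING:
			/* still waiting for the controller's ack: send another wake */
			printf("resend HCI_IBS_WAKE_IND\n");
			ibs->ibs_sent_wakes++;
			return true;
		case TX_ASLEEP:
		case TX_AWAKE:
		default:
			printf("spurious timeout in state %d\n", ibs->tx_ibs_state);
			return false;
		}
	}

	int main(void)
	{
		struct ibs ibs = { .tx_ibs_state = TX_WAKING, .wake_retrans_ms = 100 };

		while (wake_retrans_timeout(&ibs) && ibs.ibs_sent_wakes < 3)
			;	/* a real driver would mod_timer(jiffies + delay) here */
		return 0;
	}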
542 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump_timeout() local
544 struct hci_uart *hu = qca->hu; in qca_controller_memdump_timeout()
546 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
547 if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump_timeout()
548 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_controller_memdump_timeout()
549 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_controller_memdump_timeout()
557 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
565 struct qca_data *qca; in qca_open() local
572 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); in qca_open()
573 if (!qca) in qca_open()
576 skb_queue_head_init(&qca->txq); in qca_open()
577 skb_queue_head_init(&qca->tx_wait_q); in qca_open()
578 skb_queue_head_init(&qca->rx_memdump_q); in qca_open()
579 spin_lock_init(&qca->hci_ibs_lock); in qca_open()
580 mutex_init(&qca->hci_memdump_lock); in qca_open()
581 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); in qca_open()
582 if (!qca->workqueue) { in qca_open()
583 BT_ERR("QCA Workqueue not initialized properly"); in qca_open()
584 kfree(qca); in qca_open()
588 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); in qca_open()
589 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); in qca_open()
590 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); in qca_open()
591 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); in qca_open()
592 INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump); in qca_open()
593 INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout, in qca_open()
595 init_waitqueue_head(&qca->suspend_wait_q); in qca_open()
597 qca->hu = hu; in qca_open()
598 init_completion(&qca->drop_ev_comp); in qca_open()
601 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_open()
602 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in qca_open()
604 qca->vote_last_jif = jiffies; in qca_open()
606 hu->priv = qca; in qca_open()
628 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); in qca_open()
629 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; in qca_open()
631 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); in qca_open()
632 qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS; in qca_open()
635 qca->tx_idle_delay, qca->wake_retrans); in qca_open()
643 struct qca_data *qca = hu->priv; in qca_debugfs_init() local
650 if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) in qca_debugfs_init()
657 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); in qca_debugfs_init()
658 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); in qca_debugfs_init()
660 &qca->ibs_sent_slps); in qca_debugfs_init()
662 &qca->ibs_sent_wakes); in qca_debugfs_init()
664 &qca->ibs_sent_wacks); in qca_debugfs_init()
666 &qca->ibs_recv_slps); in qca_debugfs_init()
668 &qca->ibs_recv_wakes); in qca_debugfs_init()
670 &qca->ibs_recv_wacks); in qca_debugfs_init()
671 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); in qca_debugfs_init()
672 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); in qca_debugfs_init()
673 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); in qca_debugfs_init()
674 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); in qca_debugfs_init()
675 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); in qca_debugfs_init()
676 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); in qca_debugfs_init()
677 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); in qca_debugfs_init()
678 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); in qca_debugfs_init()
679 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); in qca_debugfs_init()
680 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); in qca_debugfs_init()
684 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); in qca_debugfs_init()
686 &qca->tx_idle_delay); in qca_debugfs_init()
692 struct qca_data *qca = hu->priv; in qca_flush() local
694 BT_DBG("hu %p qca flush", hu); in qca_flush()
696 skb_queue_purge(&qca->tx_wait_q); in qca_flush()
697 skb_queue_purge(&qca->txq); in qca_flush()
705 struct qca_data *qca = hu->priv; in qca_close() local
707 BT_DBG("hu %p qca close", hu); in qca_close()
711 skb_queue_purge(&qca->tx_wait_q); in qca_close()
712 skb_queue_purge(&qca->txq); in qca_close()
713 skb_queue_purge(&qca->rx_memdump_q); in qca_close()
720 timer_shutdown_sync(&qca->tx_idle_timer); in qca_close()
721 timer_shutdown_sync(&qca->wake_retrans_timer); in qca_close()
722 destroy_workqueue(qca->workqueue); in qca_close()
723 qca->hu = NULL; in qca_close()
725 kfree_skb(qca->rx_skb); in qca_close()
729 kfree(qca); in qca_close()
739 struct qca_data *qca = hu->priv; in device_want_to_wakeup() local
743 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
745 qca->ibs_recv_wakes++; in device_want_to_wakeup()
748 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_want_to_wakeup()
749 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
753 switch (qca->rx_ibs_state) { in device_want_to_wakeup()
758 queue_work(qca->workqueue, &qca->ws_awake_rx); in device_want_to_wakeup()
759 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
770 qca->ibs_sent_wacks++; in device_want_to_wakeup()
776 qca->rx_ibs_state); in device_want_to_wakeup()
780 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
791 struct qca_data *qca = hu->priv; in device_want_to_sleep() local
793 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); in device_want_to_sleep()
795 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
797 qca->ibs_recv_slps++; in device_want_to_sleep()
799 switch (qca->rx_ibs_state) { in device_want_to_sleep()
802 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in device_want_to_sleep()
804 queue_work(qca->workqueue, &qca->ws_rx_vote_off); in device_want_to_sleep()
813 qca->rx_ibs_state); in device_want_to_sleep()
817 wake_up_interruptible(&qca->suspend_wait_q); in device_want_to_sleep()
819 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
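
device_want_to_wakeup() and device_want_to_sleep() handle the controller-initiated half of the IBS handshake on the RX path. A hedged, self-contained model of those two transitions (the work items named in the printf()s mirror the qca_data work structs above; everything else is illustrative):

	/* Hedged sketch: a WAKE_IND from the controller while RX is asleep
	 * kicks the "awake rx" work (vote the clock on, then send WAKE_ACK);
	 * a SLEEP_IND puts RX back to sleep and queues the clock-vote-off work.
	 */
	#include <stdio.h>

	enum rx_state { RX_ASLEEP, RX_AWAKE };

	static enum rx_state rx_wake_ind(enum rx_state s)
	{
		switch (s) {
		case RX_ASLEEP:
			printf("queue ws_awake_rx (vote clock on, send WAKE_ACK)\n");
			return RX_AWAKE;	/* the work item flips the state in the driver */
		case RX_AWAKE:
		default:
			printf("already awake: just resend WAKE_ACK\n");
			return s;
		}
	}

	static enum rx_state rx_sleep_ind(enum rx_state s)
	{
		if (s == RX_AWAKE)
			printf("queue ws_rx_vote_off (release the UART clock)\n");
		return RX_ASLEEP;	/* SLEEP_IND while already asleep is ignored */
	}

	int main(void)
	{
		enum rx_state s = RX_ASLEEP;

		s = rx_wake_ind(s);	/* controller has data for us */
		s = rx_wake_ind(s);	/* duplicate wake: ack again */
		s = rx_sleep_ind(s);	/* controller is done */
		return 0;
	}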
827 struct qca_data *qca = hu->priv; in device_woke_up() local
832 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_woke_up()
834 qca->ibs_recv_wacks++; in device_woke_up()
837 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_woke_up()
838 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
842 switch (qca->tx_ibs_state) { in device_woke_up()
846 qca->tx_ibs_state); in device_woke_up()
851 while ((skb = skb_dequeue(&qca->tx_wait_q))) in device_woke_up()
852 skb_queue_tail(&qca->txq, skb); in device_woke_up()
855 del_timer(&qca->wake_retrans_timer); in device_woke_up()
856 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in device_woke_up()
857 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in device_woke_up()
858 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; in device_woke_up()
864 qca->tx_ibs_state); in device_woke_up()
868 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
880 struct qca_data *qca = hu->priv; in qca_enqueue() local
882 BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb, in qca_enqueue()
883 qca->tx_ibs_state); in qca_enqueue()
885 if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_enqueue()
895 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_enqueue()
901 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_enqueue()
902 test_bit(QCA_SUSPENDING, &qca->flags)) { in qca_enqueue()
903 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
904 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
909 switch (qca->tx_ibs_state) { in qca_enqueue()
912 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
913 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in qca_enqueue()
914 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in qca_enqueue()
920 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
922 qca->tx_ibs_state = HCI_IBS_TX_WAKING; in qca_enqueue()
924 queue_work(qca->workqueue, &qca->ws_awake_device); in qca_enqueue()
930 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
935 qca->tx_ibs_state); in qca_enqueue()
940 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
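
qca_enqueue() above implements the host-initiated TX half of IBS, and device_woke_up() completes it once the controller acks the wake. A simplified stand-alone model of that state machine, with the two packet queues reduced to counters (names mirror the txq/tx_wait_q fields; this is a sketch, not the driver's code):

	/* Hedged sketch: AWAKE transmits directly; ASLEEP parks the packet,
	 * moves to WAKING and schedules the wake work; WAKING just parks.
	 * The controller's wake ack flushes the parked packets and re-arms
	 * the idle timer in the real driver.
	 */
	#include <stdio.h>

	enum tx_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

	struct ibs_tx {
		enum tx_state state;
		unsigned int txq;	/* packets ready for the UART */
		unsigned int tx_wait_q;	/* packets parked until the controller wakes */
	};

	static void enqueue(struct ibs_tx *t)
	{
		switch (t->state) {
		case TX_AWAKE:
			t->txq++;		/* transmit directly, rearm idle timer */
			break;
		case TX_ASLEEP:
			t->tx_wait_q++;		/* park it, start waking the device */
			t->state = TX_WAKING;
			printf("schedule ws_awake_device\n");
			break;
		case TX_WAKING:
			t->tx_wait_q++;		/* wake already in flight, just park */
			break;
		}
	}

	/* controller acknowledged our wake indication */
	static void woke_up(struct ibs_tx *t)
	{
		if (t->state != TX_WAKING)
			return;			/* unexpected ack, ignore in this model */
		t->txq += t->tx_wait_q;		/* flush everything that was parked */
		t->tx_wait_q = 0;
		t->state = TX_AWAKE;		/* and (re)arm the tx idle timer */
	}

	int main(void)
	{
		struct ibs_tx t = { .state = TX_ASLEEP };

		enqueue(&t);	/* ASLEEP -> WAKING, packet parked */
		enqueue(&t);	/* WAKING: second packet parked too */
		woke_up(&t);	/* ack: both packets move to txq, state AWAKE */
		printf("state=%d txq=%u wait=%u\n", t.state, t.txq, t.tx_wait_q);
		return 0;
	}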
997 struct qca_data *qca = hu->priv; in qca_dmp_hdr() local
1001 qca->controller_id); in qca_dmp_hdr()
1005 qca->fw_version); in qca_dmp_hdr()
1018 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump() local
1020 struct hci_uart *hu = qca->hu; in qca_controller_memdump()
1023 struct qca_memdump_info *qca_memdump = qca->qca_memdump; in qca_controller_memdump()
1030 while ((skb = skb_dequeue(&qca->rx_memdump_q))) { in qca_controller_memdump()
1032 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump()
1036 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_controller_memdump()
1037 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_controller_memdump()
1038 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1046 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1050 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1053 qca->memdump_state = QCA_MEMDUMP_COLLECTING; in qca_controller_memdump()
1066 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_controller_memdump()
1067 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1074 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1078 queue_delayed_work(qca->workqueue, in qca_controller_memdump()
1079 &qca->ctrl_memdump_timeout, in qca_controller_memdump()
1088 kfree(qca->qca_memdump); in qca_controller_memdump()
1089 qca->qca_memdump = NULL; in qca_controller_memdump()
1090 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1091 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1092 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1093 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1097 bt_dev_info(hu->hdev, "QCA collecting dump of size:%u", in qca_controller_memdump()
1105 if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump()
1106 bt_dev_err(hu->hdev, "QCA: Discarding other packets"); in qca_controller_memdump()
1109 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1123 bt_dev_err(hu->hdev, "QCA controller missed packet:%d", in qca_controller_memdump()
1129 "QCA memdump received %d, no space for missed packet", in qca_controller_memdump()
1144 "QCA memdump unexpected packet %d", in qca_controller_memdump()
1148 "QCA memdump packet %d with length %d", in qca_controller_memdump()
1155 "QCA memdump received no space for packet %d", in qca_controller_memdump()
1161 "QCA memdump Done, received %d, total %d", in qca_controller_memdump()
1165 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1166 kfree(qca->qca_memdump); in qca_controller_memdump()
1167 qca->qca_memdump = NULL; in qca_controller_memdump()
1168 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1169 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1172 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
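
qca_controller_memdump() reassembles the crash dump from sequence-numbered fragments, accounting for fragments the controller dropped. A loose user-space model of that pattern; LAST_SEQ_NO, the struct layout, and the zero-fill comment are assumptions for illustration:

	/* Hedged sketch: fragments carry a sequence number; a jump in the
	 * sequence is logged as missed packets (the driver zero-fills that
	 * span in the dump buffer), and a final marker ends collection.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define LAST_SEQ_NO 0xffff	/* stand-in for the real end-of-dump marker */

	struct memdump {
		unsigned int expect_seq;	/* next sequence number we expect */
		unsigned int received;		/* fragments actually seen */
		bool done;
	};

	static void rx_fragment(struct memdump *md, unsigned int seq)
	{
		if (md->done)
			return;		/* discard anything after the dump finished */

		if (seq != md->expect_seq && seq != LAST_SEQ_NO)
			printf("missed %u packet(s) before seq %u\n",
			       seq - md->expect_seq, seq);

		md->received++;
		md->expect_seq = seq + 1;

		if (seq == LAST_SEQ_NO) {
			md->done = true;	/* driver: state -> QCA_MEMDUMP_COLLECTED */
			printf("memdump done, received %u fragments\n", md->received);
		}
	}

	int main(void)
	{
		struct memdump md = { 0 };

		rx_fragment(&md, 0);
		rx_fragment(&md, 1);
		rx_fragment(&md, 4);		/* fragments 2 and 3 were lost */
		rx_fragment(&md, LAST_SEQ_NO);
		return 0;
	}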
1181 struct qca_data *qca = hu->priv; in qca_controller_memdump_event() local
1183 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_controller_memdump_event()
1184 skb_queue_tail(&qca->rx_memdump_q, skb); in qca_controller_memdump_event()
1185 queue_work(qca->workqueue, &qca->ctrl_memdump_evt); in qca_controller_memdump_event()
1193 struct qca_data *qca = hu->priv; in qca_recv_event() local
1195 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { in qca_recv_event()
1209 complete(&qca->drop_ev_comp); in qca_recv_event()
1259 struct qca_data *qca = hu->priv; in qca_recv() local
1264 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, in qca_recv()
1266 if (IS_ERR(qca->rx_skb)) { in qca_recv()
1267 int err = PTR_ERR(qca->rx_skb); in qca_recv()
1269 qca->rx_skb = NULL; in qca_recv()
1278 struct qca_data *qca = hu->priv; in qca_dequeue() local
1280 return skb_dequeue(&qca->txq); in qca_dequeue()
1322 struct qca_data *qca = hu->priv; in qca_set_baudrate() local
1341 skb_queue_tail(&qca->txq, skb); in qca_set_baudrate()
1346 while (!skb_queue_empty(&qca->txq)) in qca_set_baudrate()
1465 struct qca_data *qca = hu->priv; in qca_set_speed() local
1499 reinit_completion(&qca->drop_ev_comp); in qca_set_speed()
1500 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1536 if (!wait_for_completion_timeout(&qca->drop_ev_comp, in qca_set_speed()
1543 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1556 struct qca_data *qca = hu->priv; in qca_send_crashbuffer() local
1573 skb_queue_tail(&qca->txq, skb); in qca_send_crashbuffer()
1582 struct qca_data *qca = hu->priv; in qca_wait_for_dump_collection() local
1584 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION, in qca_wait_for_dump_collection()
1587 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_wait_for_dump_collection()
1593 struct qca_data *qca = hu->priv; in qca_hw_error() local
1595 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_hw_error()
1596 set_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1597 bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); in qca_hw_error()
1599 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_hw_error()
1600 /* If hardware error event received for other than QCA in qca_hw_error()
1606 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_hw_error()
1609 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_hw_error()
1617 mutex_lock(&qca->hci_memdump_lock); in qca_hw_error()
1618 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1621 if (qca->qca_memdump) { in qca_hw_error()
1622 kfree(qca->qca_memdump); in qca_hw_error()
1623 qca->qca_memdump = NULL; in qca_hw_error()
1625 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_hw_error()
1626 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_hw_error()
1628 mutex_unlock(&qca->hci_memdump_lock); in qca_hw_error()
1630 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_hw_error()
1631 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1632 cancel_work_sync(&qca->ctrl_memdump_evt); in qca_hw_error()
1633 skb_queue_purge(&qca->rx_memdump_q); in qca_hw_error()
1636 clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1642 struct qca_data *qca = hu->priv; in qca_cmd_timeout() local
1644 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_cmd_timeout()
1645 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_cmd_timeout()
1646 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_cmd_timeout()
1649 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_cmd_timeout()
1657 mutex_lock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1658 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_cmd_timeout()
1659 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_cmd_timeout()
1660 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_cmd_timeout()
1667 mutex_unlock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1775 struct qca_data *qca = hu->priv; in qca_power_on() local
1804 clear_bit(QCA_BT_OFF, &qca->flags); in qca_power_on()
1820 /* QCA uses 1 as non-HCI data path id for HFP */ in qca_get_data_path_id()
1830 * data transport path for QCA controllers, so set below field as NULL. in qca_configure_hfp_offload()
1839 struct qca_data *qca = hu->priv; in qca_setup() local
1852 clear_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1854 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1890 qca->memdump_state = QCA_MEMDUMP_IDLE; in qca_setup()
1897 clear_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_setup()
1950 /* Get QCA version information */ in qca_setup()
1960 clear_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1968 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1975 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
2004 qca->fw_version = le16_to_cpu(ver.patch_ver); in qca_setup()
2005 qca->controller_id = le16_to_cpu(ver.rom_ver); in qca_setup()
2013 .name = "QCA",
2130 struct qca_data *qca = hu->priv; in qca_power_shutdown() local
2139 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
2140 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_power_shutdown()
2142 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
2177 set_bit(QCA_BT_OFF, &qca->flags); in qca_power_shutdown()
2183 struct qca_data *qca = hu->priv; in qca_power_off() local
2189 del_timer_sync(&qca->wake_retrans_timer); in qca_power_off()
2190 del_timer_sync(&qca->tx_idle_timer); in qca_power_off()
2194 && qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_power_off()
2246 static int qca_init_regulators(struct qca_power *qca, in qca_init_regulators() argument
2253 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL); in qca_init_regulators()
2260 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk); in qca_init_regulators()
2270 qca->vreg_bulk = bulk; in qca_init_regulators()
2271 qca->num_vregs = num_vregs; in qca_init_regulators()
2443 struct qca_data *qca = hu->priv; in qca_serdev_shutdown() local
2448 if (test_bit(QCA_BT_OFF, &qca->flags) || in qca_serdev_shutdown()
2456 BT_ERR("QCA send IBS_WAKE_IND error: %d", ret); in qca_serdev_shutdown()
2466 BT_ERR("QCA send EDL_RESET_REQ error: %d", ret); in qca_serdev_shutdown()
2479 struct qca_data *qca = hu->priv; in qca_suspend() local
2486 set_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
2491 if (test_bit(QCA_ROM_FW, &qca->flags)) in qca_suspend()
2498 if (test_bit(QCA_BT_OFF, &qca->flags) && in qca_suspend()
2499 !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) in qca_suspend()
2502 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_suspend()
2503 test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_suspend()
2504 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? in qca_suspend()
2512 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, in qca_suspend()
2515 if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { in qca_suspend()
2522 cancel_work_sync(&qca->ws_awake_device); in qca_suspend()
2523 cancel_work_sync(&qca->ws_awake_rx); in qca_suspend()
2525 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in qca_suspend()
2528 switch (qca->tx_ibs_state) { in qca_suspend()
2530 del_timer(&qca->wake_retrans_timer); in qca_suspend()
2533 del_timer(&qca->tx_idle_timer); in qca_suspend()
2544 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_suspend()
2545 qca->ibs_sent_slps++; in qca_suspend()
2553 BT_ERR("Spurious tx state %d", qca->tx_ibs_state); in qca_suspend()
2558 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_suspend()
2572 ret = wait_event_interruptible_timeout(qca->suspend_wait_q, in qca_suspend()
2573 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, in qca_suspend()
2583 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
2593 struct qca_data *qca = hu->priv; in qca_resume() local
2595 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_resume()