Lines matching refs:qca (identifier cross-reference over the Linux Bluetooth UART driver, drivers/bluetooth/hci_qca.c; the leading number on each hit is the file line, and the trailing annotation names the enclosing function)

286 struct qca_data *qca = hu->priv; in serial_clock_vote() local
289 bool old_vote = (qca->tx_vote | qca->rx_vote); in serial_clock_vote()
294 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
297 qca->vote_off_ms += diff; in serial_clock_vote()
299 qca->vote_on_ms += diff; in serial_clock_vote()
303 qca->tx_vote = true; in serial_clock_vote()
304 qca->tx_votes_on++; in serial_clock_vote()
308 qca->rx_vote = true; in serial_clock_vote()
309 qca->rx_votes_on++; in serial_clock_vote()
313 qca->tx_vote = false; in serial_clock_vote()
314 qca->tx_votes_off++; in serial_clock_vote()
318 qca->rx_vote = false; in serial_clock_vote()
319 qca->rx_votes_off++; in serial_clock_vote()
327 new_vote = qca->rx_vote | qca->tx_vote; in serial_clock_vote()
338 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
341 qca->votes_on++; in serial_clock_vote()
342 qca->vote_off_ms += diff; in serial_clock_vote()
344 qca->votes_off++; in serial_clock_vote()
345 qca->vote_on_ms += diff; in serial_clock_vote()
347 qca->vote_last_jif = jiffies; in serial_clock_vote()
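A hedged sketch of what the serial_clock_vote() hits add up to: the time elapsed since the last transition is charged to whichever state (clock on or clock off) the combined vote was in, and the counters record each transition. Field names come from the hits above; the actual clock enable/disable call and error handling are elided.

    /* assumes <linux/jiffies.h> and the qca_data fields shown above */
    static void vote_accounting_sketch(struct qca_data *qca, bool new_vote)
    {
        bool old_vote = qca->tx_vote | qca->rx_vote;
        unsigned int diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

        if (old_vote == new_vote)
            return;

        if (new_vote) {
            qca->votes_on++;
            qca->vote_off_ms += diff;   /* time just spent with clock off */
        } else {
            qca->votes_off++;
            qca->vote_on_ms += diff;    /* time just spent with clock on */
        }
        qca->vote_last_jif = jiffies;
    }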
358 struct qca_data *qca = hu->priv; in send_hci_ibs_cmd() local
371 skb_queue_tail(&qca->txq, skb); in send_hci_ibs_cmd()
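The two send_hci_ibs_cmd() hits imply most of the function: a one-byte IBS control packet is allocated and appended to txq for the transmit path to drain. A plausible reconstruction using only standard Bluetooth skb helpers; logging is omitted and details may differ.

    static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
    {
        struct qca_data *qca = hu->priv;
        struct sk_buff *skb;

        skb = bt_skb_alloc(1, GFP_ATOMIC);  /* room for the single IBS byte */
        if (!skb)
            return -ENOMEM;

        skb_put_u8(skb, cmd);               /* e.g. HCI_IBS_WAKE_IND */
        skb_queue_tail(&qca->txq, skb);
        return 0;
    }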
378 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_device() local
380 struct hci_uart *hu = qca->hu; in qca_wq_awake_device()
389 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
395 qca->ibs_sent_wakes++; in qca_wq_awake_device()
398 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in qca_wq_awake_device()
399 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in qca_wq_awake_device()
401 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
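The qca_wq_awake_device() hits (wake counter, retransmit delay, mod_timer) outline the wake handshake: send a wake indication, then arm wake_retrans_timer so the indication is re-sent if the controller never acknowledges it. A condensed view under the hci_ibs_lock shown above; HCI_IBS_WAKE_IND is assumed from the driver's IBS command set.

    if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
        BT_ERR("Failed to send WAKE to device");
    qca->ibs_sent_wakes++;

    /* re-send the wake if no ack arrives within wake_retrans ms */
    retrans_delay = msecs_to_jiffies(qca->wake_retrans);
    mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);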
409 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_rx() local
411 struct hci_uart *hu = qca->hu; in qca_wq_awake_rx()
418 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
419 qca->rx_ibs_state = HCI_IBS_RX_AWAKE; in qca_wq_awake_rx()
427 qca->ibs_sent_wacks++; in qca_wq_awake_rx()
429 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
437 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_rx_clock_vote_off() local
439 struct hci_uart *hu = qca->hu; in qca_wq_serial_rx_clock_vote_off()
448 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_tx_clock_vote_off() local
450 struct hci_uart *hu = qca->hu; in qca_wq_serial_tx_clock_vote_off()
465 struct qca_data *qca = from_timer(qca, t, tx_idle_timer); in hci_ibs_tx_idle_timeout() local
466 struct hci_uart *hu = qca->hu; in hci_ibs_tx_idle_timeout()
469 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
471 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_tx_idle_timeout()
474 switch (qca->tx_ibs_state) { in hci_ibs_tx_idle_timeout()
481 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in hci_ibs_tx_idle_timeout()
482 qca->ibs_sent_slps++; in hci_ibs_tx_idle_timeout()
483 queue_work(qca->workqueue, &qca->ws_tx_vote_off); in hci_ibs_tx_idle_timeout()
489 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
493 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_tx_idle_timeout()
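Both timer callbacks above use the timer_setup()/from_timer() pairing: the callback receives only the timer_list pointer and recovers the enclosing qca_data by naming the member the timer is embedded in. Minimal illustration of the pattern as hci_ibs_tx_idle_timeout() applies it:

    static void hci_ibs_tx_idle_timeout(struct timer_list *t)
    {
        /* t points at qca->tx_idle_timer; walk back to the container */
        struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
        struct hci_uart *hu = qca->hu;

        /* ... mark TX asleep and vote the clock off (lines 481-483) ... */
    }

    /* paired at init time (line 633): */
    timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);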
498 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer); in hci_ibs_wake_retrans_timeout() local
499 struct hci_uart *hu = qca->hu; in hci_ibs_wake_retrans_timeout()
504 hu, qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
506 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_wake_retrans_timeout()
510 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in hci_ibs_wake_retrans_timeout()
511 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
515 switch (qca->tx_ibs_state) { in hci_ibs_wake_retrans_timeout()
523 qca->ibs_sent_wakes++; in hci_ibs_wake_retrans_timeout()
524 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in hci_ibs_wake_retrans_timeout()
525 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in hci_ibs_wake_retrans_timeout()
531 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
535 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
544 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump_timeout() local
546 struct hci_uart *hu = qca->hu; in qca_controller_memdump_timeout()
548 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
549 if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump_timeout()
550 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_controller_memdump_timeout()
551 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_controller_memdump_timeout()
559 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
567 struct qca_data *qca; in qca_open() local
574 qca = kzalloc(sizeof(*qca), GFP_KERNEL); in qca_open()
575 if (!qca) in qca_open()
578 skb_queue_head_init(&qca->txq); in qca_open()
579 skb_queue_head_init(&qca->tx_wait_q); in qca_open()
580 skb_queue_head_init(&qca->rx_memdump_q); in qca_open()
581 spin_lock_init(&qca->hci_ibs_lock); in qca_open()
582 mutex_init(&qca->hci_memdump_lock); in qca_open()
583 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); in qca_open()
584 if (!qca->workqueue) { in qca_open()
586 kfree(qca); in qca_open()
590 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); in qca_open()
591 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); in qca_open()
592 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); in qca_open()
593 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); in qca_open()
594 INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump); in qca_open()
595 INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout, in qca_open()
597 init_waitqueue_head(&qca->suspend_wait_q); in qca_open()
599 qca->hu = hu; in qca_open()
600 init_completion(&qca->drop_ev_comp); in qca_open()
603 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_open()
604 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in qca_open()
606 qca->vote_last_jif = jiffies; in qca_open()
608 hu->priv = qca; in qca_open()
630 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); in qca_open()
631 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; in qca_open()
633 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); in qca_open()
634 qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS; in qca_open()
637 qca->tx_idle_delay, qca->wake_retrans); in qca_open()
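Read top to bottom, the qca_open() hits establish a strict ordering: queues and locks are initialized first, the ordered workqueue is created before any INIT_WORK user can queue against it, hu->priv is published, and only then are the IBS timers set up with their defaults. A condensed outline of that order (error handling beyond the workqueue check elided):

    qca = kzalloc(sizeof(*qca), GFP_KERNEL);    /* zeroed: flags/stats start at 0 */
    if (!qca)
        return -ENOMEM;

    skb_queue_head_init(&qca->txq);             /* queues and locks before users */
    spin_lock_init(&qca->hci_ibs_lock);

    qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
    if (!qca->workqueue) {
        kfree(qca);                             /* unwind the only prior alloc */
        return -ENOMEM;
    }

    hu->priv = qca;                             /* publish, then arm timers */
    timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
    qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;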
645 struct qca_data *qca = hu->priv; in qca_debugfs_init() local
652 if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) in qca_debugfs_init()
659 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); in qca_debugfs_init()
660 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); in qca_debugfs_init()
662 &qca->ibs_sent_slps); in qca_debugfs_init()
664 &qca->ibs_sent_wakes); in qca_debugfs_init()
666 &qca->ibs_sent_wacks); in qca_debugfs_init()
668 &qca->ibs_recv_slps); in qca_debugfs_init()
670 &qca->ibs_recv_wakes); in qca_debugfs_init()
672 &qca->ibs_recv_wacks); in qca_debugfs_init()
673 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); in qca_debugfs_init()
674 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); in qca_debugfs_init()
675 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); in qca_debugfs_init()
676 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); in qca_debugfs_init()
677 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); in qca_debugfs_init()
678 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); in qca_debugfs_init()
679 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); in qca_debugfs_init()
680 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); in qca_debugfs_init()
681 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); in qca_debugfs_init()
682 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); in qca_debugfs_init()
686 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); in qca_debugfs_init()
688 &qca->tx_idle_delay); in qca_debugfs_init()
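Every debugfs hit uses a typed helper (u8/u32/u64/bool) that exports a live pointer into qca_data with no read/write callbacks, so reads always reflect the current counters. A minimal sketch of the setup, assuming ibs_dir is created with debugfs_create_dir() earlier in qca_debugfs_init() and mode is a read-only mask such as 0444:

    struct dentry *ibs_dir = debugfs_create_dir("ibs", hu->hdev->debugfs);
    umode_t mode = 0444;

    /* each helper just remembers the pointer; no callbacks required */
    debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
    debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
    debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
    debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);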
694 struct qca_data *qca = hu->priv; in qca_flush() local
698 skb_queue_purge(&qca->tx_wait_q); in qca_flush()
699 skb_queue_purge(&qca->txq); in qca_flush()
707 struct qca_data *qca = hu->priv; in qca_close() local
713 skb_queue_purge(&qca->tx_wait_q); in qca_close()
714 skb_queue_purge(&qca->txq); in qca_close()
715 skb_queue_purge(&qca->rx_memdump_q); in qca_close()
722 timer_shutdown_sync(&qca->tx_idle_timer); in qca_close()
723 timer_shutdown_sync(&qca->wake_retrans_timer); in qca_close()
724 destroy_workqueue(qca->workqueue); in qca_close()
725 qca->hu = NULL; in qca_close()
727 kfree_skb(qca->rx_skb); in qca_close()
731 kfree(qca); in qca_close()
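qca_close() mirrors qca_open(), and the order of the hits matters: purge the queues, shut the timers down with timer_shutdown_sync() so no callback is still running and none can re-arm, destroy the workqueue (which flushes pending work), and only then free the in-flight rx skb and the private data. Presumably hu->priv is also cleared here, but that statement contains no qca token and so cannot appear among the hits. Condensed:

    skb_queue_purge(&qca->txq);
    timer_shutdown_sync(&qca->tx_idle_timer);       /* waits; cannot re-arm */
    timer_shutdown_sync(&qca->wake_retrans_timer);
    destroy_workqueue(qca->workqueue);              /* flushes queued work */
    qca->hu = NULL;
    kfree_skb(qca->rx_skb);                         /* NULL-safe */
    kfree(qca);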
741 struct qca_data *qca = hu->priv; in device_want_to_wakeup() local
745 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
747 qca->ibs_recv_wakes++; in device_want_to_wakeup()
750 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_want_to_wakeup()
751 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
755 switch (qca->rx_ibs_state) { in device_want_to_wakeup()
760 queue_work(qca->workqueue, &qca->ws_awake_rx); in device_want_to_wakeup()
761 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
772 qca->ibs_sent_wacks++; in device_want_to_wakeup()
778 qca->rx_ibs_state); in device_want_to_wakeup()
782 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
793 struct qca_data *qca = hu->priv; in device_want_to_sleep() local
795 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); in device_want_to_sleep()
797 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
799 qca->ibs_recv_slps++; in device_want_to_sleep()
801 switch (qca->rx_ibs_state) { in device_want_to_sleep()
804 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in device_want_to_sleep()
806 queue_work(qca->workqueue, &qca->ws_rx_vote_off); in device_want_to_sleep()
815 qca->rx_ibs_state); in device_want_to_sleep()
819 wake_up_interruptible(&qca->suspend_wait_q); in device_want_to_sleep()
821 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
829 struct qca_data *qca = hu->priv; in device_woke_up() local
834 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_woke_up()
836 qca->ibs_recv_wacks++; in device_woke_up()
839 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_woke_up()
840 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
844 switch (qca->tx_ibs_state) { in device_woke_up()
848 qca->tx_ibs_state); in device_woke_up()
853 while ((skb = skb_dequeue(&qca->tx_wait_q))) in device_woke_up()
854 skb_queue_tail(&qca->txq, skb); in device_woke_up()
857 del_timer(&qca->wake_retrans_timer); in device_woke_up()
858 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in device_woke_up()
859 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in device_woke_up()
860 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; in device_woke_up()
866 qca->tx_ibs_state); in device_woke_up()
870 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
882 struct qca_data *qca = hu->priv; in qca_enqueue() local
885 qca->tx_ibs_state); in qca_enqueue()
887 if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_enqueue()
897 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_enqueue()
903 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_enqueue()
904 test_bit(QCA_SUSPENDING, &qca->flags)) { in qca_enqueue()
905 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
906 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
911 switch (qca->tx_ibs_state) { in qca_enqueue()
914 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
915 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in qca_enqueue()
916 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in qca_enqueue()
922 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
924 qca->tx_ibs_state = HCI_IBS_TX_WAKING; in qca_enqueue()
926 queue_work(qca->workqueue, &qca->ws_awake_device); in qca_enqueue()
932 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
937 qca->tx_ibs_state); in qca_enqueue()
942 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
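Taken together, the qca_enqueue() hits form the TX-side IBS state machine. A reconstruction of the switch directly from lines 911-937 above; the short-circuit for IBS-disabled/suspending and the locking are already visible in the listing, and the default-branch message is paraphrased:

    switch (qca->tx_ibs_state) {
    case HCI_IBS_TX_AWAKE:
        /* link awake: queue directly, push the idle deadline out */
        skb_queue_tail(&qca->txq, skb);
        idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
        mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
        break;

    case HCI_IBS_TX_ASLEEP:
        /* park the skb and kick the wake handshake */
        skb_queue_tail(&qca->tx_wait_q, skb);
        qca->tx_ibs_state = HCI_IBS_TX_WAKING;
        queue_work(qca->workqueue, &qca->ws_awake_device);
        break;

    case HCI_IBS_TX_WAKING:
        /* a wake is already in flight: just park the skb */
        skb_queue_tail(&qca->tx_wait_q, skb);
        break;

    default:
        BT_ERR("Illegal tx state: %d (losing packet)", qca->tx_ibs_state);
        kfree_skb(skb);
        break;
    }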
999 struct qca_data *qca = hu->priv; in qca_dmp_hdr() local
1003 qca->controller_id); in qca_dmp_hdr()
1007 qca->fw_version); in qca_dmp_hdr()
1020 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump() local
1022 struct hci_uart *hu = qca->hu; in qca_controller_memdump()
1025 struct qca_memdump_info *qca_memdump = qca->qca_memdump; in qca_controller_memdump()
1032 while ((skb = skb_dequeue(&qca->rx_memdump_q))) { in qca_controller_memdump()
1034 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump()
1038 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_controller_memdump()
1039 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_controller_memdump()
1040 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1047 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1051 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1054 qca->memdump_state = QCA_MEMDUMP_COLLECTING; in qca_controller_memdump()
1067 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_controller_memdump()
1068 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1075 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1079 queue_delayed_work(qca->workqueue, in qca_controller_memdump()
1080 &qca->ctrl_memdump_timeout, in qca_controller_memdump()
1089 kfree(qca->qca_memdump); in qca_controller_memdump()
1090 qca->qca_memdump = NULL; in qca_controller_memdump()
1091 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1092 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1093 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1094 clear_bit(QCA_IBS_DISABLED, &qca->flags); in qca_controller_memdump()
1095 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1107 if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump()
1111 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1167 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1168 kfree(qca->qca_memdump); in qca_controller_memdump()
1169 qca->qca_memdump = NULL; in qca_controller_memdump()
1170 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1171 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1174 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1183 struct qca_data *qca = hu->priv; in qca_controller_memdump_event() local
1185 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_controller_memdump_event()
1186 skb_queue_tail(&qca->rx_memdump_q, skb); in qca_controller_memdump_event()
1187 queue_work(qca->workqueue, &qca->ctrl_memdump_evt); in qca_controller_memdump_event()
1195 struct qca_data *qca = hu->priv; in qca_recv_event() local
1197 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { in qca_recv_event()
1211 complete(&qca->drop_ev_comp); in qca_recv_event()
1261 struct qca_data *qca = hu->priv; in qca_recv() local
1266 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, in qca_recv()
1268 if (IS_ERR(qca->rx_skb)) { in qca_recv()
1269 int err = PTR_ERR(qca->rx_skb); in qca_recv()
1271 qca->rx_skb = NULL; in qca_recv()
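The qca_recv() hits show the standard h4_recv_buf() contract: the partially reassembled skb is threaded back in on every call, and an ERR_PTR return means the reassembly state must be dropped and the error propagated. A sketch of the full error leg; qca_recv_pkts is taken to be the driver's H4 packet-descriptor table:

    qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
                              qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
    if (IS_ERR(qca->rx_skb)) {
        int err = PTR_ERR(qca->rx_skb);

        bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
        qca->rx_skb = NULL;     /* reset reassembly state */
        return err;
    }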
1280 struct qca_data *qca = hu->priv; in qca_dequeue() local
1282 return skb_dequeue(&qca->txq); in qca_dequeue()
1324 struct qca_data *qca = hu->priv; in qca_set_baudrate() local
1343 skb_queue_tail(&qca->txq, skb); in qca_set_baudrate()
1348 while (!skb_queue_empty(&qca->txq)) in qca_set_baudrate()
1467 struct qca_data *qca = hu->priv; in qca_set_speed() local
1501 reinit_completion(&qca->drop_ev_comp); in qca_set_speed()
1502 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1538 if (!wait_for_completion_timeout(&qca->drop_ev_comp, in qca_set_speed()
1545 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1558 struct qca_data *qca = hu->priv; in qca_send_crashbuffer() local
1575 skb_queue_tail(&qca->txq, skb); in qca_send_crashbuffer()
1584 struct qca_data *qca = hu->priv; in qca_wait_for_dump_collection() local
1586 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION, in qca_wait_for_dump_collection()
1589 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_wait_for_dump_collection()
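qca_wait_for_dump_collection() blocks on a flag bit rather than a completion: wait_on_bit_timeout() sleeps until QCA_MEMDUMP_COLLECTION is cleared or the timeout lapses, which means whoever clears the bit must also kick the bit waitqueue. A minimal sketch of both sides of that pairing; MEMDUMP_TIMEOUT_MS is assumed from context:

    /* waiter */
    wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
                        TASK_INTERRUPTIBLE,
                        msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));

    /* completer: clearing alone does not wake the waiter */
    clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
    smp_mb__after_atomic();
    wake_up_bit(&qca->flags, QCA_MEMDUMP_COLLECTION);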
1595 struct qca_data *qca = hu->priv; in qca_hw_error() local
1597 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_hw_error()
1598 set_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1599 bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); in qca_hw_error()
1601 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_hw_error()
1608 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_hw_error()
1611 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_hw_error()
1619 mutex_lock(&qca->hci_memdump_lock); in qca_hw_error()
1620 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1623 if (qca->qca_memdump) { in qca_hw_error()
1624 kfree(qca->qca_memdump); in qca_hw_error()
1625 qca->qca_memdump = NULL; in qca_hw_error()
1627 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_hw_error()
1628 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_hw_error()
1630 mutex_unlock(&qca->hci_memdump_lock); in qca_hw_error()
1632 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_hw_error()
1633 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1634 cancel_work_sync(&qca->ctrl_memdump_evt); in qca_hw_error()
1635 skb_queue_purge(&qca->rx_memdump_q); in qca_hw_error()
1638 clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1644 struct qca_data *qca = hu->priv; in qca_cmd_timeout() local
1646 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_cmd_timeout()
1647 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_cmd_timeout()
1648 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_cmd_timeout()
1651 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_cmd_timeout()
1659 mutex_lock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1660 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_cmd_timeout()
1661 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_cmd_timeout()
1662 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_cmd_timeout()
1669 mutex_unlock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1788 struct qca_data *qca = hu->priv; in qca_power_on() local
1818 clear_bit(QCA_BT_OFF, &qca->flags); in qca_power_on()
1853 struct qca_data *qca = hu->priv; in qca_setup() local
1867 clear_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1869 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1905 qca->memdump_state = QCA_MEMDUMP_IDLE; in qca_setup()
1912 clear_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_setup()
1968 clear_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1978 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1985 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
2014 qca->fw_version = le16_to_cpu(ver.patch_ver); in qca_setup()
2015 qca->controller_id = le16_to_cpu(ver.rom_ver); in qca_setup()
2140 struct qca_data *qca = hu->priv; in qca_power_shutdown() local
2150 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
2151 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_power_shutdown()
2153 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
2166 set_bit(QCA_BT_OFF, &qca->flags); in qca_power_shutdown()
2195 set_bit(QCA_BT_OFF, &qca->flags); in qca_power_shutdown()
2201 struct qca_data *qca = hu->priv; in qca_power_off() local
2207 del_timer_sync(&qca->wake_retrans_timer); in qca_power_off()
2208 del_timer_sync(&qca->tx_idle_timer); in qca_power_off()
2212 && qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_power_off()
2267 static int qca_init_regulators(struct qca_power *qca, in qca_init_regulators() argument
2274 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL); in qca_init_regulators()
2281 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk); in qca_init_regulators()
2291 qca->vreg_bulk = bulk; in qca_init_regulators()
2292 qca->num_vregs = num_vregs; in qca_init_regulators()
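qca_init_regulators() follows the devm regulator-bulk pattern: one device-managed array, one bulk-get call, and the result cached on the qca_power struct so later code can enable or disable all supplies at once. Condensed from the hits; the per-supply name assignments between the allocation and the get (the gap in the listing) are elided:

    bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
    if (!bulk)
        return -ENOMEM;

    /* bulk[i].supply = <supply name> assignments happen here */

    ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
    if (ret < 0)
        return ret;

    qca->vreg_bulk = bulk;
    qca->num_vregs = num_vregs;
    return 0;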
2564 struct qca_data *qca = hu->priv; in qca_suspend() local
2571 set_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
2576 if (test_bit(QCA_ROM_FW, &qca->flags)) in qca_suspend()
2583 if (test_bit(QCA_BT_OFF, &qca->flags) && in qca_suspend()
2584 !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) in qca_suspend()
2587 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_suspend()
2588 test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_suspend()
2589 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? in qca_suspend()
2597 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, in qca_suspend()
2600 if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { in qca_suspend()
2607 cancel_work_sync(&qca->ws_awake_device); in qca_suspend()
2608 cancel_work_sync(&qca->ws_awake_rx); in qca_suspend()
2610 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in qca_suspend()
2613 switch (qca->tx_ibs_state) { in qca_suspend()
2615 del_timer(&qca->wake_retrans_timer); in qca_suspend()
2618 del_timer(&qca->tx_idle_timer); in qca_suspend()
2629 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_suspend()
2630 qca->ibs_sent_slps++; in qca_suspend()
2638 BT_ERR("Spurious tx state %d", qca->tx_ibs_state); in qca_suspend()
2643 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_suspend()
2657 ret = wait_event_interruptible_timeout(qca->suspend_wait_q, in qca_suspend()
2658 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, in qca_suspend()
2668 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
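The tail of qca_suspend() closes the loop with device_want_to_sleep(): after forcing the TX side asleep it sleeps on suspend_wait_q until the controller's RX side has also gone to sleep (line 819 is the matching wake_up_interruptible()). A sketch of the wait and its three outcomes; IBS_BTSOC_TX_IDLE_TIMEOUT_MS is assumed from context:

    ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
                qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
                msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
    if (ret > 0)            /* condition met within the timeout */
        return 0;
    if (ret == 0)           /* timed out */
        return -ETIMEDOUT;
    return ret;             /* -ERESTARTSYS: interrupted by a signal */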
2678 struct qca_data *qca = hu->priv; in qca_resume() local
2680 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_resume()