Lines matching "dev" (full-word identifier search)

16 struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet); in mt76x02_pre_tbtt_tasklet() local
17 struct mt76_dev *mdev = &dev->mt76; in mt76x02_pre_tbtt_tasklet()
18 struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD]; in mt76x02_pre_tbtt_tasklet()
20 .dev = dev, in mt76x02_pre_tbtt_tasklet()
25 if (dev->mphy.offchannel) in mt76x02_pre_tbtt_tasklet()
30 mt76x02_resync_beacon_timer(dev); in mt76x02_pre_tbtt_tasklet()
33 mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff); in mt76x02_pre_tbtt_tasklet()
34 dev->beacon_data_count = 0; in mt76x02_pre_tbtt_tasklet()
36 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), in mt76x02_pre_tbtt_tasklet()
41 mt76x02_mac_set_beacon(dev, skb); in mt76x02_pre_tbtt_tasklet()
43 mt76_wr(dev, MT_BCN_BYPASS_MASK, in mt76x02_pre_tbtt_tasklet()
44 0xff00 | ~(0xff00 >> dev->beacon_data_count)); in mt76x02_pre_tbtt_tasklet()
51 mt76x02_enqueue_buffered_bc(dev, &data, 8); in mt76x02_pre_tbtt_tasklet()
69 mt76_tx_queue_skb(dev, q, MT_TXQ_PSD, skb, &mvif->group_wcid, in mt76x02_pre_tbtt_tasklet()
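
Note on the beacon update above: the driver first sets every MT_BCN_BYPASS_MASK bit (0xffff), then, after queueing beacon_data_count beacons, re-enables one slot per beacon with 0xff00 | ~(0xff00 >> beacon_data_count). A standalone arithmetic check of that expression (illustration only, not driver code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Each cleared low bit marks one active beacon slot; the
         * slots fill from bit 7 downward as the count grows. */
        for (int count = 0; count <= 8; count++) {
            uint32_t mask = 0xff00u | ~(0xff00u >> count);
            printf("count=%d mask=0x%04x\n", count,
                   (unsigned)(mask & 0xffffu));
        }
        return 0;
    }

count=0 leaves every slot bypassed (0xffff); count=8 yields 0xff00, exposing all eight data slots.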
75 static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en) in mt76x02e_pre_tbtt_enable() argument
78 tasklet_enable(&dev->mt76.pre_tbtt_tasklet); in mt76x02e_pre_tbtt_enable()
80 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); in mt76x02e_pre_tbtt_enable()
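
The enable/disable pair above relies on tasklet_disable()/tasklet_enable() nesting through a counter, so layered callers can each gate the pre-TBTT work. A toy model of that gate (hypothetical toy_* names in plain C, not the kernel implementation):

    #include <stdio.h>

    struct toy_tasklet {
        int disable_count;                 /* the kernel keeps this atomic */
        void (*func)(void);
    };

    static void toy_tasklet_disable(struct toy_tasklet *t) { t->disable_count++; }
    static void toy_tasklet_enable(struct toy_tasklet *t)  { t->disable_count--; }

    static void toy_tasklet_run(struct toy_tasklet *t)
    {
        if (t->disable_count == 0)
            t->func();                     /* else the kernel keeps it queued */
    }

    static void pre_tbtt(void) { puts("pre-TBTT work"); }

    int main(void)
    {
        struct toy_tasklet t = { 0, pre_tbtt };

        toy_tasklet_disable(&t);
        toy_tasklet_run(&t);               /* gated: prints nothing */
        toy_tasklet_enable(&t);
        toy_tasklet_run(&t);               /* prints "pre-TBTT work" */
        return 0;
    }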
83 static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en) in mt76x02e_beacon_enable() argument
85 mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en); in mt76x02e_beacon_enable()
87 mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); in mt76x02e_beacon_enable()
89 mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); in mt76x02e_beacon_enable()
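
mt76_rmw_field() above is a read-modify-write of a single register field. A plain C sketch of that pattern (field position chosen for illustration; GCC/Clang __builtin_ctz assumed):

    #include <stdio.h>
    #include <stdint.h>

    /* Clear the field's bits, then OR in the value shifted into place,
     * the same shape FIELD_PREP() gives the kernel helper. */
    static uint32_t rmw_field(uint32_t reg, uint32_t mask, uint32_t val)
    {
        unsigned int shift = (unsigned int)__builtin_ctz(mask);

        return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        /* e.g. a 1-bit enable field at bit 0 (bit position assumed) */
        printf("0x%08x\n", (unsigned)rmw_field(0xdeadbee0u, 0x1u, 1u));
        return 0;
    }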
92 void mt76x02e_init_beacon_config(struct mt76x02_dev *dev) in mt76x02e_init_beacon_config() argument
101 dev->beacon_ops = &beacon_ops; in mt76x02e_init_beacon_config()
104 mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, in mt76x02e_init_beacon_config()
106 mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, in mt76x02e_init_beacon_config()
108 mt76_wr(dev, MT_INT_TIMER_EN, 0); in mt76x02e_init_beacon_config()
110 mt76x02_init_beacon_config(dev); in mt76x02e_init_beacon_config()
115 mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, in mt76x02_init_rx_queue() argument
120 err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize, in mt76x02_init_rx_queue()
125 mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx)); in mt76x02_init_rx_queue()
130 static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev) in mt76x02_process_tx_status_fifo() argument
135 while (kfifo_get(&dev->txstatus_fifo, &stat)) in mt76x02_process_tx_status_fifo()
136 mt76x02_send_tx_status(dev, &stat, &update); in mt76x02_process_tx_status_fifo()
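
The while (kfifo_get(...)) loop above drains TX status words that the interrupt path queued, decoupling the hot IRQ producer from this slower consumer. A toy single-producer/single-consumer ring with the same shape (plain C stand-in, not the kernel kfifo):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define FIFO_SIZE 8                      /* must be a power of two */

    struct stat_fifo {
        uint32_t stat[FIFO_SIZE];
        unsigned int in, out;                /* free-running indices */
    };

    static bool fifo_put(struct stat_fifo *f, uint32_t v)
    {
        if (f->in - f->out == FIFO_SIZE)
            return false;                    /* full */
        f->stat[f->in++ & (FIFO_SIZE - 1)] = v;
        return true;
    }

    static bool fifo_get(struct stat_fifo *f, uint32_t *v)
    {
        if (f->in == f->out)
            return false;                    /* empty */
        *v = f->stat[f->out++ & (FIFO_SIZE - 1)];
        return true;
    }

    int main(void)
    {
        struct stat_fifo f = { .in = 0, .out = 0 };
        uint32_t v;

        fifo_put(&f, 0x11);                  /* IRQ side pushes raw status */
        fifo_put(&f, 0x22);
        while (fifo_get(&f, &v))             /* worker drains, as above */
            printf("status 0x%x\n", (unsigned)v);
        return 0;
    }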
141 struct mt76x02_dev *dev; in mt76x02_tx_worker() local
143 dev = container_of(w, struct mt76x02_dev, mt76.tx_worker); in mt76x02_tx_worker()
145 mt76x02_mac_poll_tx_status(dev, false); in mt76x02_tx_worker()
146 mt76x02_process_tx_status_fifo(dev); in mt76x02_tx_worker()
148 mt76_txq_schedule_all(&dev->mphy); in mt76x02_tx_worker()
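
container_of() above recovers the outer mt76x02_dev from a pointer to its embedded mt76.tx_worker, the same trick from_tasklet() performs in the pre-TBTT handler. A minimal portable version:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct worker { int dummy; };
    struct device { int id; struct worker tx_worker; };

    int main(void)
    {
        struct device d = { .id = 42 };
        struct worker *w = &d.tx_worker;   /* what the callback receives */
        struct device *dev = container_of(w, struct device, tx_worker);

        printf("id=%d\n", dev->id);        /* prints 42 */
        return 0;
    }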
153 struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, in mt76x02_poll_tx() local
157 mt76x02_mac_poll_tx_status(dev, false); in mt76x02_poll_tx()
159 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); in mt76x02_poll_tx()
161 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); in mt76x02_poll_tx()
164 mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL); in mt76x02_poll_tx()
166 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); in mt76x02_poll_tx()
168 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); in mt76x02_poll_tx()
170 mt76_worker_schedule(&dev->mt76.tx_worker); in mt76x02_poll_tx()
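
Note the ordering in the poll routine above: queues are cleaned, MT_INT_TX_DONE_ALL is re-enabled only once the poll completes, and then one more cleanup pass runs so work that raced in between the last sweep and the re-arm is not stranded until the next interrupt. A stub skeleton of that shape (illustrative stand-ins, not kernel API):

    #include <stdio.h>
    #include <stdbool.h>

    static bool poll_complete(void)  { puts("napi complete");  return true; }
    static void irq_enable(void)     { puts("irq re-armed"); }
    static void cleanup_queues(void) { puts("tx cleanup"); }

    static void tx_poll(void)
    {
        cleanup_queues();                  /* main cleanup pass */
        if (!poll_complete())
            return;                        /* still busy: poll again later */
        irq_enable();                      /* race window opens here... */
        cleanup_queues();                  /* ...so sweep once more */
    }

    int main(void) { tx_poll(); return 0; }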
175 int mt76x02_dma_init(struct mt76x02_dev *dev) in mt76x02_dma_init() argument
185 status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL); in mt76x02_dma_init()
189 dev->mt76.tx_worker.fn = mt76x02_tx_worker; in mt76x02_dma_init()
190 tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet); in mt76x02_dma_init()
192 spin_lock_init(&dev->txstatus_fifo_lock); in mt76x02_dma_init()
193 kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); in mt76x02_dma_init()
195 mt76_dma_attach(&dev->mt76); in mt76x02_dma_init()
197 mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); in mt76x02_dma_init()
200 ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i), in mt76x02_dma_init()
207 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT, in mt76x02_dma_init()
213 ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU, in mt76x02_dma_init()
218 mt76x02_irq_enable(dev, in mt76x02_dma_init()
226 ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, in mt76x02_dma_init()
231 q = &dev->mt76.q_rx[MT_RXQ_MAIN]; in mt76x02_dma_init()
233 ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE, in mt76x02_dma_init()
238 ret = mt76_init_queues(dev, mt76_dma_rx_poll); in mt76x02_dma_init()
242 netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, in mt76x02_dma_init()
244 napi_enable(&dev->mt76.tx_napi); in mt76x02_dma_init()
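
kfifo_init() above expects a power-of-two buffer size, and the driver sizes fifo_size accordingly (that computation contains no "dev" token and so is absent from the matches). A plain C version of the round-up involved (illustrative):

    #include <stdio.h>

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        printf("%lu\n", roundup_pow_of_two(48));   /* prints 64 */
        return 0;
    }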
252 struct mt76x02_dev *dev; in mt76x02_rx_poll_complete() local
254 dev = container_of(mdev, struct mt76x02_dev, mt76); in mt76x02_rx_poll_complete()
255 mt76x02_irq_enable(dev, MT_INT_RX_DONE(q)); in mt76x02_rx_poll_complete()
261 struct mt76x02_dev *dev = dev_instance; in mt76x02_irq_handler() local
264 intr = mt76_rr(dev, MT_INT_SOURCE_CSR); in mt76x02_irq_handler()
265 intr &= dev->mt76.mmio.irqmask; in mt76x02_irq_handler()
266 mt76_wr(dev, MT_INT_SOURCE_CSR, intr); in mt76x02_irq_handler()
268 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) in mt76x02_irq_handler()
271 trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); in mt76x02_irq_handler()
277 mt76x02_irq_disable(dev, mask); in mt76x02_irq_handler()
280 napi_schedule(&dev->mt76.napi[0]); in mt76x02_irq_handler()
283 napi_schedule(&dev->mt76.napi[1]); in mt76x02_irq_handler()
286 tasklet_schedule(&dev->mt76.pre_tbtt_tasklet); in mt76x02_irq_handler()
290 if (dev->mt76.csa_complete) in mt76x02_irq_handler()
291 mt76_csa_finish(&dev->mt76); in mt76x02_irq_handler()
293 mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]); in mt76x02_irq_handler()
297 mt76x02_mac_poll_tx_status(dev, true); in mt76x02_irq_handler()
300 napi_schedule(&dev->mt76.tx_napi); in mt76x02_irq_handler()
303 tasklet_schedule(&dev->dfs_pd.dfs_tasklet); in mt76x02_irq_handler()
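
The handler above follows the standard MMIO interrupt pattern: read the source register, AND with the software irqmask so masked-off sources are ignored, write the taken bits back to acknowledge them, then fan out to NAPI, tasklets, and the TX worker. A standalone demo of that dispatch (bit values invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define INT_RX_DONE0  (1u << 0)
    #define INT_TX_STAT   (1u << 1)
    #define INT_PRE_TBTT  (1u << 2)

    static uint32_t int_source = INT_RX_DONE0 | INT_PRE_TBTT;
    static uint32_t irqmask    = INT_RX_DONE0 | INT_TX_STAT;

    int main(void)
    {
        uint32_t intr = int_source;        /* read MT_INT_SOURCE_CSR */
        intr &= irqmask;                   /* drop sources we never enabled */
        int_source &= ~intr;               /* write-1-to-clear acknowledge */

        if (intr & INT_RX_DONE0)
            puts("schedule RX napi");
        if (intr & INT_TX_STAT)
            puts("poll TX status");
        if (intr & INT_PRE_TBTT)
            puts("schedule pre-TBTT tasklet");  /* masked off: skipped */
        return 0;
    }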
309 static void mt76x02_dma_enable(struct mt76x02_dev *dev) in mt76x02_dma_enable() argument
313 mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); in mt76x02_dma_enable()
314 mt76x02_wait_for_wpdma(&dev->mt76, 1000); in mt76x02_dma_enable()
320 mt76_set(dev, MT_WPDMA_GLO_CFG, val); in mt76x02_dma_enable()
321 mt76_clear(dev, MT_WPDMA_GLO_CFG, in mt76x02_dma_enable()
325 void mt76x02_dma_disable(struct mt76x02_dev *dev) in mt76x02_dma_disable() argument
327 u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG); in mt76x02_dma_disable()
333 mt76_wr(dev, MT_WPDMA_GLO_CFG, val); in mt76x02_dma_disable()
337 void mt76x02_mac_start(struct mt76x02_dev *dev) in mt76x02_mac_start() argument
339 mt76x02_mac_reset_counters(dev); in mt76x02_mac_start()
340 mt76x02_dma_enable(dev); in mt76x02_mac_start()
341 mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); in mt76x02_mac_start()
342 mt76_wr(dev, MT_MAC_SYS_CTRL, in mt76x02_mac_start()
345 mt76x02_irq_enable(dev, in mt76x02_mac_start()
351 static bool mt76x02_tx_hang(struct mt76x02_dev *dev) in mt76x02_tx_hang() argument
358 q = dev->mphy.q_tx[i]; in mt76x02_tx_hang()
360 prev_dma_idx = dev->mt76.tx_dma_idx[i]; in mt76x02_tx_hang()
362 dev->mt76.tx_dma_idx[i] = dma_idx; in mt76x02_tx_hang()
365 dev->tx_hang_check[i] = 0; in mt76x02_tx_hang()
369 if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH) in mt76x02_tx_hang()
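
The hang check above declares a stall only when a queue still holds frames while its hardware DMA index has not moved since the previous snapshot, and only after MT_TX_HANG_TH consecutive such passes. The same logic in a self-contained sketch (toy_* structures assumed, not the driver's types):

    #include <stdio.h>
    #include <stdbool.h>

    #define MT_TX_HANG_TH 10
    #define NQ 4

    struct toy_queue { int queued; unsigned int dma_idx; };

    struct toy_dev {
        struct toy_queue q[NQ];
        unsigned int prev_dma_idx[NQ];
        unsigned int hang_check[NQ];
    };

    static bool tx_hang(struct toy_dev *dev)
    {
        for (int i = 0; i < NQ; i++) {
            struct toy_queue *q = &dev->q[i];
            unsigned int prev = dev->prev_dma_idx[i];

            dev->prev_dma_idx[i] = q->dma_idx;  /* snapshot for next pass */
            if (!q->queued || q->dma_idx != prev) {
                dev->hang_check[i] = 0;         /* progress: reset counter */
                continue;
            }
            if (++dev->hang_check[i] >= MT_TX_HANG_TH)
                return true;                    /* stalled long enough */
        }
        return false;
    }

    int main(void)
    {
        struct toy_dev dev = { 0 };

        dev.q[0].queued = 5;                    /* pending frames, frozen index */
        for (int pass = 1; pass <= MT_TX_HANG_TH; pass++)
            if (tx_hang(&dev))
                printf("hang detected on pass %d\n", pass);
        return 0;
    }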
380 struct mt76x02_dev *dev = hw->priv; in mt76x02_key_sync() local
391 mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key); in mt76x02_key_sync()
394 static void mt76x02_reset_state(struct mt76x02_dev *dev) in mt76x02_reset_state() argument
398 lockdep_assert_held(&dev->mt76.mutex); in mt76x02_reset_state()
400 clear_bit(MT76_STATE_RUNNING, &dev->mphy.state); in mt76x02_reset_state()
403 ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL); in mt76x02_reset_state()
413 wcid = rcu_dereference_protected(dev->mt76.wcid[i], in mt76x02_reset_state()
414 lockdep_is_held(&dev->mt76.mutex)); in mt76x02_reset_state()
418 rcu_assign_pointer(dev->mt76.wcid[i], NULL); in mt76x02_reset_state()
426 __mt76_sta_remove(&dev->mt76, vif, sta); in mt76x02_reset_state()
430 dev->mt76.vif_mask = 0; in mt76x02_reset_state()
431 dev->mt76.beacon_mask = 0; in mt76x02_reset_state()
434 static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) in mt76x02_watchdog_reset() argument
436 u32 mask = dev->mt76.mmio.irqmask; in mt76x02_watchdog_reset()
437 bool restart = dev->mt76.mcu_ops->mcu_restart; in mt76x02_watchdog_reset()
440 ieee80211_stop_queues(dev->mt76.hw); in mt76x02_watchdog_reset()
441 set_bit(MT76_RESET, &dev->mphy.state); in mt76x02_watchdog_reset()
443 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); in mt76x02_watchdog_reset()
444 mt76_worker_disable(&dev->mt76.tx_worker); in mt76x02_watchdog_reset()
445 napi_disable(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
447 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
448 napi_disable(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
451 mutex_lock(&dev->mt76.mutex); in mt76x02_watchdog_reset()
453 dev->mcu_timeout = 0; in mt76x02_watchdog_reset()
455 mt76x02_reset_state(dev); in mt76x02_watchdog_reset()
457 if (dev->mt76.beacon_mask) in mt76x02_watchdog_reset()
458 mt76_clear(dev, MT_BEACON_TIME_CFG, in mt76x02_watchdog_reset()
462 mt76x02_irq_disable(dev, mask); in mt76x02_watchdog_reset()
465 mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); in mt76x02_watchdog_reset()
466 mt76_wr(dev, MT_MAC_SYS_CTRL, 0); in mt76x02_watchdog_reset()
467 mt76_clear(dev, MT_WPDMA_GLO_CFG, in mt76x02_watchdog_reset()
470 mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff); in mt76x02_watchdog_reset()
473 mt76_set(dev, 0x734, 0x3); in mt76x02_watchdog_reset()
476 mt76_mcu_restart(dev); in mt76x02_watchdog_reset()
478 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); in mt76x02_watchdog_reset()
480 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); in mt76x02_watchdog_reset()
482 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
483 mt76_queue_rx_reset(dev, i); in mt76x02_watchdog_reset()
486 mt76_tx_status_check(&dev->mt76, true); in mt76x02_watchdog_reset()
488 mt76x02_mac_start(dev); in mt76x02_watchdog_reset()
490 if (dev->ed_monitor) in mt76x02_watchdog_reset()
491 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); in mt76x02_watchdog_reset()
493 if (dev->mt76.beacon_mask && !restart) in mt76x02_watchdog_reset()
494 mt76_set(dev, MT_BEACON_TIME_CFG, in mt76x02_watchdog_reset()
498 mt76x02_irq_enable(dev, mask); in mt76x02_watchdog_reset()
500 mutex_unlock(&dev->mt76.mutex); in mt76x02_watchdog_reset()
502 clear_bit(MT76_RESET, &dev->mphy.state); in mt76x02_watchdog_reset()
504 mt76_worker_enable(&dev->mt76.tx_worker); in mt76x02_watchdog_reset()
505 tasklet_enable(&dev->mt76.pre_tbtt_tasklet); in mt76x02_watchdog_reset()
508 napi_enable(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
509 napi_schedule(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
511 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
512 napi_enable(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
513 napi_schedule(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
518 set_bit(MT76_RESTART, &dev->mphy.state); in mt76x02_watchdog_reset()
519 mt76x02_mcu_function_select(dev, Q_SELECT, 1); in mt76x02_watchdog_reset()
520 ieee80211_restart_hw(dev->mt76.hw); in mt76x02_watchdog_reset()
522 ieee80211_wake_queues(dev->mt76.hw); in mt76x02_watchdog_reset()
523 mt76_txq_schedule_all(&dev->mphy); in mt76x02_watchdog_reset()
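
The recovery above is strictly ordered: quiesce everything that can touch the hardware, reset under the device mutex, then bring contexts back in roughly reverse order. As a stub outline (print statements standing in for the real calls):

    #include <stdio.h>

    static void watchdog_reset(void)
    {
        /* 1. quiesce: stop queueing and every deferred context, mask IRQs */
        puts("stop mac80211 queues; set MT76_RESET");
        puts("disable pre-TBTT tasklet, tx worker, NAPI");
        puts("mask device interrupts");

        /* 2. under the device mutex: reset and flush */
        puts("stop MAC and DMA, ack stale interrupts");
        puts("restart MCU if it timed out; flush all TX/RX queues");
        puts("restart MAC, restore the saved irqmask");

        /* 3. restart in reverse order, then wake TX */
        puts("re-enable worker, tasklet, NAPI (and reschedule NAPI)");
        puts("on full restart: ieee80211_restart_hw(); else wake queues");
    }

    int main(void) { watchdog_reset(); return 0; }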
530 struct mt76x02_dev *dev = hw->priv; in mt76x02_reconfig_complete() local
535 clear_bit(MT76_RESTART, &dev->mphy.state); in mt76x02_reconfig_complete()
539 static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) in mt76x02_check_tx_hang() argument
541 if (test_bit(MT76_RESTART, &dev->mphy.state)) in mt76x02_check_tx_hang()
544 if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout) in mt76x02_check_tx_hang()
547 mt76x02_watchdog_reset(dev); in mt76x02_check_tx_hang()
549 dev->tx_hang_reset++; in mt76x02_check_tx_hang()
550 memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check)); in mt76x02_check_tx_hang()
551 memset(dev->mt76.tx_dma_idx, 0xff, in mt76x02_check_tx_hang()
552 sizeof(dev->mt76.tx_dma_idx)); in mt76x02_check_tx_hang()
557 struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, in mt76x02_wdt_work() local
560 mt76x02_check_tx_hang(dev); in mt76x02_wdt_work()
562 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work, in mt76x02_wdt_work()
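
The watchdog work item above re-queues itself on the mac80211 workqueue, giving a periodic hang check without a dedicated timer. A plain C sketch of that self-rearming shape (a bounded sleep loop stands in for the workqueue):

    #include <stdio.h>
    #include <unistd.h>

    static void wdt_work(void)
    {
        puts("check for TX hang");
        /* driver: ieee80211_queue_delayed_work(hw, &wdt_work, period) */
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {      /* three rounds stand in for "forever" */
            wdt_work();
            sleep(1);                      /* MT_WATCHDOG_TIME in the driver */
        }
        return 0;
    }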