Lines Matching refs:bnad

73 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)  in bnad_cq_cleanup()  argument
89 bnad_tx_buff_unmap(struct bnad *bnad, in bnad_tx_buff_unmap() argument
103 dma_unmap_single(&bnad->pcidev->dev, in bnad_tx_buff_unmap()
118 dma_unmap_page(&bnad->pcidev->dev, in bnad_tx_buff_unmap()
137 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) in bnad_txq_cleanup() argument
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
159 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) in bnad_txcmpl_process() argument
190 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons); in bnad_txcmpl_process()
204 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) in bnad_tx_complete() argument
206 struct net_device *netdev = bnad->netdev; in bnad_tx_complete()
212 sent = bnad_txcmpl_process(bnad, tcb); in bnad_tx_complete()
220 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); in bnad_tx_complete()
239 struct bnad *bnad = tcb->bnad; in bnad_msix_tx() local
241 bnad_tx_complete(bnad, tcb); in bnad_msix_tx()
247 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_alloc_uninit() argument
259 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_alloc_init() argument
264 bnad_rxq_alloc_uninit(bnad, rcb); in bnad_rxq_alloc_init()
292 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) in bnad_rxq_cleanup_page() argument
297 dma_unmap_page(&bnad->pcidev->dev, in bnad_rxq_cleanup_page()
307 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) in bnad_rxq_cleanup_skb() argument
312 dma_unmap_single(&bnad->pcidev->dev, in bnad_rxq_cleanup_skb()
322 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_cleanup() argument
331 bnad_rxq_cleanup_skb(bnad, unmap); in bnad_rxq_cleanup()
333 bnad_rxq_cleanup_page(bnad, unmap); in bnad_rxq_cleanup()
335 bnad_rxq_alloc_uninit(bnad, rcb); in bnad_rxq_cleanup()
339 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) in bnad_rxq_refill_page() argument
370 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); in bnad_rxq_refill_page()
375 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, in bnad_rxq_refill_page()
377 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { in bnad_rxq_refill_page()
379 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); in bnad_rxq_refill_page()
413 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) in bnad_rxq_refill_skb() argument
430 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); in bnad_rxq_refill_skb()
433 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); in bnad_rxq_refill_skb()
438 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, in bnad_rxq_refill_skb()
440 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { in bnad_rxq_refill_skb()
442 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); in bnad_rxq_refill_skb()
469 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_post() argument
479 bnad_rxq_refill_skb(bnad, rcb, to_alloc); in bnad_rxq_post()
481 bnad_rxq_refill_page(bnad, rcb, to_alloc); in bnad_rxq_post()
499 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, in bnad_cq_drop_packet() argument
512 bnad_rxq_cleanup_skb(bnad, unmap); in bnad_cq_drop_packet()
514 bnad_rxq_cleanup_page(bnad, unmap); in bnad_cq_drop_packet()
522 struct bnad *bnad; in bnad_cq_setup_skb_frags() local
533 bnad = rcb->bnad; in bnad_cq_setup_skb_frags()
547 dma_unmap_page(&bnad->pcidev->dev, in bnad_cq_setup_skb_frags()
570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, in bnad_cq_setup_skb() argument
575 dma_unmap_single(&bnad->pcidev->dev, in bnad_cq_setup_skb()
580 skb->protocol = eth_type_trans(skb, bnad->netdev); in bnad_cq_setup_skb()
587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) in bnad_cq_process() argument
600 prefetch(bnad->netdev); in bnad_cq_process()
683 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); in bnad_cq_process()
690 bnad_cq_setup_skb(bnad, skb, unmap, len); in bnad_cq_process()
701 ((bnad->netdev->features & NETIF_F_RXCSUM) && in bnad_cq_process()
711 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) in bnad_cq_process()
732 bnad_rxq_post(bnad, ccb->rcb[0]); in bnad_cq_process()
734 bnad_rxq_post(bnad, ccb->rcb[1]); in bnad_cq_process()
740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) in bnad_netif_rx_schedule_poll() argument
759 bnad_netif_rx_schedule_poll(ccb->bnad, ccb); in bnad_msix_rx()
773 struct bnad *bnad = (struct bnad *)data; in bnad_msix_mbox_handler() local
775 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { in bnad_msix_mbox_handler()
777 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
781 bna_intr_status_get(&bnad->bna, intr_status); in bnad_msix_mbox_handler()
783 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) in bnad_msix_mbox_handler()
784 bna_mbox_handler(&bnad->bna, intr_status); in bnad_msix_mbox_handler()
786 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
797 struct bnad *bnad = (struct bnad *)data; in bnad_isr() local
802 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_isr()
803 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { in bnad_isr()
804 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
808 bna_intr_status_get(&bnad->bna, intr_status); in bnad_isr()
811 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
815 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) in bnad_isr()
816 bna_mbox_handler(&bnad->bna, intr_status); in bnad_isr()
818 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
825 for (i = 0; i < bnad->num_tx; i++) { in bnad_isr()
826 for (j = 0; j < bnad->num_txq_per_tx; j++) { in bnad_isr()
827 tcb = bnad->tx_info[i].tcb[j]; in bnad_isr()
829 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); in bnad_isr()
833 for (i = 0; i < bnad->num_rx; i++) { in bnad_isr()
834 rx_info = &bnad->rx_info[i]; in bnad_isr()
837 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_isr()
840 bnad_netif_rx_schedule_poll(bnad, in bnad_isr()
852 bnad_enable_mbox_irq(struct bnad *bnad) in bnad_enable_mbox_irq() argument
854 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_enable_mbox_irq()
856 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); in bnad_enable_mbox_irq()
864 bnad_disable_mbox_irq(struct bnad *bnad) in bnad_disable_mbox_irq() argument
866 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_disable_mbox_irq()
868 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); in bnad_disable_mbox_irq()
872 bnad_set_netdev_perm_addr(struct bnad *bnad) in bnad_set_netdev_perm_addr() argument
874 struct net_device *netdev = bnad->netdev; in bnad_set_netdev_perm_addr()
876 ether_addr_copy(netdev->perm_addr, bnad->perm_addr); in bnad_set_netdev_perm_addr()
878 eth_hw_addr_set(netdev, bnad->perm_addr); in bnad_set_netdev_perm_addr()
885 bnad_cb_mbox_intr_enable(struct bnad *bnad) in bnad_cb_mbox_intr_enable() argument
887 bnad_enable_mbox_irq(bnad); in bnad_cb_mbox_intr_enable()
891 bnad_cb_mbox_intr_disable(struct bnad *bnad) in bnad_cb_mbox_intr_disable() argument
893 bnad_disable_mbox_irq(bnad); in bnad_cb_mbox_intr_disable()
897 bnad_cb_ioceth_ready(struct bnad *bnad) in bnad_cb_ioceth_ready() argument
899 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; in bnad_cb_ioceth_ready()
900 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_ready()
904 bnad_cb_ioceth_failed(struct bnad *bnad) in bnad_cb_ioceth_failed() argument
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; in bnad_cb_ioceth_failed()
907 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_failed()
911 bnad_cb_ioceth_disabled(struct bnad *bnad) in bnad_cb_ioceth_disabled() argument
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; in bnad_cb_ioceth_disabled()
914 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_disabled()
920 struct bnad *bnad = (struct bnad *)arg; in bnad_cb_enet_disabled() local
922 netif_carrier_off(bnad->netdev); in bnad_cb_enet_disabled()
923 complete(&bnad->bnad_completions.enet_comp); in bnad_cb_enet_disabled()
927 bnad_cb_ethport_link_status(struct bnad *bnad, in bnad_cb_ethport_link_status() argument
935 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) in bnad_cb_ethport_link_status()
936 BNAD_UPDATE_CTR(bnad, cee_toggle); in bnad_cb_ethport_link_status()
937 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); in bnad_cb_ethport_link_status()
939 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) in bnad_cb_ethport_link_status()
940 BNAD_UPDATE_CTR(bnad, cee_toggle); in bnad_cb_ethport_link_status()
941 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); in bnad_cb_ethport_link_status()
945 if (!netif_carrier_ok(bnad->netdev)) { in bnad_cb_ethport_link_status()
947 netdev_info(bnad->netdev, "link up\n"); in bnad_cb_ethport_link_status()
948 netif_carrier_on(bnad->netdev); in bnad_cb_ethport_link_status()
949 BNAD_UPDATE_CTR(bnad, link_toggle); in bnad_cb_ethport_link_status()
950 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { in bnad_cb_ethport_link_status()
951 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; in bnad_cb_ethport_link_status()
954 bnad->tx_info[tx_id].tcb[tcb_id]; in bnad_cb_ethport_link_status()
967 bnad->netdev, in bnad_cb_ethport_link_status()
969 BNAD_UPDATE_CTR(bnad, in bnad_cb_ethport_link_status()
973 bnad->netdev, in bnad_cb_ethport_link_status()
975 BNAD_UPDATE_CTR(bnad, in bnad_cb_ethport_link_status()
982 if (netif_carrier_ok(bnad->netdev)) { in bnad_cb_ethport_link_status()
983 netdev_info(bnad->netdev, "link down\n"); in bnad_cb_ethport_link_status()
984 netif_carrier_off(bnad->netdev); in bnad_cb_ethport_link_status()
985 BNAD_UPDATE_CTR(bnad, link_toggle); in bnad_cb_ethport_link_status()
993 struct bnad *bnad = (struct bnad *)arg; in bnad_cb_tx_disabled() local
995 complete(&bnad->bnad_completions.tx_comp); in bnad_cb_tx_disabled()
999 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) in bnad_cb_tcb_setup() argument
1009 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) in bnad_cb_tcb_destroy() argument
1019 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) in bnad_cb_ccb_setup() argument
1029 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) in bnad_cb_ccb_destroy() argument
1038 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) in bnad_cb_tx_stall() argument
1051 netif_stop_subqueue(bnad->netdev, txq_id); in bnad_cb_tx_stall()
1056 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) in bnad_cb_tx_resume() argument
1073 if (netif_carrier_ok(bnad->netdev)) { in bnad_cb_tx_resume()
1074 netif_wake_subqueue(bnad->netdev, txq_id); in bnad_cb_tx_resume()
1075 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); in bnad_cb_tx_resume()
1084 if (is_zero_ether_addr(bnad->perm_addr)) { in bnad_cb_tx_resume()
1085 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr); in bnad_cb_tx_resume()
1086 bnad_set_netdev_perm_addr(bnad); in bnad_cb_tx_resume()
1098 struct bnad *bnad = NULL; in bnad_tx_cleanup() local
1108 bnad = tcb->bnad; in bnad_tx_cleanup()
1115 bnad_txq_cleanup(bnad, tcb); in bnad_tx_cleanup()
1122 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, in bnad_tx_cleanup()
1127 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_tx_cleanup()
1129 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_tx_cleanup()
1133 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) in bnad_cb_tx_cleanup() argument
1145 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); in bnad_cb_tx_cleanup()
1149 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) in bnad_cb_rx_stall() argument
1178 struct bnad *bnad = NULL; in bnad_rx_cleanup() local
1188 bnad = rx_ctrl->ccb->bnad; in bnad_rx_cleanup()
1196 bnad_cq_cleanup(bnad, rx_ctrl->ccb); in bnad_rx_cleanup()
1197 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); in bnad_rx_cleanup()
1199 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); in bnad_rx_cleanup()
1202 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_rx_cleanup()
1204 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_rx_cleanup()
1208 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) in bnad_cb_rx_cleanup() argument
1227 queue_work(bnad->work_q, &rx_info->rx_cleanup_work); in bnad_cb_rx_cleanup()
1231 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) in bnad_cb_rx_post() argument
1252 bnad_rxq_alloc_init(bnad, rcb); in bnad_cb_rx_post()
1255 bnad_rxq_post(bnad, rcb); in bnad_cb_rx_post()
1263 struct bnad *bnad = (struct bnad *)arg; in bnad_cb_rx_disabled() local
1265 complete(&bnad->bnad_completions.rx_comp); in bnad_cb_rx_disabled()
1269 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx) in bnad_cb_rx_mcast_add() argument
1271 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; in bnad_cb_rx_mcast_add()
1272 complete(&bnad->bnad_completions.mcast_comp); in bnad_cb_rx_mcast_add()
1276 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, in bnad_cb_stats_get() argument
1280 BNAD_UPDATE_CTR(bnad, hw_stats_updates); in bnad_cb_stats_get()
1282 if (!netif_running(bnad->netdev) || in bnad_cb_stats_get()
1283 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_cb_stats_get()
1286 mod_timer(&bnad->stats_timer, in bnad_cb_stats_get()
1291 bnad_cb_enet_mtu_set(struct bnad *bnad) in bnad_cb_enet_mtu_set() argument
1293 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; in bnad_cb_enet_mtu_set()
1294 complete(&bnad->bnad_completions.mtu_comp); in bnad_cb_enet_mtu_set()
1310 bnad_mem_free(struct bnad *bnad, in bnad_mem_free() argument
1324 dma_free_coherent(&bnad->pcidev->dev, in bnad_mem_free()
1336 bnad_mem_alloc(struct bnad *bnad, in bnad_mem_alloc() argument
1356 dma_alloc_coherent(&bnad->pcidev->dev, in bnad_mem_alloc()
1378 bnad_mem_free(bnad, mem_info); in bnad_mem_alloc()
1384 bnad_mbox_irq_free(struct bnad *bnad) in bnad_mbox_irq_free() argument
1389 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_free()
1390 bnad_disable_mbox_irq(bnad); in bnad_mbox_irq_free()
1391 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_free()
1393 irq = BNAD_GET_MBOX_IRQ(bnad); in bnad_mbox_irq_free()
1394 free_irq(irq, bnad); in bnad_mbox_irq_free()
1403 bnad_mbox_irq_alloc(struct bnad *bnad) in bnad_mbox_irq_alloc() argument
1410 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_alloc()
1411 if (bnad->cfg_flags & BNAD_CF_MSIX) { in bnad_mbox_irq_alloc()
1413 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; in bnad_mbox_irq_alloc()
1417 irq = bnad->pcidev->irq; in bnad_mbox_irq_alloc()
1421 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_alloc()
1422 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); in bnad_mbox_irq_alloc()
1428 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_mbox_irq_alloc()
1430 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); in bnad_mbox_irq_alloc()
1433 bnad->mbox_irq_name, bnad); in bnad_mbox_irq_alloc()
1439 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info) in bnad_txrx_irq_free() argument
1447 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, in bnad_txrx_irq_alloc() argument
1454 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_txrx_irq_alloc()
1455 cfg_flags = bnad->cfg_flags; in bnad_txrx_irq_alloc()
1456 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_txrx_irq_alloc()
1473 (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_txrx_irq_alloc()
1509 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, in bnad_tx_msix_unregister() argument
1520 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]); in bnad_tx_msix_unregister()
1528 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, in bnad_tx_msix_register() argument
1538 bnad->netdev->name, in bnad_tx_msix_register()
1540 err = request_irq(bnad->msix_table[vector_num].vector, in bnad_tx_msix_register()
1552 bnad_tx_msix_unregister(bnad, tx_info, (i - 1)); in bnad_tx_msix_register()
1560 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, in bnad_rx_msix_unregister() argument
1571 free_irq(bnad->msix_table[vector_num].vector, in bnad_rx_msix_unregister()
1580 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, in bnad_rx_msix_register() argument
1590 "%s CQ %d", bnad->netdev->name, in bnad_rx_msix_register()
1592 err = request_irq(bnad->msix_table[vector_num].vector, in bnad_rx_msix_register()
1604 bnad_rx_msix_unregister(bnad, rx_info, (i - 1)); in bnad_rx_msix_register()
1610 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info) in bnad_tx_res_free() argument
1616 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); in bnad_tx_res_free()
1618 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); in bnad_tx_res_free()
1624 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, in bnad_tx_res_alloc() argument
1631 err = bnad_mem_alloc(bnad, in bnad_tx_res_alloc()
1634 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id, in bnad_tx_res_alloc()
1642 bnad_tx_res_free(bnad, res_info); in bnad_tx_res_alloc()
1648 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info) in bnad_rx_res_free() argument
1654 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); in bnad_rx_res_free()
1656 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); in bnad_rx_res_free()
1662 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, in bnad_rx_res_alloc() argument
1670 err = bnad_mem_alloc(bnad, in bnad_rx_res_alloc()
1673 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id, in bnad_rx_res_alloc()
1681 bnad_rx_res_free(bnad, res_info); in bnad_rx_res_alloc()
1690 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer); in bnad_ioc_timeout() local
1693 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioc_timeout()
1694 bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc); in bnad_ioc_timeout()
1695 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioc_timeout()
1701 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer); in bnad_ioc_hb_check() local
1704 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioc_hb_check()
1705 bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc); in bnad_ioc_hb_check()
1706 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioc_hb_check()
1712 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer); in bnad_iocpf_timeout() local
1715 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_iocpf_timeout()
1716 bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc); in bnad_iocpf_timeout()
1717 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_iocpf_timeout()
1723 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer); in bnad_iocpf_sem_timeout() local
1726 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_iocpf_sem_timeout()
1727 bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc); in bnad_iocpf_sem_timeout()
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_iocpf_sem_timeout()
1745 struct bnad *bnad = from_timer(bnad, t, dim_timer); in bnad_dim_timeout() local
1751 if (!netif_carrier_ok(bnad->netdev)) in bnad_dim_timeout()
1754 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_dim_timeout()
1755 for (i = 0; i < bnad->num_rx; i++) { in bnad_dim_timeout()
1756 rx_info = &bnad->rx_info[i]; in bnad_dim_timeout()
1759 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_dim_timeout()
1768 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) in bnad_dim_timeout()
1769 mod_timer(&bnad->dim_timer, in bnad_dim_timeout()
1771 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_dim_timeout()
1778 struct bnad *bnad = from_timer(bnad, t, stats_timer); in bnad_stats_timeout() local
1781 if (!netif_running(bnad->netdev) || in bnad_stats_timeout()
1782 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_stats_timeout()
1785 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timeout()
1786 bna_hw_stats_get(&bnad->bna); in bnad_stats_timeout()
1787 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timeout()
1795 bnad_dim_timer_start(struct bnad *bnad) in bnad_dim_timer_start() argument
1797 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && in bnad_dim_timer_start()
1798 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { in bnad_dim_timer_start()
1799 timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0); in bnad_dim_timer_start()
1800 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); in bnad_dim_timer_start()
1801 mod_timer(&bnad->dim_timer, in bnad_dim_timer_start()
1811 bnad_stats_timer_start(struct bnad *bnad) in bnad_stats_timer_start() argument
1815 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timer_start()
1816 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { in bnad_stats_timer_start()
1817 timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0); in bnad_stats_timer_start()
1818 mod_timer(&bnad->stats_timer, in bnad_stats_timer_start()
1821 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timer_start()
1829 bnad_stats_timer_stop(struct bnad *bnad) in bnad_stats_timer_stop() argument
1834 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timer_stop()
1835 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_stats_timer_stop()
1837 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timer_stop()
1839 del_timer_sync(&bnad->stats_timer); in bnad_stats_timer_stop()
1861 struct bnad *bnad = rx_ctrl->bnad; in bnad_napi_poll_rx() local
1866 if (!netif_carrier_ok(bnad->netdev)) in bnad_napi_poll_rx()
1869 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget); in bnad_napi_poll_rx()
1885 bnad_napi_add(struct bnad *bnad, u32 rx_id) in bnad_napi_add() argument
1891 for (i = 0; i < bnad->num_rxp_per_rx; i++) { in bnad_napi_add()
1892 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; in bnad_napi_add()
1893 netif_napi_add(bnad->netdev, &rx_ctrl->napi, in bnad_napi_add()
1899 bnad_napi_delete(struct bnad *bnad, u32 rx_id) in bnad_napi_delete() argument
1904 for (i = 0; i < bnad->num_rxp_per_rx; i++) in bnad_napi_delete()
1905 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); in bnad_napi_delete()
1910 bnad_destroy_tx(struct bnad *bnad, u32 tx_id) in bnad_destroy_tx() argument
1912 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; in bnad_destroy_tx()
1913 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; in bnad_destroy_tx()
1919 init_completion(&bnad->bnad_completions.tx_comp); in bnad_destroy_tx()
1920 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_tx()
1922 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_tx()
1923 wait_for_completion(&bnad->bnad_completions.tx_comp); in bnad_destroy_tx()
1926 bnad_tx_msix_unregister(bnad, tx_info, in bnad_destroy_tx()
1927 bnad->num_txq_per_tx); in bnad_destroy_tx()
1929 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_tx()
1931 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_tx()
1936 bnad_tx_res_free(bnad, res_info); in bnad_destroy_tx()
1941 bnad_setup_tx(struct bnad *bnad, u32 tx_id) in bnad_setup_tx() argument
1944 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; in bnad_setup_tx()
1945 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; in bnad_setup_tx()
1948 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; in bnad_setup_tx()
1963 tx_config->num_txq = bnad->num_txq_per_tx; in bnad_setup_tx()
1964 tx_config->txq_depth = bnad->txq_depth; in bnad_setup_tx()
1966 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; in bnad_setup_tx()
1969 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
1970 bna_tx_res_req(bnad->num_txq_per_tx, in bnad_setup_tx()
1971 bnad->txq_depth, res_info); in bnad_setup_tx()
1972 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
1976 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * in bnad_setup_tx()
1977 bnad->txq_depth)); in bnad_setup_tx()
1980 err = bnad_tx_res_alloc(bnad, res_info, tx_id); in bnad_setup_tx()
1985 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
1986 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, in bnad_setup_tx()
1988 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
1999 err = bnad_tx_msix_register(bnad, tx_info, in bnad_setup_tx()
2000 tx_id, bnad->num_txq_per_tx); in bnad_setup_tx()
2005 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
2007 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
2012 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
2014 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
2018 bnad_tx_res_free(bnad, res_info); in bnad_setup_tx()
2025 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) in bnad_init_rx_config() argument
2029 rx_config->num_paths = bnad->num_rxp_per_rx; in bnad_init_rx_config()
2030 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; in bnad_init_rx_config()
2032 if (bnad->num_rxp_per_rx > 1) { in bnad_init_rx_config()
2040 bnad->num_rxp_per_rx - 1; in bnad_init_rx_config()
2049 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); in bnad_init_rx_config()
2059 if (BNAD_PCI_DEV_IS_CAT2(bnad) && in bnad_init_rx_config()
2068 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; in bnad_init_rx_config()
2073 rx_config->q0_depth = bnad->rxq_depth; in bnad_init_rx_config()
2078 rx_config->q1_depth = bnad->rxq_depth; in bnad_init_rx_config()
2083 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? in bnad_init_rx_config()
2088 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) in bnad_rx_ctrl_init() argument
2090 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_rx_ctrl_init()
2093 for (i = 0; i < bnad->num_rxp_per_rx; i++) in bnad_rx_ctrl_init()
2094 rx_info->rx_ctrl[i].bnad = bnad; in bnad_rx_ctrl_init()
2099 bnad_reinit_rx(struct bnad *bnad) in bnad_reinit_rx() argument
2101 struct net_device *netdev = bnad->netdev; in bnad_reinit_rx()
2107 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { in bnad_reinit_rx()
2108 if (!bnad->rx_info[rx_id].rx) in bnad_reinit_rx()
2110 bnad_destroy_rx(bnad, rx_id); in bnad_reinit_rx()
2113 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_reinit_rx()
2114 bna_enet_mtu_set(&bnad->bna.enet, in bnad_reinit_rx()
2115 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); in bnad_reinit_rx()
2116 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_reinit_rx()
2118 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { in bnad_reinit_rx()
2120 current_err = bnad_setup_rx(bnad, rx_id); in bnad_reinit_rx()
2128 if (bnad->rx_info[0].rx && !err) { in bnad_reinit_rx()
2129 bnad_restore_vlans(bnad, 0); in bnad_reinit_rx()
2130 bnad_enable_default_bcast(bnad); in bnad_reinit_rx()
2131 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_reinit_rx()
2132 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); in bnad_reinit_rx()
2133 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_reinit_rx()
2142 bnad_destroy_rx(struct bnad *bnad, u32 rx_id) in bnad_destroy_rx() argument
2144 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_destroy_rx()
2145 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; in bnad_destroy_rx()
2146 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; in bnad_destroy_rx()
2154 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2155 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && in bnad_destroy_rx()
2156 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { in bnad_destroy_rx()
2157 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); in bnad_destroy_rx()
2160 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2162 del_timer_sync(&bnad->dim_timer); in bnad_destroy_rx()
2165 init_completion(&bnad->bnad_completions.rx_comp); in bnad_destroy_rx()
2166 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2168 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2169 wait_for_completion(&bnad->bnad_completions.rx_comp); in bnad_destroy_rx()
2172 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); in bnad_destroy_rx()
2174 bnad_napi_delete(bnad, rx_id); in bnad_destroy_rx()
2176 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2181 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2183 bnad_rx_res_free(bnad, res_info); in bnad_destroy_rx()
2188 bnad_setup_rx(struct bnad *bnad, u32 rx_id) in bnad_setup_rx() argument
2191 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_setup_rx()
2192 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; in bnad_setup_rx()
2195 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; in bnad_setup_rx()
2211 bnad_init_rx_config(bnad, rx_config); in bnad_setup_rx()
2214 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2216 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2233 err = bnad_rx_res_alloc(bnad, res_info, rx_id); in bnad_setup_rx()
2237 bnad_rx_ctrl_init(bnad, rx_id); in bnad_setup_rx()
2240 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2241 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, in bnad_setup_rx()
2245 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2249 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2257 bnad_napi_add(bnad, rx_id); in bnad_setup_rx()
2261 err = bnad_rx_msix_register(bnad, rx_info, rx_id, in bnad_setup_rx()
2267 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2270 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) in bnad_setup_rx()
2271 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); in bnad_setup_rx()
2277 bnad_dim_timer_start(bnad); in bnad_setup_rx()
2281 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2286 bnad_destroy_rx(bnad, rx_id); in bnad_setup_rx()
2292 bnad_tx_coalescing_timeo_set(struct bnad *bnad) in bnad_tx_coalescing_timeo_set() argument
2296 tx_info = &bnad->tx_info[0]; in bnad_tx_coalescing_timeo_set()
2300 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); in bnad_tx_coalescing_timeo_set()
2305 bnad_rx_coalescing_timeo_set(struct bnad *bnad) in bnad_rx_coalescing_timeo_set() argument
2310 for (i = 0; i < bnad->num_rx; i++) { in bnad_rx_coalescing_timeo_set()
2311 rx_info = &bnad->rx_info[i]; in bnad_rx_coalescing_timeo_set()
2315 bnad->rx_coalescing_timeo); in bnad_rx_coalescing_timeo_set()
2323 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr) in bnad_mac_addr_set_locked() argument
2331 if (!bnad->rx_info[0].rx) in bnad_mac_addr_set_locked()
2334 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr); in bnad_mac_addr_set_locked()
2343 bnad_enable_default_bcast(struct bnad *bnad) in bnad_enable_default_bcast() argument
2345 struct bnad_rx_info *rx_info = &bnad->rx_info[0]; in bnad_enable_default_bcast()
2349 init_completion(&bnad->bnad_completions.mcast_comp); in bnad_enable_default_bcast()
2351 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_default_bcast()
2354 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_default_bcast()
2357 wait_for_completion(&bnad->bnad_completions.mcast_comp); in bnad_enable_default_bcast()
2361 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) in bnad_enable_default_bcast()
2369 bnad_restore_vlans(struct bnad *bnad, u32 rx_id) in bnad_restore_vlans() argument
2374 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { in bnad_restore_vlans()
2375 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_restore_vlans()
2376 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); in bnad_restore_vlans()
2377 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_restore_vlans()
2383 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) in bnad_netdev_qstats_fill() argument
2387 for (i = 0; i < bnad->num_rx; i++) { in bnad_netdev_qstats_fill()
2388 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_netdev_qstats_fill()
2389 if (bnad->rx_info[i].rx_ctrl[j].ccb) { in bnad_netdev_qstats_fill()
2390 stats->rx_packets += bnad->rx_info[i]. in bnad_netdev_qstats_fill()
2392 stats->rx_bytes += bnad->rx_info[i]. in bnad_netdev_qstats_fill()
2394 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_netdev_qstats_fill()
2395 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_netdev_qstats_fill()
2398 bnad->rx_info[i].rx_ctrl[j]. in bnad_netdev_qstats_fill()
2401 bnad->rx_info[i].rx_ctrl[j]. in bnad_netdev_qstats_fill()
2407 for (i = 0; i < bnad->num_tx; i++) { in bnad_netdev_qstats_fill()
2408 for (j = 0; j < bnad->num_txq_per_tx; j++) { in bnad_netdev_qstats_fill()
2409 if (bnad->tx_info[i].tcb[j]) { in bnad_netdev_qstats_fill()
2411 bnad->tx_info[i].tcb[j]->txq->tx_packets; in bnad_netdev_qstats_fill()
2413 bnad->tx_info[i].tcb[j]->txq->tx_bytes; in bnad_netdev_qstats_fill()
2423 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) in bnad_netdev_hwstats_fill() argument
2429 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; in bnad_netdev_hwstats_fill()
2448 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_netdev_hwstats_fill()
2452 bnad->stats.bna_stats-> in bnad_netdev_hwstats_fill()
2461 bnad_mbox_irq_sync(struct bnad *bnad) in bnad_mbox_irq_sync() argument
2466 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_sync()
2467 if (bnad->cfg_flags & BNAD_CF_MSIX) in bnad_mbox_irq_sync()
2468 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; in bnad_mbox_irq_sync()
2470 irq = bnad->pcidev->irq; in bnad_mbox_irq_sync()
2471 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_sync()
2478 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) in bnad_tso_prepare() argument
2484 BNAD_UPDATE_CTR(bnad, tso_err); in bnad_tso_prepare()
2502 BNAD_UPDATE_CTR(bnad, tso4); in bnad_tso_prepare()
2505 BNAD_UPDATE_CTR(bnad, tso6); in bnad_tso_prepare()
2517 bnad_q_num_init(struct bnad *bnad) in bnad_q_num_init() argument
2524 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) in bnad_q_num_init()
2527 bnad->num_rx = 1; in bnad_q_num_init()
2528 bnad->num_tx = 1; in bnad_q_num_init()
2529 bnad->num_rxp_per_rx = rxps; in bnad_q_num_init()
2530 bnad->num_txq_per_tx = BNAD_TXQ_NUM; in bnad_q_num_init()
2540 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) in bnad_q_num_adjust() argument
2542 bnad->num_txq_per_tx = 1; in bnad_q_num_adjust()
2543 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_q_num_adjust()
2545 (bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_q_num_adjust()
2546 bnad->num_rxp_per_rx = msix_vectors - in bnad_q_num_adjust()
2547 (bnad->num_tx * bnad->num_txq_per_tx) - in bnad_q_num_adjust()
2550 bnad->num_rxp_per_rx = 1; in bnad_q_num_adjust()
2555 bnad_ioceth_disable(struct bnad *bnad) in bnad_ioceth_disable() argument
2560 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioceth_disable()
2561 init_completion(&bnad->bnad_completions.ioc_comp); in bnad_ioceth_disable()
2562 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); in bnad_ioceth_disable()
2563 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioceth_disable()
2565 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, in bnad_ioceth_disable()
2568 err = bnad->bnad_completions.ioc_comp_status; in bnad_ioceth_disable()
2573 bnad_ioceth_enable(struct bnad *bnad) in bnad_ioceth_enable() argument
2578 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioceth_enable()
2579 init_completion(&bnad->bnad_completions.ioc_comp); in bnad_ioceth_enable()
2580 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; in bnad_ioceth_enable()
2581 bna_ioceth_enable(&bnad->bna.ioceth); in bnad_ioceth_enable()
2582 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioceth_enable()
2584 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, in bnad_ioceth_enable()
2587 err = bnad->bnad_completions.ioc_comp_status; in bnad_ioceth_enable()
2594 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, in bnad_res_free() argument
2600 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); in bnad_res_free()
2605 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, in bnad_res_alloc() argument
2611 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); in bnad_res_alloc()
2618 bnad_res_free(bnad, res_info, res_val_max); in bnad_res_alloc()
2624 bnad_enable_msix(struct bnad *bnad) in bnad_enable_msix() argument
2629 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2630 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_enable_msix()
2631 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2634 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2636 if (bnad->msix_table) in bnad_enable_msix()
2639 bnad->msix_table = in bnad_enable_msix()
2640 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); in bnad_enable_msix()
2642 if (!bnad->msix_table) in bnad_enable_msix()
2645 for (i = 0; i < bnad->msix_num; i++) in bnad_enable_msix()
2646 bnad->msix_table[i].entry = i; in bnad_enable_msix()
2648 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, in bnad_enable_msix()
2649 1, bnad->msix_num); in bnad_enable_msix()
2652 } else if (ret < bnad->msix_num) { in bnad_enable_msix()
2653 dev_warn(&bnad->pcidev->dev, in bnad_enable_msix()
2655 ret, bnad->msix_num); in bnad_enable_msix()
2657 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2659 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, in bnad_enable_msix()
2661 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2663 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + in bnad_enable_msix()
2666 if (bnad->msix_num > ret) { in bnad_enable_msix()
2667 pci_disable_msix(bnad->pcidev); in bnad_enable_msix()
2672 pci_intx(bnad->pcidev, 0); in bnad_enable_msix()
2677 dev_warn(&bnad->pcidev->dev, in bnad_enable_msix()
2680 kfree(bnad->msix_table); in bnad_enable_msix()
2681 bnad->msix_table = NULL; in bnad_enable_msix()
2682 bnad->msix_num = 0; in bnad_enable_msix()
2683 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2684 bnad->cfg_flags &= ~BNAD_CF_MSIX; in bnad_enable_msix()
2685 bnad_q_num_init(bnad); in bnad_enable_msix()
2686 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2690 bnad_disable_msix(struct bnad *bnad) in bnad_disable_msix() argument
2695 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_disable_msix()
2696 cfg_flags = bnad->cfg_flags; in bnad_disable_msix()
2697 if (bnad->cfg_flags & BNAD_CF_MSIX) in bnad_disable_msix()
2698 bnad->cfg_flags &= ~BNAD_CF_MSIX; in bnad_disable_msix()
2699 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_disable_msix()
2702 pci_disable_msix(bnad->pcidev); in bnad_disable_msix()
2703 kfree(bnad->msix_table); in bnad_disable_msix()
2704 bnad->msix_table = NULL; in bnad_disable_msix()
2713 struct bnad *bnad = netdev_priv(netdev); in bnad_open() local
2717 mutex_lock(&bnad->conf_mutex); in bnad_open()
2720 err = bnad_setup_tx(bnad, 0); in bnad_open()
2725 err = bnad_setup_rx(bnad, 0); in bnad_open()
2733 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_open()
2734 bna_enet_mtu_set(&bnad->bna.enet, in bnad_open()
2735 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); in bnad_open()
2736 bna_enet_pause_config(&bnad->bna.enet, &pause_config); in bnad_open()
2737 bna_enet_enable(&bnad->bna.enet); in bnad_open()
2738 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_open()
2741 bnad_enable_default_bcast(bnad); in bnad_open()
2744 bnad_restore_vlans(bnad, 0); in bnad_open()
2747 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_open()
2748 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); in bnad_open()
2749 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_open()
2752 bnad_stats_timer_start(bnad); in bnad_open()
2754 mutex_unlock(&bnad->conf_mutex); in bnad_open()
2759 bnad_destroy_tx(bnad, 0); in bnad_open()
2762 mutex_unlock(&bnad->conf_mutex); in bnad_open()
2769 struct bnad *bnad = netdev_priv(netdev); in bnad_stop() local
2772 mutex_lock(&bnad->conf_mutex); in bnad_stop()
2775 bnad_stats_timer_stop(bnad); in bnad_stop()
2777 init_completion(&bnad->bnad_completions.enet_comp); in bnad_stop()
2779 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stop()
2780 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, in bnad_stop()
2782 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stop()
2784 wait_for_completion(&bnad->bnad_completions.enet_comp); in bnad_stop()
2786 bnad_destroy_tx(bnad, 0); in bnad_stop()
2787 bnad_destroy_rx(bnad, 0); in bnad_stop()
2790 bnad_mbox_irq_sync(bnad); in bnad_stop()
2792 mutex_unlock(&bnad->conf_mutex); in bnad_stop()
2800 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, in bnad_txq_wi_prepare() argument
2811 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { in bnad_txq_wi_prepare()
2820 if (unlikely(gso_size > bnad->netdev->mtu)) { in bnad_txq_wi_prepare()
2821 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); in bnad_txq_wi_prepare()
2827 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); in bnad_txq_wi_prepare()
2833 if (bnad_tso_prepare(bnad, skb)) { in bnad_txq_wi_prepare()
2834 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); in bnad_txq_wi_prepare()
2846 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { in bnad_txq_wi_prepare()
2847 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); in bnad_txq_wi_prepare()
2869 BNAD_UPDATE_CTR(bnad, tcpcsum_offload); in bnad_txq_wi_prepare()
2873 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); in bnad_txq_wi_prepare()
2882 BNAD_UPDATE_CTR(bnad, udpcsum_offload); in bnad_txq_wi_prepare()
2886 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); in bnad_txq_wi_prepare()
2891 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); in bnad_txq_wi_prepare()
2911 struct bnad *bnad = netdev_priv(netdev); in bnad_start_xmit() local
2927 BNAD_UPDATE_CTR(bnad, tx_skb_too_short); in bnad_start_xmit()
2932 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); in bnad_start_xmit()
2937 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); in bnad_start_xmit()
2941 tcb = bnad->tx_info[0].tcb[txq_id]; in bnad_start_xmit()
2949 BNAD_UPDATE_CTR(bnad, tx_skb_stopping); in bnad_start_xmit()
2962 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); in bnad_start_xmit()
2971 sent = bnad_txcmpl_process(bnad, tcb); in bnad_start_xmit()
2978 BNAD_UPDATE_CTR(bnad, netif_queue_stop); in bnad_start_xmit()
2988 BNAD_UPDATE_CTR(bnad, netif_queue_stop); in bnad_start_xmit()
2992 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); in bnad_start_xmit()
3000 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { in bnad_start_xmit()
3012 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, in bnad_start_xmit()
3014 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { in bnad_start_xmit()
3016 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); in bnad_start_xmit()
3030 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, in bnad_start_xmit()
3033 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); in bnad_start_xmit()
3048 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, in bnad_start_xmit()
3050 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { in bnad_start_xmit()
3052 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, in bnad_start_xmit()
3055 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); in bnad_start_xmit()
3069 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); in bnad_start_xmit()
3071 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); in bnad_start_xmit()
3097 struct bnad *bnad = netdev_priv(netdev); in bnad_get_stats64() local
3100 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_stats64()
3102 bnad_netdev_qstats_fill(bnad, stats); in bnad_get_stats64()
3103 bnad_netdev_hwstats_fill(bnad, stats); in bnad_get_stats64()
3105 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_stats64()
3109 bnad_set_rx_ucast_fltr(struct bnad *bnad) in bnad_set_rx_ucast_fltr() argument
3111 struct net_device *netdev = bnad->netdev; in bnad_set_rx_ucast_fltr()
3118 if (netdev_uc_empty(bnad->netdev)) { in bnad_set_rx_ucast_fltr()
3119 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL); in bnad_set_rx_ucast_fltr()
3123 if (uc_count > bna_attr(&bnad->bna)->num_ucmac) in bnad_set_rx_ucast_fltr()
3136 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list); in bnad_set_rx_ucast_fltr()
3146 bnad->cfg_flags |= BNAD_CF_DEFAULT; in bnad_set_rx_ucast_fltr()
3147 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL); in bnad_set_rx_ucast_fltr()
3151 bnad_set_rx_mcast_fltr(struct bnad *bnad) in bnad_set_rx_mcast_fltr() argument
3153 struct net_device *netdev = bnad->netdev; in bnad_set_rx_mcast_fltr()
3164 if (mc_count > bna_attr(&bnad->bna)->num_mcmac) in bnad_set_rx_mcast_fltr()
3176 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list); in bnad_set_rx_mcast_fltr()
3185 bnad->cfg_flags |= BNAD_CF_ALLMULTI; in bnad_set_rx_mcast_fltr()
3186 bna_rx_mcast_delall(bnad->rx_info[0].rx); in bnad_set_rx_mcast_fltr()
3192 struct bnad *bnad = netdev_priv(netdev); in bnad_set_rx_mode() local
3196 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_rx_mode()
3198 if (bnad->rx_info[0].rx == NULL) { in bnad_set_rx_mode()
3199 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_rx_mode()
3204 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | in bnad_set_rx_mode()
3210 bnad->cfg_flags |= BNAD_CF_PROMISC; in bnad_set_rx_mode()
3212 bnad_set_rx_mcast_fltr(bnad); in bnad_set_rx_mode()
3214 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) in bnad_set_rx_mode()
3217 bnad_set_rx_ucast_fltr(bnad); in bnad_set_rx_mode()
3219 if (bnad->cfg_flags & BNAD_CF_DEFAULT) in bnad_set_rx_mode()
3225 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask); in bnad_set_rx_mode()
3227 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_rx_mode()
3239 struct bnad *bnad = netdev_priv(netdev); in bnad_set_mac_address() local
3243 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_mac_address()
3245 err = bnad_mac_addr_set_locked(bnad, sa->sa_data); in bnad_set_mac_address()
3249 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_mac_address()
3255 bnad_mtu_set(struct bnad *bnad, int frame_size) in bnad_mtu_set() argument
3259 init_completion(&bnad->bnad_completions.mtu_comp); in bnad_mtu_set()
3261 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mtu_set()
3262 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); in bnad_mtu_set()
3263 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mtu_set()
3265 wait_for_completion(&bnad->bnad_completions.mtu_comp); in bnad_mtu_set()
3267 return bnad->bnad_completions.mtu_comp_status; in bnad_mtu_set()
3274 struct bnad *bnad = netdev_priv(netdev); in bnad_change_mtu() local
3277 mutex_lock(&bnad->conf_mutex); in bnad_change_mtu()
3286 if (BNAD_PCI_DEV_IS_CAT2(bnad) && in bnad_change_mtu()
3287 netif_running(bnad->netdev)) { in bnad_change_mtu()
3291 bnad_reinit_rx(bnad); in bnad_change_mtu()
3294 err = bnad_mtu_set(bnad, new_frame); in bnad_change_mtu()
3298 mutex_unlock(&bnad->conf_mutex); in bnad_change_mtu()
3305 struct bnad *bnad = netdev_priv(netdev); in bnad_vlan_rx_add_vid() local
3308 if (!bnad->rx_info[0].rx) in bnad_vlan_rx_add_vid()
3311 mutex_lock(&bnad->conf_mutex); in bnad_vlan_rx_add_vid()
3313 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_vlan_rx_add_vid()
3314 bna_rx_vlan_add(bnad->rx_info[0].rx, vid); in bnad_vlan_rx_add_vid()
3315 set_bit(vid, bnad->active_vlans); in bnad_vlan_rx_add_vid()
3316 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_vlan_rx_add_vid()
3318 mutex_unlock(&bnad->conf_mutex); in bnad_vlan_rx_add_vid()
3326 struct bnad *bnad = netdev_priv(netdev); in bnad_vlan_rx_kill_vid() local
3329 if (!bnad->rx_info[0].rx) in bnad_vlan_rx_kill_vid()
3332 mutex_lock(&bnad->conf_mutex); in bnad_vlan_rx_kill_vid()
3334 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_vlan_rx_kill_vid()
3335 clear_bit(vid, bnad->active_vlans); in bnad_vlan_rx_kill_vid()
3336 bna_rx_vlan_del(bnad->rx_info[0].rx, vid); in bnad_vlan_rx_kill_vid()
3337 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_vlan_rx_kill_vid()
3339 mutex_unlock(&bnad->conf_mutex); in bnad_vlan_rx_kill_vid()
3346 struct bnad *bnad = netdev_priv(dev); in bnad_set_features() local
3352 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_features()
3355 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); in bnad_set_features()
3357 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); in bnad_set_features()
3359 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_features()
3369 struct bnad *bnad = netdev_priv(netdev); in bnad_netpoll() local
3375 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_netpoll()
3376 bna_intx_disable(&bnad->bna, curr_mask); in bnad_netpoll()
3377 bnad_isr(bnad->pcidev->irq, netdev); in bnad_netpoll()
3378 bna_intx_enable(&bnad->bna, curr_mask); in bnad_netpoll()
3386 for (i = 0; i < bnad->num_rx; i++) { in bnad_netpoll()
3387 rx_info = &bnad->rx_info[i]; in bnad_netpoll()
3390 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_netpoll()
3393 bnad_netif_rx_schedule_poll(bnad, in bnad_netpoll()
3419 bnad_netdev_init(struct bnad *bnad) in bnad_netdev_init() argument
3421 struct net_device *netdev = bnad->netdev; in bnad_netdev_init()
3435 netdev->mem_start = bnad->mmio_start; in bnad_netdev_init()
3436 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; in bnad_netdev_init()
3453 bnad_init(struct bnad *bnad, in bnad_init() argument
3461 bnad->netdev = netdev; in bnad_init()
3462 bnad->pcidev = pdev; in bnad_init()
3463 bnad->mmio_start = pci_resource_start(pdev, 0); in bnad_init()
3464 bnad->mmio_len = pci_resource_len(pdev, 0); in bnad_init()
3465 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len); in bnad_init()
3466 if (!bnad->bar0) { in bnad_init()
3470 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0, in bnad_init()
3471 (unsigned long long) bnad->mmio_len); in bnad_init()
3473 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_init()
3475 bnad->cfg_flags = BNAD_CF_MSIX; in bnad_init()
3477 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; in bnad_init()
3479 bnad_q_num_init(bnad); in bnad_init()
3480 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_init()
3482 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_init()
3483 (bnad->num_rx * bnad->num_rxp_per_rx) + in bnad_init()
3486 bnad->txq_depth = BNAD_TXQ_DEPTH; in bnad_init()
3487 bnad->rxq_depth = BNAD_RXQ_DEPTH; in bnad_init()
3489 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; in bnad_init()
3490 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; in bnad_init()
3492 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); in bnad_init()
3493 bnad->work_q = create_singlethread_workqueue(bnad->wq_name); in bnad_init()
3494 if (!bnad->work_q) { in bnad_init()
3495 iounmap(bnad->bar0); in bnad_init()
3508 bnad_uninit(struct bnad *bnad) in bnad_uninit() argument
3510 if (bnad->work_q) { in bnad_uninit()
3511 destroy_workqueue(bnad->work_q); in bnad_uninit()
3512 bnad->work_q = NULL; in bnad_uninit()
3515 if (bnad->bar0) in bnad_uninit()
3516 iounmap(bnad->bar0); in bnad_uninit()
3526 bnad_lock_init(struct bnad *bnad) in bnad_lock_init() argument
3528 spin_lock_init(&bnad->bna_lock); in bnad_lock_init()
3529 mutex_init(&bnad->conf_mutex); in bnad_lock_init()
3533 bnad_lock_uninit(struct bnad *bnad) in bnad_lock_uninit() argument
3535 mutex_destroy(&bnad->conf_mutex); in bnad_lock_uninit()
3540 bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev) in bnad_pci_init() argument
3576 struct bnad *bnad; in bnad_pci_probe() local
3594 netdev = alloc_etherdev(sizeof(struct bnad)); in bnad_pci_probe()
3599 bnad = netdev_priv(netdev); in bnad_pci_probe()
3600 bnad_lock_init(bnad); in bnad_pci_probe()
3601 bnad->id = atomic_inc_return(&bna_id) - 1; in bnad_pci_probe()
3603 mutex_lock(&bnad->conf_mutex); in bnad_pci_probe()
3605 err = bnad_pci_init(bnad, pdev); in bnad_pci_probe()
3613 err = bnad_init(bnad, pdev, netdev); in bnad_pci_probe()
3618 bnad_netdev_init(bnad); in bnad_pci_probe()
3625 bnad_debugfs_init(bnad); in bnad_pci_probe()
3628 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3629 bna_res_req(&bnad->res_info[0]); in bnad_pci_probe()
3630 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3633 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_probe()
3637 bna = &bnad->bna; in bnad_pci_probe()
3640 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); in bnad_pci_probe()
3641 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); in bnad_pci_probe()
3642 pcidev_info.device_id = bnad->pcidev->device; in bnad_pci_probe()
3643 pcidev_info.pci_bar_kva = bnad->bar0; in bnad_pci_probe()
3645 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3646 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); in bnad_pci_probe()
3647 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3649 bnad->stats.bna_stats = &bna->stats; in bnad_pci_probe()
3651 bnad_enable_msix(bnad); in bnad_pci_probe()
3652 err = bnad_mbox_irq_alloc(bnad); in bnad_pci_probe()
3657 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0); in bnad_pci_probe()
3658 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0); in bnad_pci_probe()
3659 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0); in bnad_pci_probe()
3660 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, in bnad_pci_probe()
3668 err = bnad_ioceth_enable(bnad); in bnad_pci_probe()
3674 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3677 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, in bnad_pci_probe()
3683 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3687 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3688 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); in bnad_pci_probe()
3689 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3691 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_probe()
3697 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3698 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); in bnad_pci_probe()
3699 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3702 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3703 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr); in bnad_pci_probe()
3704 bnad_set_netdev_perm_addr(bnad); in bnad_pci_probe()
3705 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3707 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3715 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); in bnad_pci_probe()
3720 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3724 mutex_lock(&bnad->conf_mutex); in bnad_pci_probe()
3725 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_probe()
3727 bnad_ioceth_disable(bnad); in bnad_pci_probe()
3728 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); in bnad_pci_probe()
3729 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); in bnad_pci_probe()
3730 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); in bnad_pci_probe()
3731 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3733 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3734 bnad_mbox_irq_free(bnad); in bnad_pci_probe()
3735 bnad_disable_msix(bnad); in bnad_pci_probe()
3737 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_probe()
3740 kfree(bnad->regdata); in bnad_pci_probe()
3741 bnad_debugfs_uninit(bnad); in bnad_pci_probe()
3742 bnad_uninit(bnad); in bnad_pci_probe()
3746 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3747 bnad_lock_uninit(bnad); in bnad_pci_probe()
3756 struct bnad *bnad; in bnad_pci_remove() local
3763 bnad = netdev_priv(netdev); in bnad_pci_remove()
3764 bna = &bnad->bna; in bnad_pci_remove()
3766 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) in bnad_pci_remove()
3769 mutex_lock(&bnad->conf_mutex); in bnad_pci_remove()
3770 bnad_ioceth_disable(bnad); in bnad_pci_remove()
3771 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); in bnad_pci_remove()
3772 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); in bnad_pci_remove()
3773 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); in bnad_pci_remove()
3774 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_remove()
3776 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_remove()
3778 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_remove()
3779 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_remove()
3780 bnad_mbox_irq_free(bnad); in bnad_pci_remove()
3781 bnad_disable_msix(bnad); in bnad_pci_remove()
3783 mutex_unlock(&bnad->conf_mutex); in bnad_pci_remove()
3784 bnad_lock_uninit(bnad); in bnad_pci_remove()
3786 kfree(bnad->regdata); in bnad_pci_remove()
3787 bnad_debugfs_uninit(bnad); in bnad_pci_remove()
3788 bnad_uninit(bnad); in bnad_pci_remove()
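
Note: the references above show one pattern over and over: nearly every bna_* call made from a bnad context is bracketed by spin_lock_irqsave(&bnad->bna_lock, flags) and spin_unlock_irqrestore(&bnad->bna_lock, flags). The sketch below is an illustrative reconstruction of that pattern, pieced together from the fragments listed at source lines 1690-1695 (bnad_ioc_timeout); it is a hedged sketch of the shape of the code, not a verbatim copy of the driver.

/*
 * Illustrative sketch (reconstructed from the listing above, not
 * verbatim driver code): a timer callback recovers its struct bnad,
 * takes bna_lock with interrupts disabled, makes the BNA call, and
 * releases the lock.  The same lock/call/unlock discipline appears
 * throughout the references above.
 */
static void bnad_ioc_timeout_sketch(struct timer_list *t)
{
	/* from_timer() recovers the bnad that embeds this timer */
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
	unsigned long flags;

	/* bna_lock serializes all access to the BNA state machines */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}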