Lines matching refs: bnad (ethtool paths, drivers/net/ethernet/brocade/bna/bnad_ethtool.c)
282 struct bnad *bnad = netdev_priv(netdev); in bnad_get_drvinfo() local
290 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_drvinfo()
291 bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); in bnad_get_drvinfo()
292 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_drvinfo()
299 strscpy(drvinfo->bus_info, pci_name(bnad->pcidev), in bnad_get_drvinfo()
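
The matches above show bnad_get_drvinfo() reading IOC attributes under bna_lock and copying the PCI bus name with strscpy(). A minimal sketch of that pattern, reconstructed from the matched lines; the kzalloc of ioc_attr and the fw_version copy are assumptions filled in for context:

    static void
    bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
    {
        struct bnad *bnad = netdev_priv(netdev);
        struct bfa_ioc_attr *ioc_attr;
        unsigned long flags;

        ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
        if (ioc_attr) {
            /* IOC attributes live behind the adapter spinlock */
            spin_lock_irqsave(&bnad->bna_lock, flags);
            bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
            spin_unlock_irqrestore(&bnad->bna_lock, flags);

            strscpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
                    sizeof(drvinfo->fw_version));
            kfree(ioc_attr);
        }

        strscpy(drvinfo->bus_info, pci_name(bnad->pcidev),
                sizeof(drvinfo->bus_info));
    }
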
315 struct bnad *bnad = netdev_priv(netdev); in bnad_get_coalesce() local
319 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_coalesce()
321 (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false; in bnad_get_coalesce()
322 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_coalesce()
324 coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo * in bnad_get_coalesce()
326 coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo * in bnad_get_coalesce()
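
bnad_get_coalesce() reports the DIM state under bna_lock, then converts the cached timeouts to microseconds. A sketch, assuming the two-argument ethtool signature of older kernels (newer ones add kernel_ethtool_coalesce/extack parameters) and BFI_COALESCING_TIMER_UNIT from the driver headers:

    static int
    bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
    {
        struct bnad *bnad = netdev_priv(netdev);
        unsigned long flags;

        /* cfg_flags is only stable under bna_lock */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        coalesce->use_adaptive_rx_coalesce =
            (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        /* timeouts are stored in hardware ticks, reported in usecs */
        coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
                                      BFI_COALESCING_TIMER_UNIT;
        coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
                                      BFI_COALESCING_TIMER_UNIT;
        return 0;
    }
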
338 struct bnad *bnad = netdev_priv(netdev); in bnad_set_coalesce() local
352 mutex_lock(&bnad->conf_mutex); in bnad_set_coalesce()
358 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_coalesce()
360 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) { in bnad_set_coalesce()
361 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; in bnad_set_coalesce()
362 bnad_dim_timer_start(bnad); in bnad_set_coalesce()
365 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) { in bnad_set_coalesce()
366 bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED; in bnad_set_coalesce()
367 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && in bnad_set_coalesce()
369 &bnad->run_flags)) { in bnad_set_coalesce()
371 &bnad->run_flags); in bnad_set_coalesce()
374 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_coalesce()
376 del_timer_sync(&bnad->dim_timer); in bnad_set_coalesce()
377 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_coalesce()
378 bnad_rx_coalescing_timeo_set(bnad); in bnad_set_coalesce()
381 if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs / in bnad_set_coalesce()
383 bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs / in bnad_set_coalesce()
385 bnad_tx_coalescing_timeo_set(bnad); in bnad_set_coalesce()
388 if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs / in bnad_set_coalesce()
390 bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs / in bnad_set_coalesce()
393 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) in bnad_set_coalesce()
394 bnad_rx_coalescing_timeo_set(bnad); in bnad_set_coalesce()
400 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_coalesce()
402 mutex_unlock(&bnad->conf_mutex); in bnad_set_coalesce()
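
The disable path in bnad_set_coalesce() is the interesting one: del_timer_sync() can wait on a running timer handler, so bna_lock is dropped before deleting the DIM timer and re-taken afterwards. Note also that the matched source tests BNAD_CF_DIM_ENABLED (line 367) immediately after clearing it (line 366), so that inner branch can never fire as written; the sketch below shows the apparent intent, with the test done before the clear. This is a reading of the excerpt, not the upstream code:

    /* disable path; assumes "int to_del = 0;" declared earlier */
    if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
        bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
        if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
            clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
            to_del = 1;
        }
        /* a spinlock cannot be held across del_timer_sync() */
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        if (to_del)
            del_timer_sync(&bnad->dim_timer);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bnad_rx_coalescing_timeo_set(bnad);
    }
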
412 struct bnad *bnad = netdev_priv(netdev); in bnad_get_ringparam() local
417 ringparam->rx_pending = bnad->rxq_depth; in bnad_get_ringparam()
418 ringparam->tx_pending = bnad->txq_depth; in bnad_get_ringparam()
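
bnad_get_ringparam() just reports the current depths; the matched lines omit the max fields, which presumably come from driver-header limits (BNAD_MAX_RXQ_DEPTH and BNAD_MAX_TXQ_DEPTH are assumed names here):

    ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;    /* assumed macro */
    ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;    /* assumed macro */
    ringparam->rx_pending = bnad->rxq_depth;
    ringparam->tx_pending = bnad->txq_depth;
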
428 struct bnad *bnad = netdev_priv(netdev); in bnad_set_ringparam() local
431 mutex_lock(&bnad->conf_mutex); in bnad_set_ringparam()
432 if (ringparam->rx_pending == bnad->rxq_depth && in bnad_set_ringparam()
433 ringparam->tx_pending == bnad->txq_depth) { in bnad_set_ringparam()
434 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
441 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
447 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
451 if (ringparam->rx_pending != bnad->rxq_depth) { in bnad_set_ringparam()
452 bnad->rxq_depth = ringparam->rx_pending; in bnad_set_ringparam()
454 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
458 for (i = 0; i < bnad->num_rx; i++) { in bnad_set_ringparam()
459 if (!bnad->rx_info[i].rx) in bnad_set_ringparam()
461 bnad_destroy_rx(bnad, i); in bnad_set_ringparam()
462 current_err = bnad_setup_rx(bnad, i); in bnad_set_ringparam()
467 if (!err && bnad->rx_info[0].rx) { in bnad_set_ringparam()
469 bnad_restore_vlans(bnad, 0); in bnad_set_ringparam()
470 bnad_enable_default_bcast(bnad); in bnad_set_ringparam()
471 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_ringparam()
472 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); in bnad_set_ringparam()
473 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_ringparam()
474 bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | in bnad_set_ringparam()
479 if (ringparam->tx_pending != bnad->txq_depth) { in bnad_set_ringparam()
480 bnad->txq_depth = ringparam->tx_pending; in bnad_set_ringparam()
482 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
486 for (i = 0; i < bnad->num_tx; i++) { in bnad_set_ringparam()
487 if (!bnad->tx_info[i].tx) in bnad_set_ringparam()
489 bnad_destroy_tx(bnad, i); in bnad_set_ringparam()
490 current_err = bnad_setup_tx(bnad, i); in bnad_set_ringparam()
496 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
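
bnad_set_ringparam() changes depth by tearing each Rx/Tx path down and rebuilding it under conf_mutex, remembering the first setup failure instead of aborting the loop, then re-applies VLANs, broadcast, and the MAC address once rx_info[0] is back (lines 467-473 above). A condensed sketch of the Rx half:

    int i, err = 0, current_err;

    if (ringparam->rx_pending != bnad->rxq_depth) {
        bnad->rxq_depth = ringparam->rx_pending;
        for (i = 0; i < bnad->num_rx; i++) {
            if (!bnad->rx_info[i].rx)
                continue;
            bnad_destroy_rx(bnad, i);
            current_err = bnad_setup_rx(bnad, i);
            if (current_err && !err)
                err = current_err;    /* keep first failure, keep going */
        }
        /* rebuilt Rx paths lose their config: VLANs, default bcast and
         * the MAC address must be restored afterwards */
    }
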
504 struct bnad *bnad = netdev_priv(netdev); in bnad_get_pauseparam() local
507 pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause; in bnad_get_pauseparam()
508 pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause; in bnad_get_pauseparam()
515 struct bnad *bnad = netdev_priv(netdev); in bnad_set_pauseparam() local
522 mutex_lock(&bnad->conf_mutex); in bnad_set_pauseparam()
523 if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause || in bnad_set_pauseparam()
524 pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) { in bnad_set_pauseparam()
527 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_pauseparam()
528 bna_enet_pause_config(&bnad->bna.enet, &pause_config); in bnad_set_pauseparam()
529 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_pauseparam()
531 mutex_unlock(&bnad->conf_mutex); in bnad_set_pauseparam()
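
bnad_set_pauseparam() only touches the hardware when the requested settings differ from the cached bna.enet.pause_config, and the programming itself happens under bna_lock. A sketch; the struct bna_pause_config field names follow the matched getter:

    if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
        pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
        struct bna_pause_config pause_config;

        pause_config.rx_pause = pauseparam->rx_pause;
        pause_config.tx_pause = pauseparam->tx_pause;

        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_enet_pause_config(&bnad->bna.enet, &pause_config);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
    }
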
600 struct bnad *bnad = netdev_priv(netdev); in bnad_get_strings() local
607 mutex_lock(&bnad->conf_mutex); in bnad_get_strings()
614 bmap = bna_tx_rid_mask(&bnad->bna); in bnad_get_strings()
621 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_get_strings()
629 for (i = 0; i < bnad->num_rx; i++) { in bnad_get_strings()
630 if (!bnad->rx_info[i].rx) in bnad_get_strings()
632 for (j = 0; j < bnad->num_rxp_per_rx; j++) in bnad_get_strings()
637 for (i = 0; i < bnad->num_rx; i++) { in bnad_get_strings()
638 if (!bnad->rx_info[i].rx) in bnad_get_strings()
640 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_get_strings()
642 if (bnad->rx_info[i].rx_ctrl[j].ccb && in bnad_get_strings()
643 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_get_strings()
644 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) in bnad_get_strings()
650 for (i = 0; i < bnad->num_tx; i++) { in bnad_get_strings()
651 if (!bnad->tx_info[i].tx) in bnad_get_strings()
653 for (j = 0; j < bnad->num_txq_per_tx; j++) in bnad_get_strings()
657 mutex_unlock(&bnad->conf_mutex); in bnad_get_strings()
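
bnad_get_strings() walks the same structures in the same order as the stats-fill path: fixed driver strings, then one name set per bit in the Tx/Rx rid masks, then per-CQ/RXQ/TXQ names. A sketch of the rid-mask walk, with u8 *string pointing into the ethtool strings buffer; ethtool_sprintf() is a swapped-in convenience, and the "txf%d_counter%d" format plus BNAD_NUM_TXF_COUNTERS are placeholders, not the driver's real names:

    u32 bmap;
    int i, j;

    bmap = bna_tx_rid_mask(&bnad->bna);
    for (i = 0; bmap; i++) {
        if (bmap & 1)
            for (j = 0; j < BNAD_NUM_TXF_COUNTERS; j++)   /* assumed macro */
                ethtool_sprintf(&string, "txf%d_counter%d", i, j);
        bmap >>= 1;    /* one name block per set rid bit */
    }
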
663 struct bnad *bnad = netdev_priv(netdev); in bnad_get_stats_count_locked() local
667 bmap = bna_tx_rid_mask(&bnad->bna); in bnad_get_stats_count_locked()
673 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_get_stats_count_locked()
683 for (i = 0; i < bnad->num_rx; i++) { in bnad_get_stats_count_locked()
684 if (!bnad->rx_info[i].rx) in bnad_get_stats_count_locked()
686 count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS; in bnad_get_stats_count_locked()
687 count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS; in bnad_get_stats_count_locked()
688 for (j = 0; j < bnad->num_rxp_per_rx; j++) in bnad_get_stats_count_locked()
689 if (bnad->rx_info[i].rx_ctrl[j].ccb && in bnad_get_stats_count_locked()
690 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_get_stats_count_locked()
691 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) in bnad_get_stats_count_locked()
695 for (i = 0; i < bnad->num_tx; i++) { in bnad_get_stats_count_locked()
696 if (!bnad->tx_info[i].tx) in bnad_get_stats_count_locked()
698 count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS; in bnad_get_stats_count_locked()
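
bnad_get_stats_count_locked() must agree exactly with both the string table and the fill path. Note the extra RXQ counter block counted only when the second rcb (the small-buffer queue) actually has an rxq attached. A sketch of the per-Rx accounting, reconstructed from the matched lines:

    int i, j, count = 0;

    for (i = 0; i < bnad->num_rx; i++) {
        if (!bnad->rx_info[i].rx)
            continue;
        count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
        count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
        /* the second rcb exists only in some configurations: count its
         * block only when an rxq is really attached */
        for (j = 0; j < bnad->num_rxp_per_rx; j++)
            if (bnad->rx_info[i].rx_ctrl[j].ccb &&
                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
                count += BNAD_NUM_RXQ_COUNTERS;
    }
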
704 bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) in bnad_per_q_stats_fill() argument
710 for (i = 0; i < bnad->num_rx; i++) { in bnad_per_q_stats_fill()
711 if (!bnad->rx_info[i].rx) in bnad_per_q_stats_fill()
713 for (j = 0; j < bnad->num_rxp_per_rx; j++) in bnad_per_q_stats_fill()
714 if (bnad->rx_info[i].rx_ctrl[j].ccb && in bnad_per_q_stats_fill()
715 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && in bnad_per_q_stats_fill()
716 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { in bnad_per_q_stats_fill()
717 buf[bi++] = bnad->rx_info[i].rx_ctrl[j]. in bnad_per_q_stats_fill()
720 buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. in bnad_per_q_stats_fill()
723 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
725 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
727 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
729 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
731 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
735 for (i = 0; i < bnad->num_rx; i++) { in bnad_per_q_stats_fill()
736 if (!bnad->rx_info[i].rx) in bnad_per_q_stats_fill()
738 for (j = 0; j < bnad->num_rxp_per_rx; j++) in bnad_per_q_stats_fill()
739 if (bnad->rx_info[i].rx_ctrl[j].ccb) { in bnad_per_q_stats_fill()
740 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && in bnad_per_q_stats_fill()
741 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_per_q_stats_fill()
743 rcb = bnad->rx_info[i].rx_ctrl[j]. in bnad_per_q_stats_fill()
755 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_per_q_stats_fill()
756 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_per_q_stats_fill()
758 rcb = bnad->rx_info[i].rx_ctrl[j]. in bnad_per_q_stats_fill()
773 for (i = 0; i < bnad->num_tx; i++) { in bnad_per_q_stats_fill()
774 if (!bnad->tx_info[i].tx) in bnad_per_q_stats_fill()
776 for (j = 0; j < bnad->num_txq_per_tx; j++) in bnad_per_q_stats_fill()
777 if (bnad->tx_info[i].tcb[j] && in bnad_per_q_stats_fill()
778 bnad->tx_info[i].tcb[j]->txq) { in bnad_per_q_stats_fill()
779 tcb = bnad->tx_info[i].tcb[j]; in bnad_per_q_stats_fill()
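
bnad_per_q_stats_fill() dereferences long pointer chains (rx_ctrl[j].ccb->rcb[k]->rxq, tx_info[i].tcb[j]->txq) that can go NULL during reconfiguration, so every level is checked before any counter is read. A condensed Rx sketch, sitting inside the per-Rx loop, using a local alias instead of the wrapped chains in the listing; the rx_packets/rx_bytes field names are assumptions:

    struct bna_ccb *ccb;
    struct bna_rcb *rcb;
    int j;

    for (j = 0; j < bnad->num_rxp_per_rx; j++) {
        ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
        /* validate the whole chain before reading any counter */
        if (!ccb || !ccb->rcb[0] || !ccb->rcb[0]->rxq)
            continue;
        rcb = ccb->rcb[0];
        buf[bi++] = rcb->rxq->rx_packets;    /* assumed field names */
        buf[bi++] = rcb->rxq->rx_bytes;
    }
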
795 struct bnad *bnad = netdev_priv(netdev); in bnad_get_ethtool_stats() local
802 mutex_lock(&bnad->conf_mutex); in bnad_get_ethtool_stats()
804 mutex_unlock(&bnad->conf_mutex); in bnad_get_ethtool_stats()
812 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_ethtool_stats()
815 bnad_netdev_qstats_fill(bnad, &net_stats64); in bnad_get_ethtool_stats()
816 bnad_netdev_hwstats_fill(bnad, &net_stats64); in bnad_get_ethtool_stats()
834 bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); in bnad_get_ethtool_stats()
837 stats64 = (u64 *)&bnad->stats.drv_stats; in bnad_get_ethtool_stats()
842 stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats; in bnad_get_ethtool_stats()
850 bmap = bna_tx_rid_mask(&bnad->bna); in bnad_get_ethtool_stats()
853 stats64 = (u64 *)&bnad->stats.bna_stats-> in bnad_get_ethtool_stats()
863 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_get_ethtool_stats()
866 stats64 = (u64 *)&bnad->stats.bna_stats-> in bnad_get_ethtool_stats()
876 bi = bnad_per_q_stats_fill(bnad, buf, bi); in bnad_get_ethtool_stats()
878 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_ethtool_stats()
880 mutex_unlock(&bnad->conf_mutex); in bnad_get_ethtool_stats()
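
bnad_get_ethtool_stats() first re-checks the stat count under conf_mutex so a concurrent queue reconfiguration cannot desynchronize names and values, then fills everything under bna_lock: netdev qstats/hwstats, drv_stats, bna hw_stats, the rid-mask txf/rxf blocks, and finally the per-queue counters. A sketch of the guard, with bnad_get_stats_count_locked() being the function matched above:

    mutex_lock(&bnad->conf_mutex);
    if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
        /* queue layout changed since get_sset_count(): the name table
         * no longer matches, so fill nothing */
        mutex_unlock(&bnad->conf_mutex);
        return;
    }

    spin_lock_irqsave(&bnad->bna_lock, flags);
    /* fill buf[] in exactly the order bnad_get_strings() emitted names */
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
    mutex_unlock(&bnad->conf_mutex);
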
895 bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset, in bnad_get_flash_partition_by_offset() argument
907 fcomp.bnad = bnad; in bnad_get_flash_partition_by_offset()
911 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_flash_partition_by_offset()
912 ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr, in bnad_get_flash_partition_by_offset()
915 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_flash_partition_by_offset()
919 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_flash_partition_by_offset()
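
bnad_get_flash_partition_by_offset() shows the driver's standard handshake with the asynchronous flash API: arm a completion, issue the request under bna_lock, and wait outside the lock. A sketch, assuming the bnad_iocmd_comp type and bnad_cb_completion callback that the "fcomp.bnad" line above implies, and a flash_attr buffer allocated earlier:

    struct bnad_iocmd_comp fcomp;    /* assumed type, per "fcomp.bnad" */
    enum bfa_status ret;
    unsigned long flags;

    fcomp.bnad = bnad;
    fcomp.comp_status = 0;
    init_completion(&fcomp.comp);

    spin_lock_irqsave(&bnad->bna_lock, flags);
    ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
                                bnad_cb_completion, &fcomp);
    if (ret != BFA_STATUS_OK) {
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        return -EIO;    /* illustrative error mapping */
    }
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    /* bnad_cb_completion() fires from the flash layer and completes this */
    wait_for_completion(&fcomp.comp);
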
949 struct bnad *bnad = netdev_priv(netdev); in bnad_get_eeprom() local
956 eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16); in bnad_get_eeprom()
959 flash_part = bnad_get_flash_partition_by_offset(bnad, in bnad_get_eeprom()
964 fcomp.bnad = bnad; in bnad_get_eeprom()
968 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_eeprom()
969 ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part, in bnad_get_eeprom()
970 bnad->id, bytes, eeprom->len, in bnad_get_eeprom()
974 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_eeprom()
978 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_eeprom()
989 struct bnad *bnad = netdev_priv(netdev); in bnad_set_eeprom() local
996 if (eeprom->magic != (bnad->pcidev->vendor | in bnad_set_eeprom()
997 (bnad->pcidev->device << 16))) in bnad_set_eeprom()
1001 flash_part = bnad_get_flash_partition_by_offset(bnad, in bnad_set_eeprom()
1006 fcomp.bnad = bnad; in bnad_set_eeprom()
1010 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_eeprom()
1011 ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part, in bnad_set_eeprom()
1012 bnad->id, bytes, eeprom->len, in bnad_set_eeprom()
1016 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_eeprom()
1020 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_eeprom()
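
bnad_get_eeprom()/bnad_set_eeprom() expose the flash partitions through the ethtool EEPROM interface: the magic is the PCI vendor/device pair, the ethtool offset is mapped to a partition via bnad_get_flash_partition_by_offset(), and the read/update reuses the completion handshake above. A sketch of the read side; base_offset is an assumed out-parameter name:

    /* ethtool magic: PCI vendor in the low 16 bits, device in the high */
    eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);

    flash_part = bnad_get_flash_partition_by_offset(bnad, eeprom->offset,
                                                    &base_offset);

    spin_lock_irqsave(&bnad->bna_lock, flags);
    ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
                                 bnad->id, bytes, eeprom->len,
                                 eeprom->offset - base_offset,
                                 bnad_cb_completion, &fcomp);
    /* on BFA_STATUS_OK: drop the lock and wait_for_completion(&fcomp.comp),
     * exactly as in the partition-lookup sketch above */
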
1030 struct bnad *bnad = netdev_priv(netdev); in bnad_flash_device() local
1035 ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev); in bnad_flash_device()
1041 fcomp.bnad = bnad; in bnad_flash_device()
1045 spin_lock_irq(&bnad->bna_lock); in bnad_flash_device()
1046 ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG, in bnad_flash_device()
1047 bnad->id, (u8 *)fw->data, fw->size, 0, in bnad_flash_device()
1052 spin_unlock_irq(&bnad->bna_lock); in bnad_flash_device()
1056 spin_unlock_irq(&bnad->bna_lock); in bnad_flash_device()
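
bnad_flash_device() implements ethtool -f: load the image named in eflash->data via request_firmware(), then push it into the BFA_FLASH_PART_FWIMG partition with the same completion handshake. Note it uses the spin_lock_irq()/spin_unlock_irq() variants rather than the irqsave pair used elsewhere. A sketch; the error print and labels are illustrative:

    ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
    if (ret) {
        netdev_err(netdev, "can't load firmware %s\n", eflash->data);
        goto out;
    }

    fcomp.bnad = bnad;
    fcomp.comp_status = 0;
    init_completion(&fcomp.comp);

    spin_lock_irq(&bnad->bna_lock);    /* irq variant, not irqsave */
    ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
                                   bnad->id, (u8 *)fw->data, fw->size, 0,
                                   bnad_cb_completion, &fcomp);
    if (ret != BFA_STATUS_OK) {
        spin_unlock_irq(&bnad->bna_lock);
        goto out_release;    /* hypothetical label: release_firmware(fw) */
    }
    spin_unlock_irq(&bnad->bna_lock);
    wait_for_completion(&fcomp.comp);
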