Lines Matching +full:num +full:- +full:rxq
55 struct alx_buffer *txb = &txq->bufs[entry]; in alx_free_txbuf()
58 dma_unmap_single(txq->dev, in alx_free_txbuf()
65 if (txb->skb) { in alx_free_txbuf()
66 dev_kfree_skb_any(txb->skb); in alx_free_txbuf()
67 txb->skb = NULL; in alx_free_txbuf()
73 struct alx_rx_queue *rxq = alx->qnapi[0]->rxq; in alx_refill_rx_ring() local
79 next = cur = rxq->write_idx; in alx_refill_rx_ring()
80 if (++next == alx->rx_ringsz) in alx_refill_rx_ring()
82 cur_buf = &rxq->bufs[cur]; in alx_refill_rx_ring()
84 while (!cur_buf->skb && next != rxq->read_idx) { in alx_refill_rx_ring()
85 struct alx_rfd *rfd = &rxq->rfd[cur]; in alx_refill_rx_ring()
96 skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp); in alx_refill_rx_ring()
100 if (((unsigned long)skb->data & 0xfff) == 0xfc0) in alx_refill_rx_ring()
103 dma = dma_map_single(&alx->hw.pdev->dev, in alx_refill_rx_ring()
104 skb->data, alx->rxbuf_size, in alx_refill_rx_ring()
106 if (dma_mapping_error(&alx->hw.pdev->dev, dma)) { in alx_refill_rx_ring()
111 /* Unfortunately, RX descriptor buffers must be 4-byte aligned, so we can't use IP alignment. */ in alx_refill_rx_ring()
119 cur_buf->skb = skb; in alx_refill_rx_ring()
120 dma_unmap_len_set(cur_buf, size, alx->rxbuf_size); in alx_refill_rx_ring()
122 rfd->addr = cpu_to_le64(dma); in alx_refill_rx_ring()
125 if (++next == alx->rx_ringsz) in alx_refill_rx_ring()
127 cur_buf = &rxq->bufs[cur]; in alx_refill_rx_ring()
134 rxq->write_idx = cur; in alx_refill_rx_ring()
135 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); in alx_refill_rx_ring()
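
The refill loop above is the RX producer side: each socket buffer is allocated 64 bytes larger than rxbuf_size, buffers whose address ends in 0xfc0 are nudged forward (the elided line after the check presumably does a 64-byte skb_reserve()), the data is DMA-mapped, the address is stored in the ring's RFD, and the new producer index is finally published through ALX_RFD_PIDX. A minimal userspace sketch of the address workaround, with adjust_rx_addr() as an illustrative stand-in, not driver code:

```c
#include <inttypes.h>
#include <stdio.h>

/* Sketch of the check at line 100: when the low 12 bits of a buffer
 * address are 0xfc0, skip 64 of the 64 extra bytes allocated, so the
 * DMA address no longer ends in 0xfc0 (the check implies addresses of
 * that form are problematic for the hardware's RFD DMA).
 */
static uintptr_t adjust_rx_addr(uintptr_t addr)
{
	if ((addr & 0xfff) == 0xfc0)
		addr += 64;	/* mirrors the presumed skb_reserve(skb, 64) */
	return addr;
}

int main(void)
{
	printf("0x%" PRIxPTR " -> 0x%" PRIxPTR "\n",
	       (uintptr_t)0x1000fc0, adjust_rx_addr(0x1000fc0));
	return 0;
}
```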
144 unsigned int r_idx = skb->queue_mapping; in alx_tx_queue_mapping()
146 if (r_idx >= alx->num_txq) in alx_tx_queue_mapping()
147 r_idx = r_idx % alx->num_txq; in alx_tx_queue_mapping()
149 return alx->qnapi[r_idx]->txq; in alx_tx_queue_mapping()
154 return netdev_get_tx_queue(txq->netdev, txq->queue_idx); in alx_get_tx_queue()
159 if (txq->write_idx >= txq->read_idx) in alx_tpd_avail()
160 return txq->count + txq->read_idx - txq->write_idx - 1; in alx_tpd_avail()
161 return txq->read_idx - txq->write_idx - 1; in alx_tpd_avail()
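
alx_tpd_avail() is the standard circular-ring free-space formula: one slot is deliberately kept unused so that read_idx == write_idx can only ever mean "empty". A small self-contained sketch of the same arithmetic (ring_avail() is an illustrative name, not driver code):

```c
#include <assert.h>
#include <stdio.h>

/* One slot is always left unused to disambiguate full from empty,
 * hence the trailing "- 1" in both branches. */
static int ring_avail(int count, int read_idx, int write_idx)
{
	if (write_idx >= read_idx)
		return count + read_idx - write_idx - 1;
	return read_idx - write_idx - 1;
}

int main(void)
{
	/* empty ring of 256 descriptors: 255 usable slots */
	assert(ring_avail(256, 0, 0) == 255);
	/* producer wrapped: read at 10, write at 5 -> 4 free slots */
	assert(ring_avail(256, 10, 5) == 4);
	printf("ok\n");
	return 0;
}
```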
172 alx = netdev_priv(txq->netdev); in alx_clean_tx_irq()
175 sw_read_idx = txq->read_idx; in alx_clean_tx_irq()
176 hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg); in alx_clean_tx_irq()
182 skb = txq->bufs[sw_read_idx].skb; in alx_clean_tx_irq()
184 total_bytes += skb->len; in alx_clean_tx_irq()
186 budget--; in alx_clean_tx_irq()
191 if (++sw_read_idx == txq->count) in alx_clean_tx_irq()
194 txq->read_idx = sw_read_idx; in alx_clean_tx_irq()
199 if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) && in alx_clean_tx_irq()
200 alx_tpd_avail(txq) > txq->count / 4) in alx_clean_tx_irq()
208 schedule_work(&alx->link_check_wk); in alx_schedule_link_check()
213 schedule_work(&alx->reset_wk); in alx_schedule_reset()
216 static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget) in alx_clean_rx_irq() argument
225 alx = netdev_priv(rxq->netdev); in alx_clean_rx_irq()
228 rrd = &rxq->rrd[rxq->rrd_read_idx]; in alx_clean_rx_irq()
229 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) in alx_clean_rx_irq()
231 rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT); in alx_clean_rx_irq()
233 if (ALX_GET_FIELD(le32_to_cpu(rrd->word0), in alx_clean_rx_irq()
234 RRD_SI) != rxq->read_idx || in alx_clean_rx_irq()
235 ALX_GET_FIELD(le32_to_cpu(rrd->word0), in alx_clean_rx_irq()
241 rxb = &rxq->bufs[rxq->read_idx]; in alx_clean_rx_irq()
242 dma_unmap_single(rxq->dev, in alx_clean_rx_irq()
247 skb = rxb->skb; in alx_clean_rx_irq()
248 rxb->skb = NULL; in alx_clean_rx_irq()
250 if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) || in alx_clean_rx_irq()
251 rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) { in alx_clean_rx_irq()
252 rrd->word3 = 0; in alx_clean_rx_irq()
257 length = ALX_GET_FIELD(le32_to_cpu(rrd->word3), in alx_clean_rx_irq()
258 RRD_PKTLEN) - ETH_FCS_LEN; in alx_clean_rx_irq()
260 skb->protocol = eth_type_trans(skb, rxq->netdev); in alx_clean_rx_irq()
263 if (alx->dev->features & NETIF_F_RXCSUM && in alx_clean_rx_irq()
264 !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) | in alx_clean_rx_irq()
266 switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2), in alx_clean_rx_irq()
272 skb->ip_summed = CHECKSUM_UNNECESSARY; in alx_clean_rx_irq()
277 napi_gro_receive(&rxq->np->napi, skb); in alx_clean_rx_irq()
281 if (++rxq->read_idx == rxq->count) in alx_clean_rx_irq()
282 rxq->read_idx = 0; in alx_clean_rx_irq()
283 if (++rxq->rrd_read_idx == rxq->count) in alx_clean_rx_irq()
284 rxq->rrd_read_idx = 0; in alx_clean_rx_irq()
287 rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC); in alx_clean_rx_irq()
299 struct alx_priv *alx = np->alx; in alx_poll()
300 struct alx_hw *hw = &alx->hw; in alx_poll()
305 if (np->txq) in alx_poll()
306 tx_complete = alx_clean_tx_irq(np->txq); in alx_poll()
307 if (np->rxq) in alx_poll()
308 work = alx_clean_rx_irq(np->rxq, budget); in alx_poll()
313 napi_complete_done(&np->napi, work); in alx_poll()
316 if (alx->hw.pdev->msix_enabled) { in alx_poll()
317 alx_mask_msix(hw, np->vec_idx, false); in alx_poll()
319 spin_lock_irqsave(&alx->irq_lock, flags); in alx_poll()
320 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; in alx_poll()
321 alx_write_mem32(hw, ALX_IMR, alx->int_mask); in alx_poll()
322 spin_unlock_irqrestore(&alx->irq_lock, flags); in alx_poll()
332 struct alx_hw *hw = &alx->hw; in alx_intr_handle_misc()
335 netif_warn(alx, hw, alx->dev, in alx_intr_handle_misc()
342 netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr); in alx_intr_handle_misc()
349 alx->int_mask &= ~ALX_ISR_PHY; in alx_intr_handle_misc()
350 alx_write_mem32(hw, ALX_IMR, alx->int_mask); in alx_intr_handle_misc()
359 struct alx_hw *hw = &alx->hw; in alx_intr_handle()
361 spin_lock(&alx->irq_lock); in alx_intr_handle()
365 intr &= alx->int_mask; in alx_intr_handle()
371 napi_schedule(&alx->qnapi[0]->napi); in alx_intr_handle()
373 alx->int_mask &= ~ALX_ISR_ALL_QUEUES; in alx_intr_handle()
374 alx_write_mem32(hw, ALX_IMR, alx->int_mask); in alx_intr_handle()
380 spin_unlock(&alx->irq_lock); in alx_intr_handle()
387 struct alx_hw *hw = &np->alx->hw; in alx_intr_msix_ring()
390 alx_mask_msix(hw, np->vec_idx, true); in alx_intr_msix_ring()
392 alx_write_mem32(hw, ALX_ISR, np->vec_mask); in alx_intr_msix_ring()
394 napi_schedule(&np->napi); in alx_intr_msix_ring()
402 struct alx_hw *hw = &alx->hw; in alx_intr_msix_misc()
410 intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES); in alx_intr_msix_misc()
428 return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR)); in alx_intr_msi()
434 struct alx_hw *hw = &alx->hw; in alx_intr_legacy()
439 if (intr & ALX_ISR_DIS || !(intr & alx->int_mask)) in alx_intr_legacy()
452 struct alx_hw *hw = &alx->hw; in alx_init_ring_ptrs()
453 u32 addr_hi = ((u64)alx->descmem.dma) >> 32; in alx_init_ring_ptrs()
457 for (i = 0; i < alx->num_napi; i++) { in alx_init_ring_ptrs()
458 np = alx->qnapi[i]; in alx_init_ring_ptrs()
459 if (np->txq) { in alx_init_ring_ptrs()
460 np->txq->read_idx = 0; in alx_init_ring_ptrs()
461 np->txq->write_idx = 0; in alx_init_ring_ptrs()
463 txring_header_reg[np->txq->queue_idx], in alx_init_ring_ptrs()
464 np->txq->tpd_dma); in alx_init_ring_ptrs()
467 if (np->rxq) { in alx_init_ring_ptrs()
468 np->rxq->read_idx = 0; in alx_init_ring_ptrs()
469 np->rxq->write_idx = 0; in alx_init_ring_ptrs()
470 np->rxq->rrd_read_idx = 0; in alx_init_ring_ptrs()
471 alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma); in alx_init_ring_ptrs()
472 alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma); in alx_init_ring_ptrs()
477 alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz); in alx_init_ring_ptrs()
480 alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz); in alx_init_ring_ptrs()
481 alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz); in alx_init_ring_ptrs()
482 alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size); in alx_init_ring_ptrs()
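
alx_init_ring_ptrs() programs each ring register with only the low 32 bits of its DMA address; the high 32 bits (addr_hi, computed once from descmem.dma at line 453) go into a single shared register. That is why all descriptor rings must come from one allocation inside the same 4 GB window. A hedged sketch of that split, with placeholder output instead of the real ALX_* register writes:

```c
#include <inttypes.h>
#include <stdio.h>

/* Split a ring's 64-bit DMA address the way alx_init_ring_ptrs() does:
 * one shared high half for the whole descriptor block, a per-ring low
 * half.  Values below are hypothetical. */
static void program_ring(uint64_t dma_base, uint64_t ring_off)
{
	uint32_t addr_hi = (uint32_t)(dma_base >> 32);
	uint32_t addr_lo = (uint32_t)(dma_base + ring_off);

	/* all rings must share addr_hi, i.e. live in one 4 GB window */
	printf("HI=0x%08" PRIx32 " LO=0x%08" PRIx32 "\n", addr_hi, addr_lo);
}

int main(void)
{
	program_ring(0x123456000ULL, 0x1000);	/* hypothetical base/offset */
	return 0;
}
```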
492 if (!txq->bufs) in alx_free_txring_buf()
495 for (i = 0; i < txq->count; i++) in alx_free_txring_buf()
498 memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer)); in alx_free_txring_buf()
499 memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd)); in alx_free_txring_buf()
500 txq->write_idx = 0; in alx_free_txring_buf()
501 txq->read_idx = 0; in alx_free_txring_buf()
506 static void alx_free_rxring_buf(struct alx_rx_queue *rxq) in alx_free_rxring_buf() argument
511 if (!rxq->bufs) in alx_free_rxring_buf()
514 for (i = 0; i < rxq->count; i++) { in alx_free_rxring_buf()
515 cur_buf = rxq->bufs + i; in alx_free_rxring_buf()
516 if (cur_buf->skb) { in alx_free_rxring_buf()
517 dma_unmap_single(rxq->dev, in alx_free_rxring_buf()
521 dev_kfree_skb(cur_buf->skb); in alx_free_rxring_buf()
522 cur_buf->skb = NULL; in alx_free_rxring_buf()
528 rxq->write_idx = 0; in alx_free_rxring_buf()
529 rxq->read_idx = 0; in alx_free_rxring_buf()
530 rxq->rrd_read_idx = 0; in alx_free_rxring_buf()
537 for (i = 0; i < alx->num_txq; i++) in alx_free_buffers()
538 if (alx->qnapi[i] && alx->qnapi[i]->txq) in alx_free_buffers()
539 alx_free_txring_buf(alx->qnapi[i]->txq); in alx_free_buffers()
541 if (alx->qnapi[0] && alx->qnapi[0]->rxq) in alx_free_buffers()
542 alx_free_rxring_buf(alx->qnapi[0]->rxq); in alx_free_buffers()
552 return -ENOMEM; in alx_reinit_rings()
571 struct alx_hw *hw = &alx->hw; in __alx_set_rx_mode()
575 if (!(netdev->flags & IFF_ALLMULTI)) { in __alx_set_rx_mode()
577 alx_add_mc_addr(hw, ha->addr, mc_hash); in __alx_set_rx_mode()
583 hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN); in __alx_set_rx_mode()
584 if (netdev->flags & IFF_PROMISC) in __alx_set_rx_mode()
585 hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN; in __alx_set_rx_mode()
586 if (netdev->flags & IFF_ALLMULTI) in __alx_set_rx_mode()
587 hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN; in __alx_set_rx_mode()
589 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); in __alx_set_rx_mode()
600 struct alx_hw *hw = &alx->hw; in alx_set_mac_address()
603 if (!is_valid_ether_addr(addr->sa_data)) in alx_set_mac_address()
604 return -EADDRNOTAVAIL; in alx_set_mac_address()
606 if (netdev->addr_assign_type & NET_ADDR_RANDOM) in alx_set_mac_address()
607 netdev->addr_assign_type ^= NET_ADDR_RANDOM; in alx_set_mac_address()
609 eth_hw_addr_set(netdev, addr->sa_data); in alx_set_mac_address()
610 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); in alx_set_mac_address()
611 alx_set_macaddr(hw, hw->mac_addr); in alx_set_mac_address()
619 txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL); in alx_alloc_tx_ring()
620 if (!txq->bufs) in alx_alloc_tx_ring()
621 return -ENOMEM; in alx_alloc_tx_ring()
623 txq->tpd = alx->descmem.virt + offset; in alx_alloc_tx_ring()
624 txq->tpd_dma = alx->descmem.dma + offset; in alx_alloc_tx_ring()
625 offset += sizeof(struct alx_txd) * txq->count; in alx_alloc_tx_ring()
630 static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq, in alx_alloc_rx_ring() argument
633 rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL); in alx_alloc_rx_ring()
634 if (!rxq->bufs) in alx_alloc_rx_ring()
635 return -ENOMEM; in alx_alloc_rx_ring()
637 rxq->rrd = alx->descmem.virt + offset; in alx_alloc_rx_ring()
638 rxq->rrd_dma = alx->descmem.dma + offset; in alx_alloc_rx_ring()
639 offset += sizeof(struct alx_rrd) * rxq->count; in alx_alloc_rx_ring()
641 rxq->rfd = alx->descmem.virt + offset; in alx_alloc_rx_ring()
642 rxq->rfd_dma = alx->descmem.dma + offset; in alx_alloc_rx_ring()
643 offset += sizeof(struct alx_rfd) * rxq->count; in alx_alloc_rx_ring()
658 alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz * in alx_alloc_rings()
659 alx->num_txq + in alx_alloc_rings()
660 sizeof(struct alx_rrd) * alx->rx_ringsz + in alx_alloc_rings()
661 sizeof(struct alx_rfd) * alx->rx_ringsz; in alx_alloc_rings()
662 alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev, in alx_alloc_rings()
663 alx->descmem.size, in alx_alloc_rings()
664 &alx->descmem.dma, GFP_KERNEL); in alx_alloc_rings()
665 if (!alx->descmem.virt) in alx_alloc_rings()
666 return -ENOMEM; in alx_alloc_rings()
672 for (i = 0; i < alx->num_txq; i++) { in alx_alloc_rings()
673 offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset); in alx_alloc_rings()
675 netdev_err(alx->dev, "Allocation of tx buffer failed!\n"); in alx_alloc_rings()
676 return -ENOMEM; in alx_alloc_rings()
680 offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset); in alx_alloc_rings()
682 netdev_err(alx->dev, "Allocation of rx buffer failed!\n"); in alx_alloc_rings()
683 return -ENOMEM; in alx_alloc_rings()
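
alx_alloc_rings() sizes one coherent DMA block for everything: num_txq TPD rings, then the RRD ring, then the RFD ring, and alx_alloc_tx_ring()/alx_alloc_rx_ring() carve it up by advancing a byte offset. A small sketch of that layout arithmetic (the sizes below are stand-ins, not the real sizeof(struct alx_txd) etc.):

```c
#include <stdio.h>

/* Walk the same offsets alx_alloc_tx_ring()/alx_alloc_rx_ring() would
 * claim from the single dma_alloc_coherent() block. */
int main(void)
{
	unsigned int tpd_sz = 16, rrd_sz = 16, rfd_sz = 8;	/* assumed */
	unsigned int tx_ringsz = 256, rx_ringsz = 512, num_txq = 4;
	unsigned int offset = 0;

	for (unsigned int i = 0; i < num_txq; i++) {
		printf("txq%u tpd @ %u\n", i, offset);
		offset += tpd_sz * tx_ringsz;
	}
	printf("rrd @ %u\n", offset);
	offset += rrd_sz * rx_ringsz;
	printf("rfd @ %u\n", offset);
	offset += rfd_sz * rx_ringsz;
	printf("total descmem: %u bytes\n", offset);
	return 0;
}
```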
695 for (i = 0; i < alx->num_txq; i++) in alx_free_rings()
696 if (alx->qnapi[i] && alx->qnapi[i]->txq) in alx_free_rings()
697 kfree(alx->qnapi[i]->txq->bufs); in alx_free_rings()
699 if (alx->qnapi[0] && alx->qnapi[0]->rxq) in alx_free_rings()
700 kfree(alx->qnapi[0]->rxq->bufs); in alx_free_rings()
702 if (alx->descmem.virt) in alx_free_rings()
703 dma_free_coherent(&alx->hw.pdev->dev, in alx_free_rings()
704 alx->descmem.size, in alx_free_rings()
705 alx->descmem.virt, in alx_free_rings()
706 alx->descmem.dma); in alx_free_rings()
714 for (i = 0; i < alx->num_napi; i++) { in alx_free_napis()
715 np = alx->qnapi[i]; in alx_free_napis()
719 netif_napi_del(&np->napi); in alx_free_napis()
720 kfree(np->txq); in alx_free_napis()
721 kfree(np->rxq); in alx_free_napis()
723 alx->qnapi[i] = NULL; in alx_free_napis()
741 struct alx_rx_queue *rxq; in alx_alloc_napis() local
745 alx->int_mask &= ~ALX_ISR_ALL_QUEUES; in alx_alloc_napis()
748 for (i = 0; i < alx->num_napi; i++) { in alx_alloc_napis()
753 np->alx = alx; in alx_alloc_napis()
754 netif_napi_add(alx->dev, &np->napi, alx_poll); in alx_alloc_napis()
755 alx->qnapi[i] = np; in alx_alloc_napis()
759 for (i = 0; i < alx->num_txq; i++) { in alx_alloc_napis()
760 np = alx->qnapi[i]; in alx_alloc_napis()
765 np->txq = txq; in alx_alloc_napis()
766 txq->p_reg = tx_pidx_reg[i]; in alx_alloc_napis()
767 txq->c_reg = tx_cidx_reg[i]; in alx_alloc_napis()
768 txq->queue_idx = i; in alx_alloc_napis()
769 txq->count = alx->tx_ringsz; in alx_alloc_napis()
770 txq->netdev = alx->dev; in alx_alloc_napis()
771 txq->dev = &alx->hw.pdev->dev; in alx_alloc_napis()
772 np->vec_mask |= tx_vect_mask[i]; in alx_alloc_napis()
773 alx->int_mask |= tx_vect_mask[i]; in alx_alloc_napis()
777 np = alx->qnapi[0]; in alx_alloc_napis()
778 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); in alx_alloc_napis()
779 if (!rxq) in alx_alloc_napis()
782 np->rxq = rxq; in alx_alloc_napis()
783 rxq->np = alx->qnapi[0]; in alx_alloc_napis()
784 rxq->queue_idx = 0; in alx_alloc_napis()
785 rxq->count = alx->rx_ringsz; in alx_alloc_napis()
786 rxq->netdev = alx->dev; in alx_alloc_napis()
787 rxq->dev = &alx->hw.pdev->dev; in alx_alloc_napis()
788 np->vec_mask |= rx_vect_mask[0]; in alx_alloc_napis()
789 alx->int_mask |= rx_vect_mask[0]; in alx_alloc_napis()
794 netdev_err(alx->dev, "error allocating internal structures\n"); in alx_alloc_napis()
796 return -ENOMEM; in alx_alloc_napis()
808 struct alx_hw *hw = &alx->hw; in alx_config_vector_mapping()
812 if (alx->hw.pdev->msix_enabled) { in alx_config_vector_mapping()
814 for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) { in alx_config_vector_mapping()
837 err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec, in alx_enable_msix()
840 netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n"); in alx_enable_msix()
844 alx->num_vec = num_vec; in alx_enable_msix()
845 alx->num_napi = num_vec - 1; in alx_enable_msix()
846 alx->num_txq = num_txq; in alx_enable_msix()
847 alx->num_rxq = num_rxq; in alx_enable_msix()
854 struct net_device *netdev = alx->dev; in alx_request_msix()
857 err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc, in alx_request_msix()
858 0, netdev->name, alx); in alx_request_msix()
862 for (i = 0; i < alx->num_napi; i++) { in alx_request_msix()
863 struct alx_napi *np = alx->qnapi[i]; in alx_request_msix()
867 if (np->txq && np->rxq) in alx_request_msix()
868 sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name, in alx_request_msix()
869 np->txq->queue_idx); in alx_request_msix()
870 else if (np->txq) in alx_request_msix()
871 sprintf(np->irq_lbl, "%s-tx-%u", netdev->name, in alx_request_msix()
872 np->txq->queue_idx); in alx_request_msix()
873 else if (np->rxq) in alx_request_msix()
874 sprintf(np->irq_lbl, "%s-rx-%u", netdev->name, in alx_request_msix()
875 np->rxq->queue_idx); in alx_request_msix()
877 sprintf(np->irq_lbl, "%s-unused", netdev->name); in alx_request_msix()
879 np->vec_idx = vector; in alx_request_msix()
880 err = request_irq(pci_irq_vector(alx->hw.pdev, vector), in alx_request_msix()
881 alx_intr_msix_ring, 0, np->irq_lbl, np); in alx_request_msix()
888 free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx); in alx_request_msix()
890 vector--; in alx_request_msix()
892 free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), in alx_request_msix()
893 alx->qnapi[i]); in alx_request_msix()
903 ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1, in alx_init_intr()
908 alx->num_vec = 1; in alx_init_intr()
909 alx->num_napi = 1; in alx_init_intr()
910 alx->num_txq = 1; in alx_init_intr()
911 alx->num_rxq = 1; in alx_init_intr()
917 struct alx_hw *hw = &alx->hw; in alx_irq_enable()
920 /* level-1 interrupt switch */ in alx_irq_enable()
922 alx_write_mem32(hw, ALX_IMR, alx->int_mask); in alx_irq_enable()
925 if (alx->hw.pdev->msix_enabled) { in alx_irq_enable()
927 for (i = 0; i < alx->num_vec; i++) in alx_irq_enable()
934 struct alx_hw *hw = &alx->hw; in alx_irq_disable()
941 if (alx->hw.pdev->msix_enabled) { in alx_irq_disable()
942 for (i = 0; i < alx->num_vec; i++) { in alx_irq_disable()
944 synchronize_irq(pci_irq_vector(alx->hw.pdev, i)); in alx_irq_disable()
947 synchronize_irq(pci_irq_vector(alx->hw.pdev, 0)); in alx_irq_disable()
957 pci_free_irq_vectors(alx->hw.pdev); in alx_realloc_resources()
976 struct pci_dev *pdev = alx->hw.pdev; in alx_request_irq()
977 struct alx_hw *hw = &alx->hw; in alx_request_irq()
981 msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT; in alx_request_irq()
983 if (alx->hw.pdev->msix_enabled) { in alx_request_irq()
995 if (alx->hw.pdev->msi_enabled) { in alx_request_irq()
999 alx->dev->name, alx); in alx_request_irq()
1004 pci_free_irq_vectors(alx->hw.pdev); in alx_request_irq()
1009 alx->dev->name, alx); in alx_request_irq()
1014 netdev_err(alx->dev, "IRQ registration failed!\n"); in alx_request_irq()
1020 struct pci_dev *pdev = alx->hw.pdev; in alx_free_irq()
1024 if (alx->hw.pdev->msix_enabled) { in alx_free_irq()
1025 for (i = 0; i < alx->num_napi; i++) in alx_free_irq()
1026 free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]); in alx_free_irq()
1034 struct alx_hw *hw = &alx->hw; in alx_identify_hw()
1038 return -EINVAL; in alx_identify_hw()
1040 hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2; in alx_identify_hw()
1047 struct pci_dev *pdev = alx->hw.pdev; in alx_init_sw()
1048 struct alx_hw *hw = &alx->hw; in alx_init_sw()
1053 dev_err(&pdev->dev, "unrecognized chip, aborting\n"); in alx_init_sw()
1057 alx->hw.lnk_patch = in alx_init_sw()
1058 pdev->device == ALX_DEV_ID_AR8161 && in alx_init_sw()
1059 pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC && in alx_init_sw()
1060 pdev->subsystem_device == 0x0091 && in alx_init_sw()
1061 pdev->revision == 0; in alx_init_sw()
1063 hw->smb_timer = 400; in alx_init_sw()
1064 hw->mtu = alx->dev->mtu; in alx_init_sw()
1065 alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); in alx_init_sw()
1066 /* MTU range: 34 - 9256 */ in alx_init_sw()
1067 alx->dev->min_mtu = 34; in alx_init_sw()
1068 alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE); in alx_init_sw()
1069 alx->tx_ringsz = 256; in alx_init_sw()
1070 alx->rx_ringsz = 512; in alx_init_sw()
1071 hw->imt = 200; in alx_init_sw()
1072 alx->int_mask = ALX_ISR_MISC; in alx_init_sw()
1073 hw->dma_chnl = hw->max_dma_chnl; in alx_init_sw()
1074 hw->ith_tpd = alx->tx_ringsz / 3; in alx_init_sw()
1075 hw->link_speed = SPEED_UNKNOWN; in alx_init_sw()
1076 hw->duplex = DUPLEX_UNKNOWN; in alx_init_sw()
1077 hw->adv_cfg = ADVERTISED_Autoneg | in alx_init_sw()
1083 hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX; in alx_init_sw()
1085 hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN | in alx_init_sw()
1093 mutex_init(&alx->mtx); in alx_init_sw()
1102 if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE) in alx_fix_features()
1112 netif_trans_update(alx->dev); in alx_netif_stop()
1113 if (netif_carrier_ok(alx->dev)) { in alx_netif_stop()
1114 netif_carrier_off(alx->dev); in alx_netif_stop()
1115 netif_tx_disable(alx->dev); in alx_netif_stop()
1116 for (i = 0; i < alx->num_napi; i++) in alx_netif_stop()
1117 napi_disable(&alx->qnapi[i]->napi); in alx_netif_stop()
1123 struct alx_hw *hw = &alx->hw; in alx_halt()
1125 lockdep_assert_held(&alx->mtx); in alx_halt()
1128 hw->link_speed = SPEED_UNKNOWN; in alx_halt()
1129 hw->duplex = DUPLEX_UNKNOWN; in alx_halt()
1141 struct alx_hw *hw = &alx->hw; in alx_configure()
1145 __alx_set_rx_mode(alx->dev); in alx_configure()
1147 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); in alx_configure()
1152 lockdep_assert_held(&alx->mtx); in alx_activate()
1159 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); in alx_activate()
1168 lockdep_assert_held(&alx->mtx); in alx_reinit()
1179 WRITE_ONCE(netdev->mtu, mtu); in alx_change_mtu()
1180 alx->hw.mtu = mtu; in alx_change_mtu()
1181 alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); in alx_change_mtu()
1184 mutex_lock(&alx->mtx); in alx_change_mtu()
1186 mutex_unlock(&alx->mtx); in alx_change_mtu()
1195 netif_tx_wake_all_queues(alx->dev); in alx_netif_start()
1196 for (i = 0; i < alx->num_napi; i++) in alx_netif_start()
1197 napi_enable(&alx->qnapi[i]->napi); in alx_netif_start()
1198 netif_carrier_on(alx->dev); in alx_netif_start()
1213 netif_carrier_off(alx->dev); in __alx_open()
1231 * requesting msi-x interrupts failed in __alx_open()
1235 netif_set_real_num_tx_queues(alx->dev, alx->num_txq); in __alx_open()
1236 netif_set_real_num_rx_queues(alx->dev, alx->num_rxq); in __alx_open()
1239 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); in __alx_open()
1244 netif_tx_start_all_queues(alx->dev); in __alx_open()
1253 pci_free_irq_vectors(alx->hw.pdev); in __alx_open()
1259 lockdep_assert_held(&alx->mtx); in __alx_stop()
1263 cancel_work_sync(&alx->link_check_wk); in __alx_stop()
1264 cancel_work_sync(&alx->reset_wk); in __alx_stop()
1273 switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) { in alx_speed_desc()
1291 struct alx_hw *hw = &alx->hw; in alx_check_link()
1296 lockdep_assert_held(&alx->mtx); in alx_check_link()
1303 old_speed = hw->link_speed; in alx_check_link()
1308 spin_lock_irqsave(&alx->irq_lock, flags); in alx_check_link()
1309 alx->int_mask |= ALX_ISR_PHY; in alx_check_link()
1310 alx_write_mem32(hw, ALX_IMR, alx->int_mask); in alx_check_link()
1311 spin_unlock_irqrestore(&alx->irq_lock, flags); in alx_check_link()
1313 if (old_speed == hw->link_speed) in alx_check_link()
1316 if (hw->link_speed != SPEED_UNKNOWN) { in alx_check_link()
1317 netif_info(alx, link, alx->dev, in alx_check_link()
1328 netif_info(alx, link, alx->dev, "Link Down\n"); in alx_check_link()
1355 mutex_lock(&alx->mtx); in alx_open()
1357 mutex_unlock(&alx->mtx); in alx_open()
1366 mutex_lock(&alx->mtx); in alx_stop()
1368 mutex_unlock(&alx->mtx); in alx_stop()
1379 mutex_lock(&alx->mtx); in alx_link_check()
1381 mutex_unlock(&alx->mtx); in alx_link_check()
1388 mutex_lock(&alx->mtx); in alx_reset()
1390 mutex_unlock(&alx->mtx); in alx_reset()
1395 int num; in alx_tpd_req() local
1397 num = skb_shinfo(skb)->nr_frags + 1; in alx_tpd_req()
1399 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) in alx_tpd_req()
1400 num++; in alx_tpd_req()
1402 return num; in alx_tpd_req()
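
alx_tpd_req() estimates how many descriptors a frame will need: one for the head plus one per page fragment, plus one more for IPv6 TSO, since in LSO v2 mode alx_map_tx_skb() emits an extra leading TPD carrying only the total packet length (see lines 1469-1476 below). A trivial sketch of the same count:

```c
#include <stdio.h>

/* Descriptor estimate mirroring alx_tpd_req(); names are illustrative. */
static int tpd_req(int nr_frags, int is_gso_v6)
{
	int num = nr_frags + 1;		/* head + one per page fragment */

	if (is_gso_v6)
		num++;			/* extra length-only TPD for LSO v2 */
	return num;
}

int main(void)
{
	printf("3 frags, TSO6: %d descriptors\n", tpd_req(3, 1));	/* 5 */
	return 0;
}
```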
1409 if (skb->ip_summed != CHECKSUM_PARTIAL) in alx_tx_csum()
1414 return -EINVAL; in alx_tx_csum()
1416 css = cso + skb->csum_offset; in alx_tx_csum()
1417 first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT); in alx_tx_csum()
1418 first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT); in alx_tx_csum()
1419 first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT); in alx_tx_csum()
1428 if (skb->ip_summed != CHECKSUM_PARTIAL) in alx_tso()
1438 if (skb->protocol == htons(ETH_P_IP)) { in alx_tso()
1441 iph->check = 0; in alx_tso()
1442 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, in alx_tso()
1444 first->word1 |= 1 << TPD_IPV4_SHIFT; in alx_tso()
1448 first->adrl.l.pkt_len = skb->len; in alx_tso()
1449 first->word1 |= 1 << TPD_LSO_V2_SHIFT; in alx_tso()
1452 first->word1 |= 1 << TPD_LSO_EN_SHIFT; in alx_tso()
1453 first->word1 |= (skb_transport_offset(skb) & in alx_tso()
1455 first->word1 |= (skb_shinfo(skb)->gso_size & in alx_tso()
1464 int maplen, f, first_idx = txq->write_idx; in alx_map_tx_skb()
1466 first_tpd = &txq->tpd[txq->write_idx]; in alx_map_tx_skb()
1469 if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) { in alx_map_tx_skb()
1470 if (++txq->write_idx == txq->count) in alx_map_tx_skb()
1471 txq->write_idx = 0; in alx_map_tx_skb()
1473 tpd = &txq->tpd[txq->write_idx]; in alx_map_tx_skb()
1474 tpd->len = first_tpd->len; in alx_map_tx_skb()
1475 tpd->vlan_tag = first_tpd->vlan_tag; in alx_map_tx_skb()
1476 tpd->word1 = first_tpd->word1; in alx_map_tx_skb()
1480 dma = dma_map_single(txq->dev, skb->data, maplen, in alx_map_tx_skb()
1482 if (dma_mapping_error(txq->dev, dma)) in alx_map_tx_skb()
1485 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); in alx_map_tx_skb()
1486 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); in alx_map_tx_skb()
1488 tpd->adrl.addr = cpu_to_le64(dma); in alx_map_tx_skb()
1489 tpd->len = cpu_to_le16(maplen); in alx_map_tx_skb()
1491 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { in alx_map_tx_skb()
1492 skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in alx_map_tx_skb()
1494 if (++txq->write_idx == txq->count) in alx_map_tx_skb()
1495 txq->write_idx = 0; in alx_map_tx_skb()
1496 tpd = &txq->tpd[txq->write_idx]; in alx_map_tx_skb()
1498 tpd->word1 = first_tpd->word1; in alx_map_tx_skb()
1501 dma = skb_frag_dma_map(txq->dev, frag, 0, in alx_map_tx_skb()
1503 if (dma_mapping_error(txq->dev, dma)) in alx_map_tx_skb()
1505 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); in alx_map_tx_skb()
1506 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); in alx_map_tx_skb()
1508 tpd->adrl.addr = cpu_to_le64(dma); in alx_map_tx_skb()
1509 tpd->len = cpu_to_le16(maplen); in alx_map_tx_skb()
1513 tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT); in alx_map_tx_skb()
1514 txq->bufs[txq->write_idx].skb = skb; in alx_map_tx_skb()
1516 if (++txq->write_idx == txq->count) in alx_map_tx_skb()
1517 txq->write_idx = 0; in alx_map_tx_skb()
1523 while (f != txq->write_idx) { in alx_map_tx_skb()
1525 if (++f == txq->count) in alx_map_tx_skb()
1528 return -ENOMEM; in alx_map_tx_skb()
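
The tail of alx_map_tx_skb() (the error path ending in the -ENOMEM return above) unwinds a partially mapped frame: it walks from the first descriptor written for this skb up to write_idx, wrapping at the ring size, and releases each slot. A sketch of that rollback pattern; unmap_one() stands in for the dma_unmap/cleanup the elided lines presumably perform:

```c
#include <stdio.h>

#define RING_COUNT 256	/* assumed ring size for the sketch */

static void unmap_one(int idx)
{
	printf("unmap slot %d\n", idx);	/* placeholder for DMA unmap */
}

/* Release every slot written for the failed frame, wrapping like the
 * real ring indices do. */
static void unwind(int first_idx, int write_idx)
{
	int f = first_idx;

	while (f != write_idx) {
		unmap_one(f);
		if (++f == RING_COUNT)
			f = 0;
	}
}

int main(void)
{
	unwind(254, 2);	/* wraps over the ring end: 254, 255, 0, 1 */
	return 0;
}
```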
1538 alx = netdev_priv(txq->netdev); in alx_start_xmit_ring()
1545 first = &txq->tpd[txq->write_idx]; in alx_start_xmit_ring()
1557 netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len); in alx_start_xmit_ring()
1561 alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx); in alx_start_xmit_ring()
1563 if (alx_tpd_avail(txq) < txq->count / 8) in alx_start_xmit_ring()
1591 struct alx_hw *hw = &alx->hw; in alx_mdio_read()
1595 if (prtad != hw->mdio.prtad) in alx_mdio_read()
1596 return -EINVAL; in alx_mdio_read()
1612 struct alx_hw *hw = &alx->hw; in alx_mdio_write()
1614 if (prtad != hw->mdio.prtad) in alx_mdio_write()
1615 return -EINVAL; in alx_mdio_write()
1628 return -EAGAIN; in alx_ioctl()
1630 return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd); in alx_ioctl()
1639 if (alx->hw.pdev->msix_enabled) { in alx_poll_controller()
1641 for (i = 0; i < alx->num_txq; i++) in alx_poll_controller()
1642 alx_intr_msix_ring(0, alx->qnapi[i]); in alx_poll_controller()
1643 } else if (alx->hw.pdev->msi_enabled) in alx_poll_controller()
1654 struct alx_hw_stats *hw_stats = &alx->hw.stats; in alx_get_stats64()
1656 spin_lock(&alx->stats_lock); in alx_get_stats64()
1658 alx_update_hw_stats(&alx->hw); in alx_get_stats64()
1660 net_stats->tx_bytes = hw_stats->tx_byte_cnt; in alx_get_stats64()
1661 net_stats->rx_bytes = hw_stats->rx_byte_cnt; in alx_get_stats64()
1662 net_stats->multicast = hw_stats->rx_mcast; in alx_get_stats64()
1663 net_stats->collisions = hw_stats->tx_single_col + in alx_get_stats64()
1664 hw_stats->tx_multi_col + in alx_get_stats64()
1665 hw_stats->tx_late_col + in alx_get_stats64()
1666 hw_stats->tx_abort_col; in alx_get_stats64()
1668 net_stats->rx_errors = hw_stats->rx_frag + in alx_get_stats64()
1669 hw_stats->rx_fcs_err + in alx_get_stats64()
1670 hw_stats->rx_len_err + in alx_get_stats64()
1671 hw_stats->rx_ov_sz + in alx_get_stats64()
1672 hw_stats->rx_ov_rrd + in alx_get_stats64()
1673 hw_stats->rx_align_err + in alx_get_stats64()
1674 hw_stats->rx_ov_rxf; in alx_get_stats64()
1676 net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf; in alx_get_stats64()
1677 net_stats->rx_length_errors = hw_stats->rx_len_err; in alx_get_stats64()
1678 net_stats->rx_crc_errors = hw_stats->rx_fcs_err; in alx_get_stats64()
1679 net_stats->rx_frame_errors = hw_stats->rx_align_err; in alx_get_stats64()
1680 net_stats->rx_dropped = hw_stats->rx_ov_rrd; in alx_get_stats64()
1682 net_stats->tx_errors = hw_stats->tx_late_col + in alx_get_stats64()
1683 hw_stats->tx_abort_col + in alx_get_stats64()
1684 hw_stats->tx_underrun + in alx_get_stats64()
1685 hw_stats->tx_trunc; in alx_get_stats64()
1687 net_stats->tx_aborted_errors = hw_stats->tx_abort_col; in alx_get_stats64()
1688 net_stats->tx_fifo_errors = hw_stats->tx_underrun; in alx_get_stats64()
1689 net_stats->tx_window_errors = hw_stats->tx_late_col; in alx_get_stats64()
1691 net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; in alx_get_stats64()
1692 net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; in alx_get_stats64()
1694 spin_unlock(&alx->stats_lock); in alx_get_stats64()
1726 /* The alx chip can DMA to 64-bit addresses, but it uses a single shared register for the high 32 bits, so only a single, aligned, 4 GB physical address range can be used for descriptors. */ in alx_probe()
1730 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { in alx_probe()
1731 dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); in alx_probe()
1733 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in alx_probe()
1735 dev_err(&pdev->dev, "No usable DMA config, aborting\n"); in alx_probe()
1742 dev_err(&pdev->dev, in alx_probe()
1749 if (!pdev->pm_cap) { in alx_probe()
1750 dev_err(&pdev->dev, in alx_probe()
1752 err = -EIO; in alx_probe()
1759 err = -ENOMEM; in alx_probe()
1763 SET_NETDEV_DEV(netdev, &pdev->dev); in alx_probe()
1765 spin_lock_init(&alx->hw.mdio_lock); in alx_probe()
1766 spin_lock_init(&alx->irq_lock); in alx_probe()
1767 spin_lock_init(&alx->stats_lock); in alx_probe()
1768 alx->dev = netdev; in alx_probe()
1769 alx->hw.pdev = pdev; in alx_probe()
1770 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | in alx_probe()
1772 hw = &alx->hw; in alx_probe()
1775 hw->hw_addr = pci_ioremap_bar(pdev, 0); in alx_probe()
1776 if (!hw->hw_addr) { in alx_probe()
1777 dev_err(&pdev->dev, "cannot map device registers\n"); in alx_probe()
1778 err = -EIO; in alx_probe()
1782 netdev->netdev_ops = &alx_netdev_ops; in alx_probe()
1783 netdev->ethtool_ops = &alx_ethtool_ops; in alx_probe()
1784 netdev->irq = pci_irq_vector(pdev, 0); in alx_probe()
1785 netdev->watchdog_timeo = ALX_WATCHDOG_TIME; in alx_probe()
1787 if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) in alx_probe()
1788 pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; in alx_probe()
1792 dev_err(&pdev->dev, "net device private data init failed\n"); in alx_probe()
1796 mutex_lock(&alx->mtx); in alx_probe()
1807 dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); in alx_probe()
1813 err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); in alx_probe()
1815 dev_err(&pdev->dev, in alx_probe()
1822 netdev->hw_features = NETIF_F_SG | in alx_probe()
1828 if (alx_get_perm_macaddr(hw, hw->perm_addr)) { in alx_probe()
1829 dev_warn(&pdev->dev, in alx_probe()
1832 memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len); in alx_probe()
1835 memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); in alx_probe()
1836 eth_hw_addr_set(netdev, hw->mac_addr); in alx_probe()
1837 memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); in alx_probe()
1839 hw->mdio.prtad = 0; in alx_probe()
1840 hw->mdio.mmds = 0; in alx_probe()
1841 hw->mdio.dev = netdev; in alx_probe()
1842 hw->mdio.mode_support = MDIO_SUPPORTS_C45 | in alx_probe()
1845 hw->mdio.mdio_read = alx_mdio_read; in alx_probe()
1846 hw->mdio.mdio_write = alx_mdio_write; in alx_probe()
1849 dev_err(&pdev->dev, "failed to identify PHY\n"); in alx_probe()
1850 err = -EIO; in alx_probe()
1854 mutex_unlock(&alx->mtx); in alx_probe()
1856 INIT_WORK(&alx->link_check_wk, alx_link_check); in alx_probe()
1857 INIT_WORK(&alx->reset_wk, alx_reset); in alx_probe()
1862 dev_err(&pdev->dev, "register netdevice failed\n"); in alx_probe()
1868 netdev->dev_addr); in alx_probe()
1873 mutex_unlock(&alx->mtx); in alx_probe()
1875 iounmap(hw->hw_addr); in alx_probe()
1888 struct alx_hw *hw = &alx->hw; in alx_remove()
1891 alx_set_macaddr(hw, hw->perm_addr); in alx_remove()
1893 unregister_netdev(alx->dev); in alx_remove()
1894 iounmap(hw->hw_addr); in alx_remove()
1899 mutex_destroy(&alx->mtx); in alx_remove()
1901 free_netdev(alx->dev); in alx_remove()
1908 if (!netif_running(alx->dev)) in alx_suspend()
1912 netif_device_detach(alx->dev); in alx_suspend()
1914 mutex_lock(&alx->mtx); in alx_suspend()
1916 mutex_unlock(&alx->mtx); in alx_suspend()
1925 struct alx_hw *hw = &alx->hw; in alx_resume()
1929 mutex_lock(&alx->mtx); in alx_resume()
1932 if (!netif_running(alx->dev)) { in alx_resume()
1941 netif_device_attach(alx->dev); in alx_resume()
1944 mutex_unlock(&alx->mtx); in alx_resume()
1955 struct net_device *netdev = alx->dev; in alx_pci_error_detected()
1958 dev_info(&pdev->dev, "pci error detected\n"); in alx_pci_error_detected()
1960 mutex_lock(&alx->mtx); in alx_pci_error_detected()
1972 mutex_unlock(&alx->mtx); in alx_pci_error_detected()
1980 struct alx_hw *hw = &alx->hw; in alx_pci_error_slot_reset()
1983 dev_info(&pdev->dev, "pci error slot reset\n"); in alx_pci_error_slot_reset()
1985 mutex_lock(&alx->mtx); in alx_pci_error_slot_reset()
1988 dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n"); in alx_pci_error_slot_reset()
1998 mutex_unlock(&alx->mtx); in alx_pci_error_slot_reset()
2006 struct net_device *netdev = alx->dev; in alx_pci_error_resume()
2008 dev_info(&pdev->dev, "pci error resume\n"); in alx_pci_error_resume()
2010 mutex_lock(&alx->mtx); in alx_pci_error_resume()
2017 mutex_unlock(&alx->mtx); in alx_pci_error_resume()
2056 "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");