Lines matching "4-ring" (search tokens: +full:4 +full:- +full:ring) in the Broadcom bgmac Ethernet driver, bgmac.c

33 	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);  in bgmac_wait_value()
41 static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) in bgmac_dma_tx_reset() argument
46 if (!ring->mmio_base) in bgmac_dma_tx_reset()
49 /* Suspend DMA TX ring first. in bgmac_dma_tx_reset()
53 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, in bgmac_dma_tx_reset()
56 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); in bgmac_dma_tx_reset()
67 dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", in bgmac_dma_tx_reset()
68 ring->mmio_base, val); in bgmac_dma_tx_reset()
71 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0); in bgmac_dma_tx_reset()
73 ring->mmio_base + BGMAC_DMA_TX_STATUS, in bgmac_dma_tx_reset()
76 dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", in bgmac_dma_tx_reset()
77 ring->mmio_base); in bgmac_dma_tx_reset()
79 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); in bgmac_dma_tx_reset()
81 dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n", in bgmac_dma_tx_reset()
82 ring->mmio_base); in bgmac_dma_tx_reset()
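
The reset above is two-phase: request suspend via BGMAC_DMA_TX_SUSPEND, poll the status register until the ring reports suspended (or already disabled), then write 0 to the control register and poll again for the disabled state. Both waits go through the bgmac_wait_value() helper whose timeout message appears at line 33; below is a minimal sketch of such a poll-until-match helper, with the 10us polling step and return convention as assumptions rather than the driver's exact code.

/* Hedged sketch of a bgmac_wait_value()-style helper: poll a register
 * until (val & mask) == value or the timeout expires. */
static int poll_reg_timeout(struct bgmac *bgmac, u16 reg, u32 mask,
			    u32 value, int timeout_us)
{
	int i;

	for (i = 0; i < timeout_us / 10; i++) {
		u32 val = bgmac_read(bgmac, reg);

		if ((val & mask) == value)
			return 0;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return -ETIMEDOUT;
}
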
87 struct bgmac_dma_ring *ring) in bgmac_dma_tx_enable() argument
91 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL); in bgmac_dma_tx_enable()
92 if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) { in bgmac_dma_tx_enable()
107 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl); in bgmac_dma_tx_enable()
111 bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring, in bgmac_dma_tx_add_buf() argument
118 if (i == BGMAC_TX_RING_SLOTS - 1) in bgmac_dma_tx_add_buf()
123 slot = &ring->slots[i]; in bgmac_dma_tx_add_buf()
124 dma_desc = &ring->cpu_base[i]; in bgmac_dma_tx_add_buf()
125 dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr)); in bgmac_dma_tx_add_buf()
126 dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr)); in bgmac_dma_tx_add_buf()
127 dma_desc->ctl0 = cpu_to_le32(ctl0); in bgmac_dma_tx_add_buf()
128 dma_desc->ctl1 = cpu_to_le32(ctl1); in bgmac_dma_tx_add_buf()
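
bgmac_dma_tx_add_buf() fills one descriptor: the slot's DMA address split into low/high words, then the two control words, all stored little-endian. The check at line 118 identifies the last physical slot; the lines elided from this listing set an end-of-table flag there so the engine wraps back to the ring base. A hedged reconstruction of that flag handling (BGMAC_DESC_CTL0_EOT is the driver's bit name; the surrounding lines are reconstructed, not quoted):

if (i == BGMAC_TX_RING_SLOTS - 1)
	ctl0 |= BGMAC_DESC_CTL0_EOT;	/* last slot: tell the DMA engine to wrap */
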
132 struct bgmac_dma_ring *ring, in bgmac_dma_tx_add() argument
135 struct device *dma_dev = bgmac->dma_dev; in bgmac_dma_tx_add()
136 struct net_device *net_dev = bgmac->net_dev; in bgmac_dma_tx_add()
137 int index = ring->end % BGMAC_TX_RING_SLOTS; in bgmac_dma_tx_add()
138 struct bgmac_slot_info *slot = &ring->slots[index]; in bgmac_dma_tx_add()
143 if (skb->len > BGMAC_DESC_CTL1_LEN) { in bgmac_dma_tx_add()
144 netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len); in bgmac_dma_tx_add()
148 if (skb->ip_summed == CHECKSUM_PARTIAL) in bgmac_dma_tx_add()
151 nr_frags = skb_shinfo(skb)->nr_frags; in bgmac_dma_tx_add()
153 /* ring->end - ring->start will return the number of valid slots, in bgmac_dma_tx_add()
154 * even when ring->end overflows in bgmac_dma_tx_add()
156 if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) { in bgmac_dma_tx_add()
157 netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n"); in bgmac_dma_tx_add()
162 slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb), in bgmac_dma_tx_add()
164 if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr))) in bgmac_dma_tx_add()
171 bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags); in bgmac_dma_tx_add()
175 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in bgmac_dma_tx_add()
179 slot = &ring->slots[index]; in bgmac_dma_tx_add()
180 slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0, in bgmac_dma_tx_add()
182 if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr))) in bgmac_dma_tx_add()
185 if (i == nr_frags - 1) in bgmac_dma_tx_add()
188 bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags); in bgmac_dma_tx_add()
191 slot->skb = skb; in bgmac_dma_tx_add()
192 netdev_sent_queue(net_dev, skb->len); in bgmac_dma_tx_add()
193 ring->end += nr_frags + 1; in bgmac_dma_tx_add()
197 	/* Increase ring->end to point to the empty slot. We tell hardware the first in bgmac_dma_tx_add()
200 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX, in bgmac_dma_tx_add()
201 ring->index_base + in bgmac_dma_tx_add()
202 (ring->end % BGMAC_TX_RING_SLOTS) * in bgmac_dma_tx_add()
205 if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8) in bgmac_dma_tx_add()
211 dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb), in bgmac_dma_tx_add()
214 while (i-- > 0) { in bgmac_dma_tx_add()
215 int index = (ring->end + i) % BGMAC_TX_RING_SLOTS; in bgmac_dma_tx_add()
216 struct bgmac_slot_info *slot = &ring->slots[index]; in bgmac_dma_tx_add()
217 u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1); in bgmac_dma_tx_add()
220 dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE); in bgmac_dma_tx_add()
224 netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n", in bgmac_dma_tx_add()
225 ring->mmio_base); in bgmac_dma_tx_add()
229 net_dev->stats.tx_dropped++; in bgmac_dma_tx_add()
230 net_dev->stats.tx_errors++; in bgmac_dma_tx_add()
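
The comment at lines 153-154 relies on unsigned modular arithmetic: ring->start and ring->end are free-running u32 counters that are only reduced modulo BGMAC_TX_RING_SLOTS when indexing, so end - start remains the correct occupancy even after end wraps past 2^32. A standalone illustration (not driver code):

/* Demonstrates why ring->end - ring->start is safe across u32 overflow. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = UINT32_MAX - 1;	/* 0xfffffffe */
	uint32_t end = start + 5;		/* wraps to 0x00000003 */

	assert(end - start == 5);	/* occupancy still correct (mod 2^32) */
	assert(end % 128 == 3);		/* slot index, assuming a 128-slot ring */
	return 0;
}
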
235 static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) in bgmac_dma_tx_free() argument
237 struct device *dma_dev = bgmac->dma_dev; in bgmac_dma_tx_free()
242 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); in bgmac_dma_tx_free()
244 empty_slot -= ring->index_base; in bgmac_dma_tx_free()
248 while (ring->start != ring->end) { in bgmac_dma_tx_free()
249 int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; in bgmac_dma_tx_free()
250 struct bgmac_slot_info *slot = &ring->slots[slot_idx]; in bgmac_dma_tx_free()
257 ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0); in bgmac_dma_tx_free()
258 ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1); in bgmac_dma_tx_free()
262 dma_unmap_single(dma_dev, slot->dma_addr, len, in bgmac_dma_tx_free()
265 dma_unmap_page(dma_dev, slot->dma_addr, len, in bgmac_dma_tx_free()
268 if (slot->skb) { in bgmac_dma_tx_free()
269 bgmac->net_dev->stats.tx_bytes += slot->skb->len; in bgmac_dma_tx_free()
270 bgmac->net_dev->stats.tx_packets++; in bgmac_dma_tx_free()
271 bytes_compl += slot->skb->len; in bgmac_dma_tx_free()
275 dev_kfree_skb(slot->skb); in bgmac_dma_tx_free()
276 slot->skb = NULL; in bgmac_dma_tx_free()
279 slot->dma_addr = 0; in bgmac_dma_tx_free()
280 ring->start++; in bgmac_dma_tx_free()
286 netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl); in bgmac_dma_tx_free()
288 if (netif_queue_stopped(bgmac->net_dev)) in bgmac_dma_tx_free()
289 netif_wake_queue(bgmac->net_dev); in bgmac_dma_tx_free()
292 static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) in bgmac_dma_rx_reset() argument
294 if (!ring->mmio_base) in bgmac_dma_rx_reset()
297 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0); in bgmac_dma_rx_reset()
299 ring->mmio_base + BGMAC_DMA_RX_STATUS, in bgmac_dma_rx_reset()
302 dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n", in bgmac_dma_rx_reset()
303 ring->mmio_base); in bgmac_dma_rx_reset()
307 struct bgmac_dma_ring *ring) in bgmac_dma_rx_enable() argument
311 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); in bgmac_dma_rx_enable()
313 /* preserve ONLY bits 16-17 from current hardware value */ in bgmac_dma_rx_enable()
316 if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { in bgmac_dma_rx_enable()
330 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl); in bgmac_dma_rx_enable()
336 struct device *dma_dev = bgmac->dma_dev; in bgmac_dma_rx_skb_for_slot()
344 return -ENOMEM; in bgmac_dma_rx_skb_for_slot()
346 /* Poison - if everything goes fine, hardware will overwrite it */ in bgmac_dma_rx_skb_for_slot()
348 rx->len = cpu_to_le16(0xdead); in bgmac_dma_rx_skb_for_slot()
349 rx->flags = cpu_to_le16(0xbeef); in bgmac_dma_rx_skb_for_slot()
355 netdev_err(bgmac->net_dev, "DMA mapping error\n"); in bgmac_dma_rx_skb_for_slot()
357 return -ENOMEM; in bgmac_dma_rx_skb_for_slot()
361 slot->buf = buf; in bgmac_dma_rx_skb_for_slot()
362 slot->dma_addr = dma_addr; in bgmac_dma_rx_skb_for_slot()
368 struct bgmac_dma_ring *ring) in bgmac_dma_rx_update_index() argument
372 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, in bgmac_dma_rx_update_index()
373 ring->index_base + in bgmac_dma_rx_update_index()
374 ring->end * sizeof(struct bgmac_dma_desc)); in bgmac_dma_rx_update_index()
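
Note that the hardware index register takes a byte offset into the descriptor table, not a slot number: index_base (nonzero only for unaligned rings, see bgmac_dma_alloc() below) plus the slot index times the descriptor size. A worked example as a small helper:

/* Worked example: hardware ring indices are byte offsets.  Assuming
 * sizeof(struct bgmac_dma_desc) == 16 (addr_low, addr_high, ctl0, ctl1,
 * the four 32-bit words filled in bgmac_dma_tx_add_buf() above): */
static u32 slot_to_hw_index(u32 index_base, u32 slot)
{
	return index_base + slot * 16;	/* slot 5 -> index_base + 80 */
}
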
378 struct bgmac_dma_ring *ring, int desc_idx) in bgmac_dma_rx_setup_desc() argument
380 struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx; in bgmac_dma_rx_setup_desc()
383 if (desc_idx == BGMAC_RX_RING_SLOTS - 1) in bgmac_dma_rx_setup_desc()
391 dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr)); in bgmac_dma_rx_setup_desc()
392 dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr)); in bgmac_dma_rx_setup_desc()
393 dma_desc->ctl0 = cpu_to_le32(ctl0); in bgmac_dma_rx_setup_desc()
394 dma_desc->ctl1 = cpu_to_le32(ctl1); in bgmac_dma_rx_setup_desc()
396 ring->end = desc_idx; in bgmac_dma_rx_setup_desc()
402 struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; in bgmac_dma_rx_poison_buf()
404 dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE, in bgmac_dma_rx_poison_buf()
406 rx->len = cpu_to_le16(0xdead); in bgmac_dma_rx_poison_buf()
407 rx->flags = cpu_to_le16(0xbeef); in bgmac_dma_rx_poison_buf()
408 dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE, in bgmac_dma_rx_poison_buf()
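
The 0xdead/0xbeef sentinels written here (and at lines 348-349 when a buffer is first allocated) let the RX path detect a descriptor that completed without the hardware ever writing the buffer; the CPU writes are bracketed by dma_sync_single_for_cpu()/_for_device() because the buffer is a streaming DMA mapping. A minimal sketch of the consumer-side test follows; the exact condition is an assumption based on the "poisoned packet" error path in bgmac_dma_rx_read() below.

/* Hedged sketch: the precise check in bgmac_dma_rx_read() is elided from
 * this listing, so treat the condition below as a reconstruction. */
static bool rx_header_poisoned(const struct bgmac_rx_header *rx)
{
	return le16_to_cpu(rx->len) == 0xdead &&
	       le16_to_cpu(rx->flags) == 0xbeef;
}
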
412 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, in bgmac_dma_rx_read() argument
418 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS); in bgmac_dma_rx_read()
420 end_slot -= ring->index_base; in bgmac_dma_rx_read()
424 while (ring->start != end_slot) { in bgmac_dma_rx_read()
425 struct device *dma_dev = bgmac->dma_dev; in bgmac_dma_rx_read()
426 struct bgmac_slot_info *slot = &ring->slots[ring->start]; in bgmac_dma_rx_read()
427 struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; in bgmac_dma_rx_read()
429 void *buf = slot->buf; in bgmac_dma_rx_read()
430 dma_addr_t dma_addr = slot->dma_addr; in bgmac_dma_rx_read()
445 len = le16_to_cpu(rx->len); in bgmac_dma_rx_read()
446 flags = le16_to_cpu(rx->flags); in bgmac_dma_rx_read()
450 netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n", in bgmac_dma_rx_read()
451 ring->start); in bgmac_dma_rx_read()
453 bgmac->net_dev->stats.rx_errors++; in bgmac_dma_rx_read()
458 netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n", in bgmac_dma_rx_read()
459 ring->start); in bgmac_dma_rx_read()
461 bgmac->net_dev->stats.rx_length_errors++; in bgmac_dma_rx_read()
462 bgmac->net_dev->stats.rx_errors++; in bgmac_dma_rx_read()
467 len -= ETH_FCS_LEN; in bgmac_dma_rx_read()
471 netdev_err(bgmac->net_dev, "build_skb failed\n"); in bgmac_dma_rx_read()
473 bgmac->net_dev->stats.rx_errors++; in bgmac_dma_rx_read()
482 skb->protocol = eth_type_trans(skb, bgmac->net_dev); in bgmac_dma_rx_read()
483 bgmac->net_dev->stats.rx_bytes += len; in bgmac_dma_rx_read()
484 bgmac->net_dev->stats.rx_packets++; in bgmac_dma_rx_read()
485 napi_gro_receive(&bgmac->napi, skb); in bgmac_dma_rx_read()
489 bgmac_dma_rx_setup_desc(bgmac, ring, ring->start); in bgmac_dma_rx_read()
491 if (++ring->start >= BGMAC_RX_RING_SLOTS) in bgmac_dma_rx_read()
492 ring->start = 0; in bgmac_dma_rx_read()
498 bgmac_dma_rx_update_index(bgmac, ring); in bgmac_dma_rx_read()
503 /* Does ring support unaligned addressing? */
505 struct bgmac_dma_ring *ring, in bgmac_dma_unaligned() argument
510 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, in bgmac_dma_unaligned()
512 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO)) in bgmac_dma_unaligned()
516 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, in bgmac_dma_unaligned()
518 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO)) in bgmac_dma_unaligned()
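
bgmac_dma_unaligned() probes the hardware by writing a test pattern into the low bits of the ring-base register and reading it back: cores that require an aligned ring base hard-wire those bits to zero, so a nonzero readback means unaligned addressing works. The pattern, condensed into a helper as a sketch (the 0xff0 probe value is an assumption here):

/* Sketch of the probe above: do low ring-base address bits stick? */
static bool ringlo_low_bits_writable(struct bgmac *bgmac, u16 ringlo_reg)
{
	bgmac_write(bgmac, ringlo_reg, 0xff0);	/* nonzero low-bit pattern */
	return bgmac_read(bgmac, ringlo_reg) != 0;
}

When the probe succeeds, bgmac_dma_alloc() below sets ring->index_base = lower_32_bits(ring->dma_base), since the index registers then count from the unaligned base rather than from zero.
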
526 struct bgmac_dma_ring *ring) in bgmac_dma_tx_ring_free() argument
528 struct device *dma_dev = bgmac->dma_dev; in bgmac_dma_tx_ring_free()
529 struct bgmac_dma_desc *dma_desc = ring->cpu_base; in bgmac_dma_tx_ring_free()
537 slot = &ring->slots[i]; in bgmac_dma_tx_ring_free()
538 dev_kfree_skb(slot->skb); in bgmac_dma_tx_ring_free()
540 if (!slot->dma_addr) in bgmac_dma_tx_ring_free()
543 if (slot->skb) in bgmac_dma_tx_ring_free()
544 dma_unmap_single(dma_dev, slot->dma_addr, in bgmac_dma_tx_ring_free()
547 dma_unmap_page(dma_dev, slot->dma_addr, in bgmac_dma_tx_ring_free()
553 struct bgmac_dma_ring *ring) in bgmac_dma_rx_ring_free() argument
555 struct device *dma_dev = bgmac->dma_dev; in bgmac_dma_rx_ring_free()
560 slot = &ring->slots[i]; in bgmac_dma_rx_ring_free()
561 if (!slot->dma_addr) in bgmac_dma_rx_ring_free()
564 dma_unmap_single(dma_dev, slot->dma_addr, in bgmac_dma_rx_ring_free()
567 put_page(virt_to_head_page(slot->buf)); in bgmac_dma_rx_ring_free()
568 slot->dma_addr = 0; in bgmac_dma_rx_ring_free()
573 struct bgmac_dma_ring *ring, in bgmac_dma_ring_desc_free() argument
576 struct device *dma_dev = bgmac->dma_dev; in bgmac_dma_ring_desc_free()
579 if (!ring->cpu_base) in bgmac_dma_ring_desc_free()
582 /* Free ring of descriptors */ in bgmac_dma_ring_desc_free()
584 dma_free_coherent(dma_dev, size, ring->cpu_base, in bgmac_dma_ring_desc_free()
585 ring->dma_base); in bgmac_dma_ring_desc_free()
593 bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]); in bgmac_dma_cleanup()
596 bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]); in bgmac_dma_cleanup()
604 bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i], in bgmac_dma_free()
608 bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i], in bgmac_dma_free()
614 struct device *dma_dev = bgmac->dma_dev; in bgmac_dma_alloc()
615 struct bgmac_dma_ring *ring; in bgmac_dma_alloc() local
618 int size; /* ring size: different for Tx and Rx */ in bgmac_dma_alloc()
624 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) { in bgmac_dma_alloc()
626 dev_err(bgmac->dev, "Core does not report 64-bit DMA\n"); in bgmac_dma_alloc()
627 return -ENOTSUPP; in bgmac_dma_alloc()
632 ring = &bgmac->tx_ring[i]; in bgmac_dma_alloc()
633 ring->mmio_base = ring_base[i]; in bgmac_dma_alloc()
635 /* Alloc ring of descriptors */ in bgmac_dma_alloc()
637 ring->cpu_base = dma_alloc_coherent(dma_dev, size, in bgmac_dma_alloc()
638 &ring->dma_base, in bgmac_dma_alloc()
640 if (!ring->cpu_base) { in bgmac_dma_alloc()
641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", in bgmac_dma_alloc()
642 ring->mmio_base); in bgmac_dma_alloc()
646 ring->unaligned = bgmac_dma_unaligned(bgmac, ring, in bgmac_dma_alloc()
648 if (ring->unaligned) in bgmac_dma_alloc()
649 ring->index_base = lower_32_bits(ring->dma_base); in bgmac_dma_alloc()
651 ring->index_base = 0; in bgmac_dma_alloc()
657 ring = &bgmac->rx_ring[i]; in bgmac_dma_alloc()
658 ring->mmio_base = ring_base[i]; in bgmac_dma_alloc()
660 /* Alloc ring of descriptors */ in bgmac_dma_alloc()
662 ring->cpu_base = dma_alloc_coherent(dma_dev, size, in bgmac_dma_alloc()
663 &ring->dma_base, in bgmac_dma_alloc()
665 if (!ring->cpu_base) { in bgmac_dma_alloc()
666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", in bgmac_dma_alloc()
667 ring->mmio_base); in bgmac_dma_alloc()
671 ring->unaligned = bgmac_dma_unaligned(bgmac, ring, in bgmac_dma_alloc()
673 if (ring->unaligned) in bgmac_dma_alloc()
674 ring->index_base = lower_32_bits(ring->dma_base); in bgmac_dma_alloc()
676 ring->index_base = 0; in bgmac_dma_alloc()
683 return -ENOMEM; in bgmac_dma_alloc()
688 struct bgmac_dma_ring *ring; in bgmac_dma_init() local
692 ring = &bgmac->tx_ring[i]; in bgmac_dma_init()
694 if (!ring->unaligned) in bgmac_dma_init()
695 bgmac_dma_tx_enable(bgmac, ring); in bgmac_dma_init()
696 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, in bgmac_dma_init()
697 lower_32_bits(ring->dma_base)); in bgmac_dma_init()
698 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI, in bgmac_dma_init()
699 upper_32_bits(ring->dma_base)); in bgmac_dma_init()
700 if (ring->unaligned) in bgmac_dma_init()
701 bgmac_dma_tx_enable(bgmac, ring); in bgmac_dma_init()
703 ring->start = 0; in bgmac_dma_init()
704 	ring->end = 0;	/* Points to the slot that should *not* be read */ in bgmac_dma_init()
710 ring = &bgmac->rx_ring[i]; in bgmac_dma_init()
712 if (!ring->unaligned) in bgmac_dma_init()
713 bgmac_dma_rx_enable(bgmac, ring); in bgmac_dma_init()
714 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, in bgmac_dma_init()
715 lower_32_bits(ring->dma_base)); in bgmac_dma_init()
716 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI, in bgmac_dma_init()
717 upper_32_bits(ring->dma_base)); in bgmac_dma_init()
718 if (ring->unaligned) in bgmac_dma_init()
719 bgmac_dma_rx_enable(bgmac, ring); in bgmac_dma_init()
721 ring->start = 0; in bgmac_dma_init()
722 ring->end = 0; in bgmac_dma_init()
724 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]); in bgmac_dma_init()
728 bgmac_dma_rx_setup_desc(bgmac, ring, j); in bgmac_dma_init()
731 bgmac_dma_rx_update_index(bgmac, ring); in bgmac_dma_init()
756 if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) in bgmac_umac_cmd_maskset()
777 tmp = (addr[4] << 8) | addr[5]; in bgmac_write_mac_address()
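
Line 777 packs the last two bytes of the MAC address; the driver programs the address as two registers, the first four bytes big-endian in one 32-bit word and bytes 4-5 in the low half of a second. A hedged reconstruction of the whole function (the BGMAC_MACADDR_* names follow my reading of the driver's header; treat the surrounding code as an assumption):

static void write_mac_address_sketch(struct bgmac *bgmac, const u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);	/* bytes 0..3 */
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);	/* bytes 4..5 */
}
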
785 if (net_dev->flags & IFF_PROMISC) in bgmac_set_rx_mode()
796 if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
798 bgmac->mib_tx_regs[i] =
800 BGMAC_TX_GOOD_OCTETS + (i * 4));
802 bgmac->mib_rx_regs[i] =
804 BGMAC_RX_GOOD_OCTETS + (i * 4));
815 if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB) in bgmac_clear_mib()
820 bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4)); in bgmac_clear_mib()
822 bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4)); in bgmac_clear_mib()
825 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
831 switch (bgmac->mac_speed) { in bgmac_mac_speed()
845 dev_err(bgmac->dev, "Unsupported speed: %d\n", in bgmac_mac_speed()
846 bgmac->mac_speed); in bgmac_mac_speed()
849 if (bgmac->mac_duplex == DUPLEX_HALF) in bgmac_mac_speed()
857 if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) { in bgmac_miiconfig()
858 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) { in bgmac_miiconfig()
863 bgmac->mac_speed = SPEED_2500; in bgmac_miiconfig()
864 bgmac->mac_duplex = DUPLEX_FULL; in bgmac_miiconfig()
872 bgmac->mac_speed = SPEED_100; in bgmac_miiconfig()
873 bgmac->mac_duplex = DUPLEX_FULL; in bgmac_miiconfig()
884 if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) in bgmac_chip_reset_idm_config()
888 if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) { in bgmac_chip_reset_idm_config()
893 if (bgmac->in_init || !bgmac->has_robosw) in bgmac_chip_reset_idm_config()
899 if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw)) in bgmac_chip_reset_idm_config()
905 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
912 if (!bgmac->stats_grabbed) { in bgmac_chip_reset()
914 bgmac->stats_grabbed = true; in bgmac_chip_reset()
918 bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]); in bgmac_chip_reset()
924 bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]); in bgmac_chip_reset()
929 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) in bgmac_chip_reset()
933 if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) { in bgmac_chip_reset()
942 if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) { in bgmac_chip_reset()
946 char buf[4]; in bgmac_chip_reset()
950 dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n", in bgmac_chip_reset()
953 et_swtype <<= 4; in bgmac_chip_reset()
955 } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) { in bgmac_chip_reset()
958 } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) { in bgmac_chip_reset()
965 } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) { in bgmac_chip_reset()
969 char buf[4]; in bgmac_chip_reset()
973 dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n", in bgmac_chip_reset()
976 } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) { in bgmac_chip_reset()
980 bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK | in bgmac_chip_reset()
983 } else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) { in bgmac_chip_reset()
988 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset in bgmac_chip_reset()
993 if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) in bgmac_chip_reset()
1017 bgmac->mac_speed = SPEED_UNKNOWN; in bgmac_chip_reset()
1018 bgmac->mac_duplex = DUPLEX_UNKNOWN; in bgmac_chip_reset()
1021 if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL) in bgmac_chip_reset()
1027 if (bgmac->mii_bus) in bgmac_chip_reset()
1028 bgmac->mii_bus->reset(bgmac->mii_bus); in bgmac_chip_reset()
1030 netdev_reset_queue(bgmac->net_dev); in bgmac_chip_reset()
1035 bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask); in bgmac_chip_intrs_on()
1044 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
1051 if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) in bgmac_enable()
1065 if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) in bgmac_enable()
1067 if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2) in bgmac_enable()
1071 if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 | in bgmac_enable()
1075 if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1) in bgmac_enable()
1084 if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) { in bgmac_enable()
1092 mdp = (bp_clk * 128 / 1000) - 3; in bgmac_enable()
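
The RX queue clock divisor is derived from the backplane clock; assuming bp_clk holds the bus clock in MHz (the conversion from Hz is elided from this listing), a 250 MHz backplane works out as follows:

/* Worked example of the divisor formula above; that bp_clk is the
 * backplane clock in MHz is an assumption. */
static u32 rxq_clk_divisor(u32 bp_clk_mhz)
{
	return (bp_clk_mhz * 128 / 1000) - 3;	/* 250 MHz: 250*128/1000 = 32, mdp = 29 */
}
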
1098 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
1110 bgmac_set_rx_mode(bgmac->net_dev); in bgmac_chip_init()
1112 bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr); in bgmac_chip_init()
1114 if (bgmac->loopback) in bgmac_chip_init()
1131 int_status &= bgmac->int_mask; in bgmac_interrupt()
1138 dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status); in bgmac_interrupt()
1143 napi_schedule(&bgmac->napi); in bgmac_interrupt()
1156 bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]); in bgmac_poll()
1157 handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight); in bgmac_poll()
1189 err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED, in bgmac_open()
1190 net_dev->name, net_dev); in bgmac_open()
1192 dev_err(bgmac->dev, "IRQ request error: %d!\n", err); in bgmac_open()
1196 napi_enable(&bgmac->napi); in bgmac_open()
1198 phy_start(net_dev->phydev); in bgmac_open()
1211 phy_stop(net_dev->phydev); in bgmac_stop()
1213 napi_disable(&bgmac->napi); in bgmac_stop()
1215 free_irq(bgmac->irq, net_dev); in bgmac_stop()
1227 struct bgmac_dma_ring *ring; in bgmac_start_xmit() local
1230 ring = &bgmac->tx_ring[0]; in bgmac_start_xmit()
1231 return bgmac_dma_tx_add(bgmac, ring, skb); in bgmac_start_xmit()
1244 eth_hw_addr_set(net_dev, sa->sa_data); in bgmac_set_mac_address()
1245 bgmac_write_mac_address(bgmac, net_dev->dev_addr); in bgmac_set_mac_address()
1282 { 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
1284 { 4, BGMAC_TX_PKTS, "tx_pkts" },
1285 { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
1286 { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
1287 { 4, BGMAC_TX_LEN_64, "tx_64" },
1288 { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
1289 { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
1290 { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
1291 { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
1292 { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
1293 { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
1294 { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
1295 { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
1296 { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
1297 { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
1298 { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
1299 { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
1300 { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
1301 { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
1302 { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
1303 { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
1304 { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
1305 { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
1306 { 4, BGMAC_TX_DEFERED, "tx_defered" },
1307 { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
1308 { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
1309 { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
1310 { 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
1312 { 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
1314 { 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
1316 { 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
1319 { 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
1321 { 4, BGMAC_RX_PKTS, "rx_pkts" },
1322 { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
1323 { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
1324 { 4, BGMAC_RX_LEN_64, "rx_64" },
1325 { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
1326 { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
1327 { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
1328 { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
1329 { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
1330 { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
1331 { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
1332 { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
1333 { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
1334 { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
1335 { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
1336 { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
1337 { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
1338 { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
1339 { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
1340 { 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
1341 { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
1342 { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
1343 { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
1344 { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
1345 { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
1346 { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
1358 return -EOPNOTSUPP; in bgmac_get_sset_count()
1388 if (s->size == 8) in bgmac_get_ethtool_stats()
1389 val = (u64)bgmac_read(bgmac, s->offset + 4) << 32; in bgmac_get_ethtool_stats()
1390 val |= bgmac_read(bgmac, s->offset); in bgmac_get_ethtool_stats()
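
Counters declared with size 8 in the table above span two consecutive 32-bit registers: the high word at offset + 4 is read and shifted first, then OR'd with the low word. The same logic as a self-contained helper (a sketch; the driver inlines this in its stats loop):

/* Sketch: compose one 64-bit MIB counter from two 32-bit registers.
 * Reading high then low can tear if the counter rolls over between the
 * two reads; for statistics this is generally accepted. */
static u64 bgmac_read_stat64(struct bgmac *bgmac, u16 offset)
{
	u64 val = (u64)bgmac_read(bgmac, offset + 4) << 32;	/* high word */

	return val | bgmac_read(bgmac, offset);			/* low word */
}
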
1398 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); in bgmac_get_drvinfo()
1399 strscpy(info->bus_info, "AXI", sizeof(info->bus_info)); in bgmac_get_drvinfo()
1418 struct phy_device *phy_dev = net_dev->phydev; in bgmac_adjust_link()
1421 if (phy_dev->link) { in bgmac_adjust_link()
1422 if (phy_dev->speed != bgmac->mac_speed) { in bgmac_adjust_link()
1423 bgmac->mac_speed = phy_dev->speed; in bgmac_adjust_link()
1427 if (phy_dev->duplex != bgmac->mac_duplex) { in bgmac_adjust_link()
1428 bgmac->mac_duplex = phy_dev->duplex; in bgmac_adjust_link()
1452 dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); in bgmac_phy_connect_direct()
1456 err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, in bgmac_phy_connect_direct()
1459 dev_err(bgmac->dev, "Connecting PHY failed\n"); in bgmac_phy_connect_direct()
1477 net_dev->netdev_ops = &bgmac_netdev_ops; in bgmac_alloc()
1478 net_dev->ethtool_ops = &bgmac_ethtool_ops; in bgmac_alloc()
1481 bgmac->dev = dev; in bgmac_alloc()
1482 bgmac->net_dev = net_dev; in bgmac_alloc()
1490 struct net_device *net_dev = bgmac->net_dev; in bgmac_enet_probe()
1493 bgmac->in_init = true; in bgmac_enet_probe()
1495 net_dev->irq = bgmac->irq; in bgmac_enet_probe()
1496 SET_NETDEV_DEV(net_dev, bgmac->dev); in bgmac_enet_probe()
1497 dev_set_drvdata(bgmac->dev, bgmac); in bgmac_enet_probe()
1499 if (!is_valid_ether_addr(net_dev->dev_addr)) { in bgmac_enet_probe()
1500 dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", in bgmac_enet_probe()
1501 net_dev->dev_addr); in bgmac_enet_probe()
1503 dev_warn(bgmac->dev, "Using random MAC: %pM\n", in bgmac_enet_probe()
1504 net_dev->dev_addr); in bgmac_enet_probe()
1515 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) { in bgmac_enet_probe()
1516 if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) in bgmac_enet_probe()
1524 dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); in bgmac_enet_probe()
1528 bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; in bgmac_enet_probe()
1530 bgmac->int_mask &= ~BGMAC_IS_TX_MASK; in bgmac_enet_probe()
1532 netif_napi_add(net_dev, &bgmac->napi, bgmac_poll); in bgmac_enet_probe()
1536 dev_err(bgmac->dev, "Cannot connect to phy\n"); in bgmac_enet_probe()
1540 net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in bgmac_enet_probe()
1541 net_dev->hw_features = net_dev->features; in bgmac_enet_probe()
1542 net_dev->vlan_features = net_dev->features; in bgmac_enet_probe()
1545 net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN; in bgmac_enet_probe()
1547 bgmac->in_init = false; in bgmac_enet_probe()
1549 err = register_netdev(bgmac->net_dev); in bgmac_enet_probe()
1551 dev_err(bgmac->dev, "Cannot register net device\n"); in bgmac_enet_probe()
1560 phy_disconnect(net_dev->phydev); in bgmac_enet_probe()
1571 unregister_netdev(bgmac->net_dev); in bgmac_enet_remove()
1572 phy_disconnect(bgmac->net_dev->phydev); in bgmac_enet_remove()
1573 netif_napi_del(&bgmac->napi); in bgmac_enet_remove()
1580 if (!netif_running(bgmac->net_dev)) in bgmac_enet_suspend()
1583 phy_stop(bgmac->net_dev->phydev); in bgmac_enet_suspend()
1585 netif_stop_queue(bgmac->net_dev); in bgmac_enet_suspend()
1587 napi_disable(&bgmac->napi); in bgmac_enet_suspend()
1589 netif_tx_lock(bgmac->net_dev); in bgmac_enet_suspend()
1590 netif_device_detach(bgmac->net_dev); in bgmac_enet_suspend()
1591 netif_tx_unlock(bgmac->net_dev); in bgmac_enet_suspend()
1605 if (!netif_running(bgmac->net_dev)) in bgmac_enet_resume()
1614 napi_enable(&bgmac->napi); in bgmac_enet_resume()
1616 netif_tx_lock(bgmac->net_dev); in bgmac_enet_resume()
1617 netif_device_attach(bgmac->net_dev); in bgmac_enet_resume()
1618 netif_tx_unlock(bgmac->net_dev); in bgmac_enet_resume()
1620 netif_start_queue(bgmac->net_dev); in bgmac_enet_resume()
1622 phy_start(bgmac->net_dev->phydev); in bgmac_enet_resume()