Lines matching "asym-pause" in drivers/net/ethernet/renesas/ravb_main.c
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
14 #include <linux/dma-mapping.h>
58 return -ETIMEDOUT; in ravb_wait()
90 switch (priv->speed) { in ravb_set_rate_gbeth()
107 switch (priv->speed) { in ravb_set_rate_rcar()
148 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); in ravb_mdio_ctrl()
175 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; in ravb_get_mdio_data()
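
The two fragments above bit-bang the MDIO bus through the PIR register: ravb_mdio_ctrl() sets or clears one PIR bit, and ravb_get_mdio_data() samples PIR_MDI. A minimal sketch of how such helpers plug into the kernel's mdio-bitbang framework follows; the three set_* wrappers and their PIR bit names are assumptions, while ravb_mdio_ctrl(), ravb_get_mdio_data() and bb_ops (wired up in ravb_mdio_init() further down) do appear in this listing.

    /* Hedged sketch, see <linux/mdio-bitbang.h>; wrapper names and PIR
     * bit choices other than PIR_MDI are illustrative assumptions.
     */
    static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
    {
            ravb_mdio_ctrl(ctrl, PIR_MDC, level);           /* clock */
    }

    static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
    {
            ravb_mdio_ctrl(ctrl, PIR_MMD, output);          /* direction */
    }

    static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
    {
            ravb_mdio_ctrl(ctrl, PIR_MDO, value);           /* data out */
    }

    static const struct mdiobb_ops bb_ops = {
            .owner         = THIS_MODULE,
            .set_mdc       = ravb_set_mdc,
            .set_mdio_dir  = ravb_set_mdio_dir,
            .set_mdio_data = ravb_set_mdio_data,
            .get_mdio_data = ravb_get_mdio_data,            /* samples PIR_MDI */
    };
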
191 return priv->rx_ring[q].raw + priv->info->rx_desc_size * i; in ravb_rx_get_desc()
194 /* Free TX skb function for AVB-IP */
198 struct net_device_stats *stats = &priv->stats[q]; in ravb_tx_free()
199 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_tx_free()
205 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { in ravb_tx_free()
208 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * in ravb_tx_free()
210 desc = &priv->tx_ring[q][entry]; in ravb_tx_free()
211 txed = desc->die_dt == DT_FEMPTY; in ravb_tx_free()
216 size = le16_to_cpu(desc->ds_tagl) & TX_DS; in ravb_tx_free()
218 if (priv->tx_skb[q][entry / num_tx_desc]) { in ravb_tx_free()
219 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_tx_free()
222 if (entry % num_tx_desc == num_tx_desc - 1) { in ravb_tx_free()
224 dev_kfree_skb_any(priv->tx_skb[q][entry]); in ravb_tx_free()
225 priv->tx_skb[q][entry] = NULL; in ravb_tx_free()
227 stats->tx_packets++; in ravb_tx_free()
232 stats->tx_bytes += size; in ravb_tx_free()
233 desc->die_dt = DT_EEMPTY; in ravb_tx_free()
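
ravb_tx_free() above (and ravb_start_xmit() further down) index the TX ring with free-running cur_tx/dirty_tx counters: the counters only ever grow, occupancy is their difference, and the physical slot is the counter reduced modulo the ring length. A minimal sketch of the idiom, with hypothetical names:

    /* Free-running ring counters: 'cur' is the producer cursor, 'dirty'
     * the reclaim cursor; unsigned wrap-around keeps the difference valid.
     */
    struct tiny_ring {
            unsigned int cur;    /* next slot to fill */
            unsigned int dirty;  /* next slot to reclaim */
            unsigned int len;    /* number of slots */
    };

    static bool tiny_ring_full(const struct tiny_ring *r)
    {
            return r->cur - r->dirty >= r->len;
    }

    static unsigned int tiny_ring_slot(const struct tiny_ring *r,
                                       unsigned int counter)
    {
            return counter % r->len;
    }
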
243 if (!priv->rx_ring[q].raw) in ravb_rx_ring_free()
246 ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1); in ravb_rx_ring_free()
247 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw, in ravb_rx_ring_free()
248 priv->rx_desc_dma[q]); in ravb_rx_ring_free()
249 priv->rx_ring[q].raw = NULL; in ravb_rx_ring_free()
256 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_free()
262 if (priv->tx_ring[q]) { in ravb_ring_free()
266 (priv->num_tx_ring[q] * num_tx_desc + 1); in ravb_ring_free()
267 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], in ravb_ring_free()
268 priv->tx_desc_dma[q]); in ravb_ring_free()
269 priv->tx_ring[q] = NULL; in ravb_ring_free()
273 for (i = 0; i < priv->num_rx_ring[q]; i++) { in ravb_ring_free()
274 if (priv->rx_buffers[q][i].page) in ravb_ring_free()
275 page_pool_put_page(priv->rx_pool[q], in ravb_ring_free()
276 priv->rx_buffers[q][i].page, in ravb_ring_free()
279 kfree(priv->rx_buffers[q]); in ravb_ring_free()
280 priv->rx_buffers[q] = NULL; in ravb_ring_free()
281 page_pool_destroy(priv->rx_pool[q]); in ravb_ring_free()
284 kfree(priv->tx_align[q]); in ravb_ring_free()
285 priv->tx_align[q] = NULL; in ravb_ring_free()
290 kfree(priv->tx_skb[q]); in ravb_ring_free()
291 priv->tx_skb[q] = NULL; in ravb_ring_free()
299 const struct ravb_hw_info *info = priv->info; in ravb_alloc_rx_buffer()
304 rx_buff = &priv->rx_buffers[q][entry]; in ravb_alloc_rx_buffer()
305 size = info->rx_buffer_size; in ravb_alloc_rx_buffer()
306 rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset, in ravb_alloc_rx_buffer()
308 if (unlikely(!rx_buff->page)) { in ravb_alloc_rx_buffer()
312 rx_desc->ds_cc = cpu_to_le16(0); in ravb_alloc_rx_buffer()
313 return -ENOMEM; in ravb_alloc_rx_buffer()
316 dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset; in ravb_alloc_rx_buffer()
317 dma_sync_single_for_device(ndev->dev.parent, dma_addr, in ravb_alloc_rx_buffer()
318 info->rx_buffer_size, DMA_FROM_DEVICE); in ravb_alloc_rx_buffer()
319 rx_desc->dptr = cpu_to_le32(dma_addr); in ravb_alloc_rx_buffer()
324 rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size - in ravb_alloc_rx_buffer()
325 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - in ravb_alloc_rx_buffer()
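
ravb_alloc_rx_buffer() programs the usable descriptor length (ds_cc) as the page-pool buffer size minus SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) and further terms elided above: napi_build_skb(), used by both RX paths below, lays its struct skb_shared_info at the tail of the buffer it is handed, so the hardware must not write into that region. A hedged sketch of the relationship:

    /* Sketch, assuming 'buf' is a page_pool buffer of 'truesize' bytes:
     * the DMA length must stop short of the shared-info tailroom that
     * napi_build_skb() will claim later.
     */
    static struct sk_buff *build_rx_skb(void *buf, unsigned int truesize,
                                        unsigned int *max_dma_len)
    {
            *max_dma_len = truesize -
                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
            return napi_build_skb(buf, truesize);
    }
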
338 entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q]; in ravb_rx_ring_refill()
341 if (!priv->rx_buffers[q][entry].page) { in ravb_rx_ring_refill()
348 rx_desc->die_dt = DT_FEMPTY; in ravb_rx_ring_refill()
358 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_format()
362 unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * in ravb_ring_format()
366 priv->cur_rx[q] = 0; in ravb_ring_format()
367 priv->cur_tx[q] = 0; in ravb_ring_format()
368 priv->dirty_rx[q] = 0; in ravb_ring_format()
369 priv->dirty_tx[q] = 0; in ravb_ring_format()
375 rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]); in ravb_ring_format()
376 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_ring_format()
377 rx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
379 memset(priv->tx_ring[q], 0, tx_ring_size); in ravb_ring_format()
381 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; in ravb_ring_format()
383 tx_desc->die_dt = DT_EEMPTY; in ravb_ring_format()
386 tx_desc->die_dt = DT_EEMPTY; in ravb_ring_format()
389 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); in ravb_ring_format()
390 tx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
393 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q]; in ravb_ring_format()
394 desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
395 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_ring_format()
398 desc = &priv->desc_bat[q]; in ravb_ring_format()
399 desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
400 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); in ravb_ring_format()
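
ravb_ring_format() closes each ring with one extra descriptor of type DT_LINKFIX whose pointer aims back at the ring base, so the array becomes a circular list the DMAC follows on its own; the descriptor base-address table (desc_bat) uses the same type to point each queue entry at its ring. Sketch of the closing step (the descriptor type name matches the driver's ex_desc element and is assumed here):

    /* Slot n, one past the last data descriptor, loops back to slot 0. */
    static void close_ring(struct ravb_ex_rx_desc *ring, dma_addr_t ring_dma,
                           unsigned int n)
    {
            ring[n].dptr   = cpu_to_le32((u32)ring_dma);  /* back to slot 0 */
            ring[n].die_dt = DT_LINKFIX;                  /* follow the link */
    }
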
408 ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1); in ravb_alloc_rx_desc()
410 priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_alloc_rx_desc()
411 &priv->rx_desc_dma[q], in ravb_alloc_rx_desc()
414 return priv->rx_ring[q].raw; in ravb_alloc_rx_desc()
421 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_init()
425 .pool_size = priv->num_rx_ring[q], in ravb_ring_init()
427 .dev = ndev->dev.parent, in ravb_ring_init()
434 priv->rx_pool[q] = page_pool_create(&params); in ravb_ring_init()
435 if (IS_ERR(priv->rx_pool[q])) in ravb_ring_init()
439 priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q], in ravb_ring_init()
440 sizeof(*priv->rx_buffers[q]), GFP_KERNEL); in ravb_ring_init()
441 if (!priv->rx_buffers[q]) in ravb_ring_init()
445 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], in ravb_ring_init()
446 sizeof(*priv->tx_skb[q]), GFP_KERNEL); in ravb_ring_init()
447 if (!priv->tx_skb[q]) in ravb_ring_init()
455 priv->dirty_rx[q] = 0; in ravb_ring_init()
456 ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q]; in ravb_ring_init()
457 memset(priv->rx_ring[q].raw, 0, ring_size); in ravb_ring_init()
458 num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q], in ravb_ring_init()
460 if (num_filled != priv->num_rx_ring[q]) in ravb_ring_init()
465 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + in ravb_ring_init()
466 DPTR_ALIGN - 1, GFP_KERNEL); in ravb_ring_init()
467 if (!priv->tx_align[q]) in ravb_ring_init()
473 (priv->num_tx_ring[q] * num_tx_desc + 1); in ravb_ring_init()
474 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_ring_init()
475 &priv->tx_desc_dma[q], in ravb_ring_init()
477 if (!priv->tx_ring[q]) in ravb_ring_init()
485 return -ENOMEM; in ravb_ring_init()
490 bool tx_enable = ndev->features & NETIF_F_HW_CSUM; in ravb_csum_init_gbeth()
491 bool rx_enable = ndev->features & NETIF_F_RXCSUM; in ravb_csum_init_gbeth()
501 ndev->features &= ~NETIF_F_HW_CSUM; in ravb_csum_init_gbeth()
504 ndev->features &= ~NETIF_F_RXCSUM; in ravb_csum_init_gbeth()
522 if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { in ravb_emac_init_gbeth()
532 ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR); in ravb_emac_init_gbeth()
534 /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */ in ravb_emac_init_gbeth()
535 ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) | in ravb_emac_init_gbeth()
543 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_gbeth()
544 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_gbeth()
545 ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_gbeth()
547 /* E-MAC status register clear */ in ravb_emac_init_gbeth()
552 /* E-MAC interrupt enable register */ in ravb_emac_init_gbeth()
567 ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR); in ravb_emac_init_rcar()
569 /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ in ravb_emac_init_rcar()
571 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | in ravb_emac_init_rcar()
578 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_rcar()
579 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_rcar()
581 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_rcar()
583 /* E-MAC status register clear */ in ravb_emac_init_rcar()
586 /* E-MAC interrupt enable register */ in ravb_emac_init_rcar()
593 bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII; in ravb_emac_init_rcar_gen4()
600 /* E-MAC init function */
604 const struct ravb_hw_info *info = priv->info; in ravb_emac_init()
606 info->emac_init(ndev); in ravb_emac_init()
625 ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC); in ravb_dmac_init_gbeth()
647 const struct ravb_hw_info *info = priv->info; in ravb_dmac_init_rcar()
674 if (info->multi_irqs) { in ravb_dmac_init_rcar()
696 const struct ravb_hw_info *info = priv->info; in ravb_dmac_init()
704 error = info->dmac_init(ndev); in ravb_dmac_init()
708 /* Setting the control will start the AVB-DMAC process. */ in ravb_dmac_init()
724 while (count--) { in ravb_get_tx_tstamp()
732 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, in ravb_get_tx_tstamp()
734 skb = ts_skb->skb; in ravb_get_tx_tstamp()
735 tag = ts_skb->tag; in ravb_get_tx_tstamp()
736 list_del(&ts_skb->list); in ravb_get_tx_tstamp()
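
ravb_get_tx_tstamp() matches hardware TX timestamps to skbs by tag: ravb_start_xmit() (below) queues each PTP skb on ts_skb_list with a 10-bit tag, and this completion side walks the list for the tag the hardware reported. A hedged sketch of the loop body, with 'tfa_tag' standing in for the tag read from the timestamp FIFO (the register reads are elided) and the other locals as in the surrounding function:

    list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
            skb = ts_skb->skb;
            tag = ts_skb->tag;
            list_del(&ts_skb->list);
            kfree(ts_skb);
            if (tag == tfa_tag) {
                    skb_tstamp_tx(skb, &shhwtstamps); /* deliver timestamp */
                    dev_consume_skb_any(skb);
                    break;
            }
            dev_kfree_skb_any(skb);   /* stale entry, drop */
    }
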
761 if (unlikely(skb->len < sizeof(__sum16) * 2)) in ravb_rx_csum_gbeth()
765 last_frag = &shinfo->frags[shinfo->nr_frags - 1]; in ravb_rx_csum_gbeth()
772 hw_csum -= sizeof(__sum16); in ravb_rx_csum_gbeth()
775 hw_csum -= sizeof(__sum16); in ravb_rx_csum_gbeth()
781 skb_trim(skb, skb->len - 2 * sizeof(__sum16)); in ravb_rx_csum_gbeth()
784 if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto) in ravb_rx_csum_gbeth()
785 skb->ip_summed = CHECKSUM_UNNECESSARY; in ravb_rx_csum_gbeth()
795 if (unlikely(skb->len < sizeof(__sum16))) in ravb_rx_csum()
797 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); in ravb_rx_csum()
798 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); in ravb_rx_csum()
799 skb->ip_summed = CHECKSUM_COMPLETE; in ravb_rx_csum()
800 skb_trim(skb, skb->len - sizeof(__sum16)); in ravb_rx_csum()
807 const struct ravb_hw_info *info = priv->info; in ravb_rx_gbeth()
819 limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; in ravb_rx_gbeth()
820 stats = &priv->stats[q]; in ravb_rx_gbeth()
822 for (i = 0; i < limit; i++, priv->cur_rx[q]++) { in ravb_rx_gbeth()
823 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_rx_gbeth()
824 desc = &priv->rx_ring[q].desc[entry]; in ravb_rx_gbeth()
825 if (rx_packets == budget || desc->die_dt == DT_FEMPTY) in ravb_rx_gbeth()
830 desc_status = desc->msc; in ravb_rx_gbeth()
831 desc_len = le16_to_cpu(desc->ds_cc) & RX_DS; in ravb_rx_gbeth()
833 /* We use 0-byte descriptors to mark DMA mapping errors */ in ravb_rx_gbeth()
838 stats->multicast++; in ravb_rx_gbeth()
841 stats->rx_errors++; in ravb_rx_gbeth()
843 stats->rx_crc_errors++; in ravb_rx_gbeth()
845 stats->rx_frame_errors++; in ravb_rx_gbeth()
847 stats->rx_length_errors++; in ravb_rx_gbeth()
849 stats->rx_missed_errors++; in ravb_rx_gbeth()
854 rx_buff = &priv->rx_buffers[q][entry]; in ravb_rx_gbeth()
855 rx_addr = page_address(rx_buff->page) + rx_buff->offset; in ravb_rx_gbeth()
856 die_dt = desc->die_dt & 0xF0; in ravb_rx_gbeth()
857 dma_sync_single_for_cpu(ndev->dev.parent, in ravb_rx_gbeth()
858 le32_to_cpu(desc->dptr), in ravb_rx_gbeth()
866 info->rx_buffer_size); in ravb_rx_gbeth()
868 stats->rx_errors++; in ravb_rx_gbeth()
869 page_pool_put_page(priv->rx_pool[q], in ravb_rx_gbeth()
870 rx_buff->page, 0, in ravb_rx_gbeth()
881 priv->rx_1st_skb = skb; in ravb_rx_gbeth()
892 * multi-descriptor packet. in ravb_rx_gbeth()
894 if (unlikely(!priv->rx_1st_skb)) { in ravb_rx_gbeth()
895 stats->rx_errors++; in ravb_rx_gbeth()
896 page_pool_put_page(priv->rx_pool[q], in ravb_rx_gbeth()
897 rx_buff->page, 0, in ravb_rx_gbeth()
906 skb_add_rx_frag(priv->rx_1st_skb, in ravb_rx_gbeth()
907 skb_shinfo(priv->rx_1st_skb)->nr_frags, in ravb_rx_gbeth()
908 rx_buff->page, rx_buff->offset, in ravb_rx_gbeth()
909 desc_len, info->rx_buffer_size); in ravb_rx_gbeth()
915 skb = priv->rx_1st_skb; in ravb_rx_gbeth()
925 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_gbeth()
926 if (ndev->features & NETIF_F_RXCSUM) in ravb_rx_gbeth()
928 stats->rx_bytes += skb->len; in ravb_rx_gbeth()
929 napi_gro_receive(&priv->napi[q], skb); in ravb_rx_gbeth()
933 * non-NULL when valid. in ravb_rx_gbeth()
935 priv->rx_1st_skb = NULL; in ravb_rx_gbeth()
939 rx_buff->page = NULL; in ravb_rx_gbeth()
945 priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, in ravb_rx_gbeth()
946 priv->cur_rx[q] - priv->dirty_rx[q], in ravb_rx_gbeth()
949 stats->rx_packets += rx_packets; in ravb_rx_gbeth()
957 const struct ravb_hw_info *info = priv->info; in ravb_rx_rcar()
958 struct net_device_stats *stats = &priv->stats[q]; in ravb_rx_rcar()
968 limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; in ravb_rx_rcar()
969 for (i = 0; i < limit; i++, priv->cur_rx[q]++) { in ravb_rx_rcar()
970 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_rx_rcar()
971 desc = &priv->rx_ring[q].ex_desc[entry]; in ravb_rx_rcar()
972 if (rx_packets == budget || desc->die_dt == DT_FEMPTY) in ravb_rx_rcar()
977 desc_status = desc->msc; in ravb_rx_rcar()
978 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; in ravb_rx_rcar()
980 /* We use 0-byte descriptors to mark DMA mapping errors */ in ravb_rx_rcar()
985 stats->multicast++; in ravb_rx_rcar()
989 stats->rx_errors++; in ravb_rx_rcar()
991 stats->rx_crc_errors++; in ravb_rx_rcar()
993 stats->rx_frame_errors++; in ravb_rx_rcar()
995 stats->rx_length_errors++; in ravb_rx_rcar()
997 stats->rx_missed_errors++; in ravb_rx_rcar()
999 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; in ravb_rx_rcar()
1003 rx_buff = &priv->rx_buffers[q][entry]; in ravb_rx_rcar()
1004 rx_addr = page_address(rx_buff->page) + rx_buff->offset; in ravb_rx_rcar()
1005 dma_sync_single_for_cpu(ndev->dev.parent, in ravb_rx_rcar()
1006 le32_to_cpu(desc->dptr), in ravb_rx_rcar()
1009 skb = napi_build_skb(rx_addr, info->rx_buffer_size); in ravb_rx_rcar()
1011 stats->rx_errors++; in ravb_rx_rcar()
1012 page_pool_put_page(priv->rx_pool[q], in ravb_rx_rcar()
1013 rx_buff->page, 0, true); in ravb_rx_rcar()
1025 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << in ravb_rx_rcar()
1026 32) | le32_to_cpu(desc->ts_sl); in ravb_rx_rcar()
1027 ts.tv_nsec = le32_to_cpu(desc->ts_n); in ravb_rx_rcar()
1028 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); in ravb_rx_rcar()
1032 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_rcar()
1033 if (ndev->features & NETIF_F_RXCSUM) in ravb_rx_rcar()
1035 napi_gro_receive(&priv->napi[q], skb); in ravb_rx_rcar()
1037 stats->rx_bytes += pkt_len; in ravb_rx_rcar()
1040 rx_buff->page = NULL; in ravb_rx_rcar()
1045 priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, in ravb_rx_rcar()
1046 priv->cur_rx[q] - priv->dirty_rx[q], in ravb_rx_rcar()
1049 stats->rx_packets += rx_packets; in ravb_rx_rcar()
1057 const struct ravb_hw_info *info = priv->info; in ravb_rx()
1059 return info->receive(ndev, budget, q); in ravb_rx()
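
ravb_rx() is a one-line dispatch through the per-SoC ravb_hw_info descriptor; the listing also calls its set_rate, emac_init, dmac_init and set_feature hooks and tests capability flags such as nc_queues, gptp and ccc_gac. A reduced sketch of the pattern, limited to members visible here and with assumed types:

    struct ravb_hw_info {
            int  (*receive)(struct net_device *ndev, int budget, int q);
            void (*set_rate)(struct net_device *ndev);
            void (*emac_init)(struct net_device *ndev);
            int  (*dmac_init)(struct net_device *ndev);
            int  (*set_feature)(struct net_device *ndev,
                                netdev_features_t features);
            u32  rx_max_frame_size;
            u32  rx_buffer_size;
            u32  rx_desc_size;
            unsigned nc_queues:1;    /* has network-control queues */
            unsigned gptp:1;         /* gPTP timer in the DMAC */
            unsigned ccc_gac:1;      /* gPTP active in config mode */
            unsigned multi_irqs:1;   /* per-event interrupt lines */
    };

One such entry per SoC family is selected in ravb_probe() via of_device_get_match_data(), as the match table near the end of this listing shows.
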
1078 const struct ravb_hw_info *info = priv->info; in ravb_stop_dma()
1082 error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); in ravb_stop_dma()
1092 /* Stop the E-MAC's RX/TX processes. */ in ravb_stop_dma()
1100 /* Stop AVB-DMAC process */ in ravb_stop_dma()
1104 /* E-MAC interrupt handler */
1114 pm_wakeup_event(&priv->pdev->dev, 0); in ravb_emac_interrupt_unlocked()
1116 ndev->stats.tx_carrier_errors++; in ravb_emac_interrupt_unlocked()
1119 if (priv->no_avb_link) in ravb_emac_interrupt_unlocked()
1122 if (priv->avb_link_active_low) in ravb_emac_interrupt_unlocked()
1138 struct device *dev = &priv->pdev->dev; in ravb_emac_interrupt()
1148 spin_lock(&priv->lock); in ravb_emac_interrupt()
1150 spin_unlock(&priv->lock); in ravb_emac_interrupt()
1172 priv->stats[RAVB_BE].rx_over_errors++; in ravb_error_interrupt()
1176 priv->stats[RAVB_NC].rx_over_errors++; in ravb_error_interrupt()
1180 priv->rx_fifo_errors++; in ravb_error_interrupt()
1187 const struct ravb_hw_info *info = priv->info; in ravb_queue_interrupt()
1194 if (napi_schedule_prep(&priv->napi[q])) { in ravb_queue_interrupt()
1196 if (!info->irq_en_dis) { in ravb_queue_interrupt()
1203 __napi_schedule(&priv->napi[q]); in ravb_queue_interrupt()
1233 const struct ravb_hw_info *info = priv->info; in ravb_interrupt()
1234 struct device *dev = &priv->pdev->dev; in ravb_interrupt()
1243 spin_lock(&priv->lock); in ravb_interrupt()
1256 if (info->nc_queues) { in ravb_interrupt()
1257 for (q = RAVB_NC; q >= RAVB_BE; q--) { in ravb_interrupt()
1267 /* E-MAC status summary */ in ravb_interrupt()
1285 spin_unlock(&priv->lock); in ravb_interrupt()
1297 struct device *dev = &priv->pdev->dev; in ravb_multi_interrupt()
1306 spin_lock(&priv->lock); in ravb_multi_interrupt()
1326 spin_unlock(&priv->lock); in ravb_multi_interrupt()
1337 struct device *dev = &priv->pdev->dev; in ravb_dma_interrupt()
1345 spin_lock(&priv->lock); in ravb_dma_interrupt()
1351 spin_unlock(&priv->lock); in ravb_dma_interrupt()
1370 struct net_device *ndev = napi->dev; in ravb_poll()
1372 const struct ravb_hw_info *info = priv->info; in ravb_poll()
1374 int q = napi - priv->napi; in ravb_poll()
1384 spin_lock_irqsave(&priv->lock, flags); in ravb_poll()
1389 spin_unlock_irqrestore(&priv->lock, flags); in ravb_poll()
1392 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; in ravb_poll()
1393 if (info->nc_queues) in ravb_poll()
1394 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; in ravb_poll()
1395 if (priv->rx_over_errors != ndev->stats.rx_over_errors) in ravb_poll()
1396 ndev->stats.rx_over_errors = priv->rx_over_errors; in ravb_poll()
1397 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) in ravb_poll()
1398 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; in ravb_poll()
1401 /* Re-enable RX/TX interrupts */ in ravb_poll()
1402 spin_lock_irqsave(&priv->lock, flags); in ravb_poll()
1403 if (!info->irq_en_dis) { in ravb_poll()
1410 spin_unlock_irqrestore(&priv->lock, flags); in ravb_poll()
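
ravb_poll() follows the standard NAPI contract visible above: consume at most 'budget' packets, and only when the queue went idle complete NAPI and re-arm the RX/TX interrupts under the lock. A minimal generic skeleton of that shape (the my_* names are placeholders, not driver functions):

    static int my_poll(struct napi_struct *napi, int budget)
    {
            struct my_priv *priv = container_of(napi, struct my_priv, napi);
            int work = my_rx(priv, budget);  /* packets consumed, <= budget */

            if (work < budget && napi_complete_done(napi, work)) {
                    unsigned long flags;

                    spin_lock_irqsave(&priv->lock, flags);
                    my_irq_enable(priv);     /* re-arm RX/TX interrupts */
                    spin_unlock_irqrestore(&priv->lock, flags);
            }
            return work;
    }
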
1420 ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0); in ravb_set_duplex_gbeth()
1427 const struct ravb_hw_info *info = priv->info; in ravb_adjust_link()
1428 struct phy_device *phydev = ndev->phydev; in ravb_adjust_link()
1432 spin_lock_irqsave(&priv->lock, flags); in ravb_adjust_link()
1434 /* Disable TX and RX right here, if the E-MAC change is ignored */ in ravb_adjust_link()
1435 if (priv->no_avb_link) in ravb_adjust_link()
1438 if (phydev->link) { in ravb_adjust_link()
1439 if (info->half_duplex && phydev->duplex != priv->duplex) { in ravb_adjust_link()
1441 priv->duplex = phydev->duplex; in ravb_adjust_link()
1445 if (phydev->speed != priv->speed) { in ravb_adjust_link()
1447 priv->speed = phydev->speed; in ravb_adjust_link()
1448 info->set_rate(ndev); in ravb_adjust_link()
1450 if (!priv->link) { in ravb_adjust_link()
1453 priv->link = phydev->link; in ravb_adjust_link()
1455 } else if (priv->link) { in ravb_adjust_link()
1457 priv->link = 0; in ravb_adjust_link()
1458 priv->speed = 0; in ravb_adjust_link()
1459 if (info->half_duplex) in ravb_adjust_link()
1460 priv->duplex = -1; in ravb_adjust_link()
1463 /* Enable TX and RX right here, if the E-MAC change is ignored */ in ravb_adjust_link()
1464 if (priv->no_avb_link && phydev->link) in ravb_adjust_link()
1467 spin_unlock_irqrestore(&priv->lock, flags); in ravb_adjust_link()
1476 struct device_node *np = ndev->dev.parent->of_node; in ravb_phy_init()
1478 const struct ravb_hw_info *info = priv->info; in ravb_phy_init()
1484 priv->link = 0; in ravb_phy_init()
1485 priv->speed = 0; in ravb_phy_init()
1486 priv->duplex = -1; in ravb_phy_init()
1489 pn = of_parse_phandle(np, "phy-handle", 0); in ravb_phy_init()
1502 iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII in ravb_phy_init()
1503 : priv->phy_interface; in ravb_phy_init()
1508 err = -ENOENT; in ravb_phy_init()
1512 if (!info->half_duplex) { in ravb_phy_init()
1513 /* 10BASE, Pause and Asym Pause are not supported */ in ravb_phy_init()
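
This comment is the line the "asym-pause" query actually matches. The body that follows it is elided from the listing; presumably it masks the unsupported modes off the PHY's advertisement via phylib, along these lines (a sketch, not the verbatim driver code):

    /* Drop 10BASE-T plus the Pause and Asym Pause link modes when the
     * MAC cannot honour them (here: no half-duplex support).
     */
    phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
    phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
    phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
    phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
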
1544 phy_start(ndev->phydev); in ravb_phy_start()
1553 return priv->msg_enable; in ravb_get_msglevel()
1560 priv->msg_enable = value; in ravb_set_msglevel()
1618 const struct ravb_hw_info *info = priv->info; in ravb_get_sset_count()
1622 return info->stats_len; in ravb_get_sset_count()
1624 return -EOPNOTSUPP; in ravb_get_sset_count()
1632 const struct ravb_hw_info *info = priv->info; in ravb_get_ethtool_stats()
1637 num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1; in ravb_get_ethtool_stats()
1638 /* Device-specific stats */ in ravb_get_ethtool_stats()
1640 struct net_device_stats *stats = &priv->stats[q]; in ravb_get_ethtool_stats()
1642 data[i++] = priv->cur_rx[q]; in ravb_get_ethtool_stats()
1643 data[i++] = priv->cur_tx[q]; in ravb_get_ethtool_stats()
1644 data[i++] = priv->dirty_rx[q]; in ravb_get_ethtool_stats()
1645 data[i++] = priv->dirty_tx[q]; in ravb_get_ethtool_stats()
1646 data[i++] = stats->rx_packets; in ravb_get_ethtool_stats()
1647 data[i++] = stats->tx_packets; in ravb_get_ethtool_stats()
1648 data[i++] = stats->rx_bytes; in ravb_get_ethtool_stats()
1649 data[i++] = stats->tx_bytes; in ravb_get_ethtool_stats()
1650 data[i++] = stats->multicast; in ravb_get_ethtool_stats()
1651 data[i++] = stats->rx_errors; in ravb_get_ethtool_stats()
1652 data[i++] = stats->rx_crc_errors; in ravb_get_ethtool_stats()
1653 data[i++] = stats->rx_frame_errors; in ravb_get_ethtool_stats()
1654 data[i++] = stats->rx_length_errors; in ravb_get_ethtool_stats()
1655 data[i++] = stats->rx_missed_errors; in ravb_get_ethtool_stats()
1656 data[i++] = stats->rx_over_errors; in ravb_get_ethtool_stats()
1663 const struct ravb_hw_info *info = priv->info; in ravb_get_strings()
1667 memcpy(data, info->gstrings_stats, info->gstrings_size); in ravb_get_strings()
1679 ring->rx_max_pending = BE_RX_RING_MAX; in ravb_get_ringparam()
1680 ring->tx_max_pending = BE_TX_RING_MAX; in ravb_get_ringparam()
1681 ring->rx_pending = priv->num_rx_ring[RAVB_BE]; in ravb_get_ringparam()
1682 ring->tx_pending = priv->num_tx_ring[RAVB_BE]; in ravb_get_ringparam()
1691 const struct ravb_hw_info *info = priv->info; in ravb_set_ringparam()
1694 if (ring->tx_pending > BE_TX_RING_MAX || in ravb_set_ringparam()
1695 ring->rx_pending > BE_RX_RING_MAX || in ravb_set_ringparam()
1696 ring->tx_pending < BE_TX_RING_MIN || in ravb_set_ringparam()
1697 ring->rx_pending < BE_RX_RING_MIN) in ravb_set_ringparam()
1698 return -EINVAL; in ravb_set_ringparam()
1699 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in ravb_set_ringparam()
1700 return -EINVAL; in ravb_set_ringparam()
1705 if (info->gptp) in ravb_set_ringparam()
1714 synchronize_irq(ndev->irq); in ravb_set_ringparam()
1718 if (info->nc_queues) in ravb_set_ringparam()
1723 priv->num_rx_ring[RAVB_BE] = ring->rx_pending; in ravb_set_ringparam()
1724 priv->num_tx_ring[RAVB_BE] = ring->tx_pending; in ravb_set_ringparam()
1738 if (info->gptp) in ravb_set_ringparam()
1739 ravb_ptp_init(ndev, priv->pdev); in ravb_set_ringparam()
1751 const struct ravb_hw_info *hw_info = priv->info; in ravb_get_ts_info()
1753 if (hw_info->gptp || hw_info->ccc_gac) { in ravb_get_ts_info()
1754 info->so_timestamping = in ravb_get_ts_info()
1759 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); in ravb_get_ts_info()
1760 info->rx_filters = in ravb_get_ts_info()
1764 info->phc_index = ptp_clock_index(priv->ptp.clock); in ravb_get_ts_info()
1774 wol->supported = WAKE_MAGIC; in ravb_get_wol()
1775 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; in ravb_get_wol()
1781 const struct ravb_hw_info *info = priv->info; in ravb_set_wol()
1783 if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC)) in ravb_set_wol()
1784 return -EOPNOTSUPP; in ravb_set_wol()
1786 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in ravb_set_wol()
1788 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); in ravb_set_wol()
1813 const struct ravb_hw_info *info = priv->info; in ravb_set_config_mode()
1816 if (info->gptp) { in ravb_set_config_mode()
1822 } else if (info->ccc_gac) { in ravb_set_config_mode()
1834 const struct ravb_hw_info *info = priv->info; in ravb_set_gti()
1836 if (!(info->gptp || info->ccc_gac)) in ravb_set_gti()
1839 ravb_write(ndev, priv->gti_tiv, GTI); in ravb_set_gti()
1848 const struct ravb_hw_info *info = priv->info; in ravb_compute_gti()
1849 struct device *dev = ndev->dev.parent; in ravb_compute_gti()
1853 if (!(info->gptp || info->ccc_gac)) in ravb_compute_gti()
1856 if (info->gptp_ref_clk) in ravb_compute_gti()
1857 rate = clk_get_rate(priv->gptp_clk); in ravb_compute_gti()
1859 rate = clk_get_rate(priv->clk); in ravb_compute_gti()
1861 return -EINVAL; in ravb_compute_gti()
1866 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", in ravb_compute_gti()
1868 return -EINVAL; in ravb_compute_gti()
1870 priv->gti_tiv = inc; in ravb_compute_gti()
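
ravb_compute_gti() turns the reference clock rate into the gPTP timer increment that ravb_set_gti() writes to the GTI register. The conversion itself is elided above; a hedged sketch, assuming GTI.TIV is a fixed-point nanosecond increment scaled by 2^20 (the range check against the printed min/max then follows):

    u64 inc = div64_ul(1000000000ULL << 20, rate);  /* ns per tick, <<20 */
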
1882 if (!priv->info->internal_delay) in ravb_parse_delay_mode()
1885 if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) { in ravb_parse_delay_mode()
1887 priv->rxcidm = !!delay; in ravb_parse_delay_mode()
1890 if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) { in ravb_parse_delay_mode()
1892 priv->txcidm = !!delay; in ravb_parse_delay_mode()
1899 /* Fall back to legacy rgmii-*id behavior */ in ravb_parse_delay_mode()
1900 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in ravb_parse_delay_mode()
1901 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) { in ravb_parse_delay_mode()
1902 priv->rxcidm = 1; in ravb_parse_delay_mode()
1903 priv->rgmii_override = 1; in ravb_parse_delay_mode()
1906 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in ravb_parse_delay_mode()
1907 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { in ravb_parse_delay_mode()
1908 priv->txcidm = 1; in ravb_parse_delay_mode()
1909 priv->rgmii_override = 1; in ravb_parse_delay_mode()
1918 if (!priv->info->internal_delay) in ravb_set_delay_mode()
1921 if (priv->rxcidm) in ravb_set_delay_mode()
1923 if (priv->txcidm) in ravb_set_delay_mode()
1932 const struct ravb_hw_info *info = priv->info; in ravb_open()
1933 struct device *dev = &priv->pdev->dev; in ravb_open()
1936 napi_enable(&priv->napi[RAVB_BE]); in ravb_open()
1937 if (info->nc_queues) in ravb_open()
1938 napi_enable(&priv->napi[RAVB_NC]); in ravb_open()
1950 ravb_write(ndev, priv->desc_bat_dma, DBAT); in ravb_open()
1962 if (info->gptp || info->ccc_gac) in ravb_open()
1963 ravb_ptp_init(ndev, priv->pdev); in ravb_open()
1976 if (info->gptp || info->ccc_gac) in ravb_open()
1985 if (info->nc_queues) in ravb_open()
1986 napi_disable(&priv->napi[RAVB_NC]); in ravb_open()
1987 napi_disable(&priv->napi[RAVB_BE]); in ravb_open()
2001 ndev->stats.tx_errors++; in ravb_tx_timeout()
2003 schedule_work(&priv->work); in ravb_tx_timeout()
2010 const struct ravb_hw_info *info = priv->info; in ravb_tx_timeout_work()
2011 struct net_device *ndev = priv->ndev; in ravb_tx_timeout_work()
2016 schedule_work(&priv->work); in ravb_tx_timeout_work()
2023 if (info->gptp) in ravb_tx_timeout_work()
2033 * re-enables the TX and RX and skips the following in ravb_tx_timeout_work()
2034 * re-initialization procedure. in ravb_tx_timeout_work()
2041 if (info->nc_queues) in ravb_tx_timeout_work()
2048 * should return here to avoid re-enabling the TX and RX in in ravb_tx_timeout_work()
2059 if (info->gptp) in ravb_tx_timeout_work()
2060 ravb_ptp_init(ndev, priv->pdev); in ravb_tx_timeout_work()
2077 if (skb->protocol != htons(ETH_P_IP)) in ravb_can_tx_csum_gbeth()
2080 switch (ip->protocol) { in ravb_can_tx_csum_gbeth()
2088 if (udp_hdr(skb)->check == 0) in ravb_can_tx_csum_gbeth()
2102 const struct ravb_hw_info *info = priv->info; in ravb_start_xmit()
2103 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_start_xmit()
2113 if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb)) in ravb_start_xmit()
2116 spin_lock_irqsave(&priv->lock, flags); in ravb_start_xmit()
2117 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * in ravb_start_xmit()
2122 spin_unlock_irqrestore(&priv->lock, flags); in ravb_start_xmit()
2129 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); in ravb_start_xmit()
2130 priv->tx_skb[q][entry / num_tx_desc] = skb; in ravb_start_xmit()
2133 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + in ravb_start_xmit()
2135 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; in ravb_start_xmit()
2146 * length of the second DMA descriptor (skb->len - len) in ravb_start_xmit()
2152 memcpy(buffer, skb->data, len); in ravb_start_xmit()
2153 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
2155 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2158 desc = &priv->tx_ring[q][entry]; in ravb_start_xmit()
2159 desc->ds_tagl = cpu_to_le16(len); in ravb_start_xmit()
2160 desc->dptr = cpu_to_le32(dma_addr); in ravb_start_xmit()
2162 buffer = skb->data + len; in ravb_start_xmit()
2163 len = skb->len - len; in ravb_start_xmit()
2164 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
2166 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2171 desc = &priv->tx_ring[q][entry]; in ravb_start_xmit()
2172 len = skb->len; in ravb_start_xmit()
2173 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, in ravb_start_xmit()
2175 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2178 desc->ds_tagl = cpu_to_le16(len); in ravb_start_xmit()
2179 desc->dptr = cpu_to_le32(dma_addr); in ravb_start_xmit()
2182 if (info->gptp || info->ccc_gac) { in ravb_start_xmit()
2187 desc--; in ravb_start_xmit()
2188 dma_unmap_single(ndev->dev.parent, dma_addr, in ravb_start_xmit()
2193 ts_skb->skb = skb_get(skb); in ravb_start_xmit()
2194 ts_skb->tag = priv->ts_skb_tag++; in ravb_start_xmit()
2195 priv->ts_skb_tag &= 0x3ff; in ravb_start_xmit()
2196 list_add_tail(&ts_skb->list, &priv->ts_skb_list); in ravb_start_xmit()
2199 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in ravb_start_xmit()
2200 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; in ravb_start_xmit()
2201 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); in ravb_start_xmit()
2209 desc->die_dt = DT_FEND; in ravb_start_xmit()
2210 desc--; in ravb_start_xmit()
2211 desc->die_dt = DT_FSTART; in ravb_start_xmit()
2213 desc->die_dt = DT_FSINGLE; in ravb_start_xmit()
2217 priv->cur_tx[q] += num_tx_desc; in ravb_start_xmit()
2218 if (priv->cur_tx[q] - priv->dirty_tx[q] > in ravb_start_xmit()
2219 (priv->num_tx_ring[q] - 1) * num_tx_desc && in ravb_start_xmit()
2224 spin_unlock_irqrestore(&priv->lock, flags); in ravb_start_xmit()
2228 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_start_xmit()
2229 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); in ravb_start_xmit()
2232 priv->tx_skb[q][entry / num_tx_desc] = NULL; in ravb_start_xmit()
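
Near the end of ravb_start_xmit() above, a multi-descriptor frame is published tail-first: DT_FEND is written on the last descriptor, then the code steps back and writes DT_FSTART on the first. Since the DMAC polls die_dt, the start marker must become visible last; in the real driver a dma_wmb() separates the stores, though its exact placement here is an assumption:

    desc[1].die_dt = DT_FEND;    /* tail first: ignored until FSTART shows */
    dma_wmb();                   /* all descriptor fields reach memory */
    desc[0].die_dt = DT_FSTART;  /* now the DMAC may consume the chain */
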
2240 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : in ravb_select_queue()
2248 const struct ravb_hw_info *info = priv->info; in ravb_get_stats()
2250 struct device *dev = &priv->pdev->dev; in ravb_get_stats()
2252 nstats = &ndev->stats; in ravb_get_stats()
2259 stats0 = &priv->stats[RAVB_BE]; in ravb_get_stats()
2261 if (info->tx_counters) { in ravb_get_stats()
2262 nstats->tx_dropped += ravb_read(ndev, TROCR); in ravb_get_stats()
2266 if (info->carrier_counters) { in ravb_get_stats()
2267 nstats->collisions += ravb_read(ndev, CXR41); in ravb_get_stats()
2269 nstats->tx_carrier_errors += ravb_read(ndev, CXR42); in ravb_get_stats()
2273 nstats->rx_packets = stats0->rx_packets; in ravb_get_stats()
2274 nstats->tx_packets = stats0->tx_packets; in ravb_get_stats()
2275 nstats->rx_bytes = stats0->rx_bytes; in ravb_get_stats()
2276 nstats->tx_bytes = stats0->tx_bytes; in ravb_get_stats()
2277 nstats->multicast = stats0->multicast; in ravb_get_stats()
2278 nstats->rx_errors = stats0->rx_errors; in ravb_get_stats()
2279 nstats->rx_crc_errors = stats0->rx_crc_errors; in ravb_get_stats()
2280 nstats->rx_frame_errors = stats0->rx_frame_errors; in ravb_get_stats()
2281 nstats->rx_length_errors = stats0->rx_length_errors; in ravb_get_stats()
2282 nstats->rx_missed_errors = stats0->rx_missed_errors; in ravb_get_stats()
2283 nstats->rx_over_errors = stats0->rx_over_errors; in ravb_get_stats()
2284 if (info->nc_queues) { in ravb_get_stats()
2285 stats1 = &priv->stats[RAVB_NC]; in ravb_get_stats()
2287 nstats->rx_packets += stats1->rx_packets; in ravb_get_stats()
2288 nstats->tx_packets += stats1->tx_packets; in ravb_get_stats()
2289 nstats->rx_bytes += stats1->rx_bytes; in ravb_get_stats()
2290 nstats->tx_bytes += stats1->tx_bytes; in ravb_get_stats()
2291 nstats->multicast += stats1->multicast; in ravb_get_stats()
2292 nstats->rx_errors += stats1->rx_errors; in ravb_get_stats()
2293 nstats->rx_crc_errors += stats1->rx_crc_errors; in ravb_get_stats()
2294 nstats->rx_frame_errors += stats1->rx_frame_errors; in ravb_get_stats()
2295 nstats->rx_length_errors += stats1->rx_length_errors; in ravb_get_stats()
2296 nstats->rx_missed_errors += stats1->rx_missed_errors; in ravb_get_stats()
2297 nstats->rx_over_errors += stats1->rx_over_errors; in ravb_get_stats()
2311 spin_lock_irqsave(&priv->lock, flags); in ravb_set_rx_mode()
2313 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); in ravb_set_rx_mode()
2314 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_rx_mode()
2320 struct device_node *np = ndev->dev.parent->of_node; in ravb_close()
2322 const struct ravb_hw_info *info = priv->info; in ravb_close()
2324 struct device *dev = &priv->pdev->dev; in ravb_close()
2335 if (ndev->phydev) { in ravb_close()
2336 phy_stop(ndev->phydev); in ravb_close()
2337 phy_disconnect(ndev->phydev); in ravb_close()
2343 if (info->gptp || info->ccc_gac) in ravb_close()
2346 /* Set the config mode to stop the AVB-DMAC's processes */ in ravb_close()
2352 if (info->gptp || info->ccc_gac) { in ravb_close()
2353 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { in ravb_close()
2354 list_del(&ts_skb->list); in ravb_close()
2355 kfree_skb(ts_skb->skb); in ravb_close()
2360 cancel_work_sync(&priv->work); in ravb_close()
2362 if (info->nc_queues) in ravb_close()
2363 napi_disable(&priv->napi[RAVB_NC]); in ravb_close()
2364 napi_disable(&priv->napi[RAVB_BE]); in ravb_close()
2368 if (info->nc_queues) in ravb_close()
2391 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : in ravb_hwtstamp_get()
2393 switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { in ravb_hwtstamp_get()
2404 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? in ravb_hwtstamp_get()
2405 -EFAULT : 0; in ravb_hwtstamp_get()
2416 if (copy_from_user(&config, req->ifr_data, sizeof(config))) in ravb_hwtstamp_set()
2417 return -EFAULT; in ravb_hwtstamp_set()
2427 return -ERANGE; in ravb_hwtstamp_set()
2442 priv->tstamp_tx_ctrl = tstamp_tx_ctrl; in ravb_hwtstamp_set()
2443 priv->tstamp_rx_ctrl = tstamp_rx_ctrl; in ravb_hwtstamp_set()
2445 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? in ravb_hwtstamp_set()
2446 -EFAULT : 0; in ravb_hwtstamp_set()
2452 struct phy_device *phydev = ndev->phydev; in ravb_do_ioctl()
2455 return -EINVAL; in ravb_do_ioctl()
2458 return -ENODEV; in ravb_do_ioctl()
2474 WRITE_ONCE(ndev->mtu, new_mtu); in ravb_change_mtu()
2477 synchronize_irq(priv->emac_irq); in ravb_change_mtu()
2491 spin_lock_irqsave(&priv->lock, flags); in ravb_set_rx_csum()
2502 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_rx_csum()
2524 netdev_features_t changed = ndev->features ^ features; in ravb_set_features_gbeth()
2530 spin_lock_irqsave(&priv->lock, flags); in ravb_set_features_gbeth()
2554 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_features_gbeth()
2562 netdev_features_t changed = ndev->features ^ features; in ravb_set_features_rcar()
2574 const struct ravb_hw_info *info = priv->info; in ravb_set_features()
2575 struct device *dev = &priv->pdev->dev; in ravb_set_features()
2581 ret = info->set_feature(ndev, features); in ravb_set_features()
2590 ndev->features = features; in ravb_set_features()
2613 struct platform_device *pdev = priv->pdev; in ravb_mdio_init()
2614 struct device *dev = &pdev->dev; in ravb_mdio_init()
2621 priv->mdiobb.ops = &bb_ops; in ravb_mdio_init()
2624 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); in ravb_mdio_init()
2625 if (!priv->mii_bus) in ravb_mdio_init()
2626 return -ENOMEM; in ravb_mdio_init()
2629 priv->mii_bus->name = "ravb_mii"; in ravb_mdio_init()
2630 priv->mii_bus->parent = dev; in ravb_mdio_init()
2631 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in ravb_mdio_init()
2632 pdev->name, pdev->id); in ravb_mdio_init()
2635 mdio_node = of_get_child_by_name(dev->of_node, "mdio"); in ravb_mdio_init()
2638 mdio_node = of_node_get(dev->of_node); in ravb_mdio_init()
2640 error = of_mdiobus_register(priv->mii_bus, mdio_node); in ravb_mdio_init()
2645 pn = of_parse_phandle(dev->of_node, "phy-handle", 0); in ravb_mdio_init()
2648 phydev->mac_managed_pm = true; in ravb_mdio_init()
2649 put_device(&phydev->mdio.dev); in ravb_mdio_init()
2656 free_mdio_bitbang(priv->mii_bus); in ravb_mdio_init()
2664 mdiobus_unregister(priv->mii_bus); in ravb_mdio_release()
2667 free_mdio_bitbang(priv->mii_bus); in ravb_mdio_release()
2795 { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2796 { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2797 { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2798 { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2799 { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2800 { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
2801 { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2802 { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2810 struct platform_device *pdev = priv->pdev; in ravb_setup_irq()
2811 struct net_device *ndev = priv->ndev; in ravb_setup_irq()
2812 struct device *dev = &pdev->dev; in ravb_setup_irq()
2820 return -ENOMEM; in ravb_setup_irq()
2843 const struct ravb_hw_info *info = priv->info; in ravb_setup_irqs()
2844 struct net_device *ndev = priv->ndev; in ravb_setup_irqs()
2848 if (!info->multi_irqs) in ravb_setup_irqs()
2849 return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt); in ravb_setup_irqs()
2851 if (info->err_mgmt_irqs) { in ravb_setup_irqs()
2859 error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt); in ravb_setup_irqs()
2863 error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq, in ravb_setup_irqs()
2868 if (info->err_mgmt_irqs) { in ravb_setup_irqs()
2895 struct device_node *np = pdev->dev.of_node; in ravb_probe()
2904 dev_err(&pdev->dev, in ravb_probe()
2906 return -EINVAL; in ravb_probe()
2909 rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); in ravb_probe()
2911 return dev_err_probe(&pdev->dev, PTR_ERR(rstc), in ravb_probe()
2917 return -ENOMEM; in ravb_probe()
2919 info = of_device_get_match_data(&pdev->dev); in ravb_probe()
2921 ndev->features = info->net_features; in ravb_probe()
2922 ndev->hw_features = info->net_hw_features; in ravb_probe()
2928 SET_NETDEV_DEV(ndev, &pdev->dev); in ravb_probe()
2931 priv->info = info; in ravb_probe()
2932 priv->rstc = rstc; in ravb_probe()
2933 priv->ndev = ndev; in ravb_probe()
2934 priv->pdev = pdev; in ravb_probe()
2935 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; in ravb_probe()
2936 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; in ravb_probe()
2937 if (info->nc_queues) { in ravb_probe()
2938 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; in ravb_probe()
2939 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; in ravb_probe()
2946 priv->clk = devm_clk_get(&pdev->dev, NULL); in ravb_probe()
2947 if (IS_ERR(priv->clk)) { in ravb_probe()
2948 error = PTR_ERR(priv->clk); in ravb_probe()
2952 if (info->gptp_ref_clk) { in ravb_probe()
2953 priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); in ravb_probe()
2954 if (IS_ERR(priv->gptp_clk)) { in ravb_probe()
2955 error = PTR_ERR(priv->gptp_clk); in ravb_probe()
2960 priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); in ravb_probe()
2961 if (IS_ERR(priv->refclk)) { in ravb_probe()
2962 error = PTR_ERR(priv->refclk); in ravb_probe()
2965 clk_prepare(priv->refclk); in ravb_probe()
2968 pm_runtime_set_autosuspend_delay(&pdev->dev, 100); in ravb_probe()
2969 pm_runtime_use_autosuspend(&pdev->dev); in ravb_probe()
2970 pm_runtime_enable(&pdev->dev); in ravb_probe()
2971 error = pm_runtime_resume_and_get(&pdev->dev); in ravb_probe()
2975 priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in ravb_probe()
2976 if (IS_ERR(priv->addr)) { in ravb_probe()
2977 error = PTR_ERR(priv->addr); in ravb_probe()
2981 /* The Ether-specific entries in the device structure. */ in ravb_probe()
2982 ndev->base_addr = res->start; in ravb_probe()
2984 spin_lock_init(&priv->lock); in ravb_probe()
2985 INIT_WORK(&priv->work, ravb_tx_timeout_work); in ravb_probe()
2987 error = of_get_phy_mode(np, &priv->phy_interface); in ravb_probe()
2988 if (error && error != -ENODEV) in ravb_probe()
2991 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); in ravb_probe()
2992 priv->avb_link_active_low = in ravb_probe()
2993 of_property_read_bool(np, "renesas,ether-link-active-low"); in ravb_probe()
2995 ndev->max_mtu = info->tx_max_frame_size - in ravb_probe()
2997 ndev->min_mtu = ETH_MIN_MTU; in ravb_probe()
2999 /* FIXME: R-Car Gen2 has a 4-byte alignment restriction for tx buffer in ravb_probe()
3004 priv->num_tx_desc = info->aligned_tx ? 2 : 1; in ravb_probe()
3007 ndev->netdev_ops = &ravb_netdev_ops; in ravb_probe()
3008 ndev->ethtool_ops = &ravb_ethtool_ops; in ravb_probe()
3017 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; in ravb_probe()
3018 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, in ravb_probe()
3019 &priv->desc_bat_dma, GFP_KERNEL); in ravb_probe()
3020 if (!priv->desc_bat) { in ravb_probe()
3021 dev_err(&pdev->dev, in ravb_probe()
3023 priv->desc_bat_size); in ravb_probe()
3024 error = -ENOMEM; in ravb_probe()
3028 priv->desc_bat[q].die_dt = DT_EOS; in ravb_probe()
3031 INIT_LIST_HEAD(&priv->ts_skb_list); in ravb_probe()
3034 priv->msg_enable = RAVB_DEF_MSG_ENABLE; in ravb_probe()
3043 if (!is_valid_ether_addr(ndev->dev_addr)) { in ravb_probe()
3044 dev_warn(&pdev->dev, in ravb_probe()
3052 dev_err(&pdev->dev, "failed to initialize MDIO\n"); in ravb_probe()
3061 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll); in ravb_probe()
3062 if (info->nc_queues) in ravb_probe()
3063 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); in ravb_probe()
3065 if (info->coalesce_irqs) { in ravb_probe()
3076 device_set_wakeup_capable(&pdev->dev, 1); in ravb_probe()
3080 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); in ravb_probe()
3082 pm_runtime_mark_last_busy(&pdev->dev); in ravb_probe()
3083 pm_runtime_put_autosuspend(&pdev->dev); in ravb_probe()
3088 if (info->nc_queues) in ravb_probe()
3089 netif_napi_del(&priv->napi[RAVB_NC]); in ravb_probe()
3091 netif_napi_del(&priv->napi[RAVB_BE]); in ravb_probe()
3096 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_probe()
3097 priv->desc_bat_dma); in ravb_probe()
3099 pm_runtime_put(&pdev->dev); in ravb_probe()
3101 pm_runtime_disable(&pdev->dev); in ravb_probe()
3102 pm_runtime_dont_use_autosuspend(&pdev->dev); in ravb_probe()
3103 clk_unprepare(priv->refclk); in ravb_probe()
3115 const struct ravb_hw_info *info = priv->info; in ravb_remove()
3116 struct device *dev = &priv->pdev->dev; in ravb_remove()
3124 if (info->nc_queues) in ravb_remove()
3125 netif_napi_del(&priv->napi[RAVB_NC]); in ravb_remove()
3126 netif_napi_del(&priv->napi[RAVB_BE]); in ravb_remove()
3130 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_remove()
3131 priv->desc_bat_dma); in ravb_remove()
3133 pm_runtime_put_sync_suspend(&pdev->dev); in ravb_remove()
3134 pm_runtime_disable(&pdev->dev); in ravb_remove()
3136 clk_unprepare(priv->refclk); in ravb_remove()
3137 reset_control_assert(priv->rstc); in ravb_remove()
3145 const struct ravb_hw_info *info = priv->info; in ravb_wol_setup()
3153 synchronize_irq(priv->emac_irq); in ravb_wol_setup()
3154 if (info->nc_queues) in ravb_wol_setup()
3155 napi_disable(&priv->napi[RAVB_NC]); in ravb_wol_setup()
3156 napi_disable(&priv->napi[RAVB_BE]); in ravb_wol_setup()
3162 if (priv->info->ccc_gac) in ravb_wol_setup()
3165 return enable_irq_wake(priv->emac_irq); in ravb_wol_setup()
3171 const struct ravb_hw_info *info = priv->info; in ravb_wol_restore()
3184 if (priv->info->ccc_gac) in ravb_wol_restore()
3185 ravb_ptp_init(ndev, priv->pdev); in ravb_wol_restore()
3187 if (info->nc_queues) in ravb_wol_restore()
3188 napi_enable(&priv->napi[RAVB_NC]); in ravb_wol_restore()
3189 napi_enable(&priv->napi[RAVB_BE]); in ravb_wol_restore()
3196 return disable_irq_wake(priv->emac_irq); in ravb_wol_restore()
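
The two WoL helpers above bracket suspend/resume symmetrically: ravb_wol_setup() quiesces NAPI, leaves the magic-packet detector armed and flags the E-MAC interrupt as a wake source; ravb_wol_restore() undoes all of it. A condensed sketch of the arming step (the ECMR_MPDE bit name is an assumption; enable_irq_wake() is visible above):

    ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);  /* MagicPacket detect */
    return enable_irq_wake(priv->emac_irq);         /* EMAC IRQ wakes us */
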
3210 if (priv->wol_enabled) in ravb_suspend()
3217 ret = pm_runtime_force_suspend(&priv->pdev->dev); in ravb_suspend()
3222 return reset_control_assert(priv->rstc); in ravb_suspend()
3231 ret = reset_control_deassert(priv->rstc); in ravb_resume()
3239 if (priv->wol_enabled) { in ravb_resume()
3260 if (!priv->wol_enabled) { in ravb_resume()
3273 clk_disable(priv->refclk); in ravb_runtime_suspend()
3283 return clk_enable(priv->refclk); in ravb_runtime_resume()