Lines Matching +full:pa +full:- +full:stats
 * This program is dual-licensed; you may select either version 2 of

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

/* xlgmac_tx_avail_desc() */
return (ring->dma_desc_count - (ring->cur - ring->dirty));

/* xlgmac_rx_dirty_desc() */
return (ring->cur - ring->dirty);
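
/* A minimal, self-contained sketch of the cur/dirty ring accounting used
 * by the two helpers above. The struct, field names, and main() here are
 * illustrative stand-ins, not the driver's definitions: cur and dirty are
 * free-running unsigned counters, and unsigned subtraction keeps the math
 * correct across wraparound.
 */
#include <assert.h>

struct demo_ring {
	unsigned int dma_desc_count;	/* total descriptors in the ring */
	unsigned int cur;		/* next descriptor the driver will use */
	unsigned int dirty;		/* next descriptor to be reclaimed */
};

static unsigned int demo_tx_avail(const struct demo_ring *ring)
{
	/* free = total minus the in-flight (cur - dirty) window */
	return ring->dma_desc_count - (ring->cur - ring->dirty);
}

int main(void)
{
	struct demo_ring ring = { .dma_desc_count = 8, .cur = 5, .dirty = 2 };

	assert(demo_tx_avail(&ring) == 5);	/* 8 - (5 - 2) */

	/* wraparound: cur has wrapped past UINT_MAX, dirty has not */
	ring.cur = 1;
	ring.dirty = 0xfffffffeu;
	assert(demo_tx_avail(&ring) == 5);	/* 8 - 3 */
	return 0;
}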

/* xlgmac_maybe_stop_tx_queue() */
struct xlgmac_pdata *pdata = channel->pdata;

netif_info(pdata, drv, pdata->netdev,

netif_stop_subqueue(pdata->netdev, channel->queue_index);
ring->tx.queue_stopped = 1;

if (ring->tx.xmit_more)
	pdata->hw_ops.tx_start_xmit(channel, ring);

/* xlgmac_prep_vlan() */
pkt_info->vlan_ctag = skb_vlan_tag_get(skb);

/* xlgmac_prep_tso() */
if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,

pkt_info->header_len = skb_tcp_all_headers(skb);
pkt_info->tcp_header_len = tcp_hdrlen(skb);
pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
pkt_info->mss = skb_shinfo(skb)->gso_size;

XLGMAC_PR("header_len=%u\n", pkt_info->header_len);

	  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
XLGMAC_PR("mss=%u\n", pkt_info->mss);

pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
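
/* The gso accounting above counts the replicated headers: tx_bytes starts
 * at skb->len (one header plus all payload) and every segment after the
 * first repeats header_len bytes on the wire. A runnable userspace model
 * of that arithmetic; all values below are illustrative, not from the
 * driver.
 */
#include <stdio.h>

int main(void)
{
	unsigned int header_len = 54;		/* Eth + IP + TCP, illustrative */
	unsigned int mss = 1460;
	unsigned int payload = 64000;
	unsigned int skb_len = payload + header_len;
	unsigned int gso_segs = (payload + mss - 1) / mss;	/* 44 */

	unsigned int tx_packets = gso_segs;
	unsigned int tx_bytes = skb_len + (tx_packets - 1) * header_len;

	printf("segments=%u wire_bytes=%u\n", tx_packets, tx_bytes);
	return 0;
}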

/* xlgmac_is_tso() */
if (skb->ip_summed != CHECKSUM_PARTIAL)

/* xlgmac_prep_tx_pkt() */
pkt_info->skb = skb;

pkt_info->desc_count = 0;

pkt_info->tx_packets = 1;
pkt_info->tx_bytes = skb->len;

if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {

	pkt_info->desc_count++;

	pkt_info->desc_count++;

	pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,

	pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,

} else if (skb->ip_summed == CHECKSUM_PARTIAL)
	pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,

if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)

	pkt_info->desc_count++;

pkt_info->attributes = XLGMAC_SET_REG_BITS(
			pkt_info->attributes,

pkt_info->desc_count++;
len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);

for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
	frag = &skb_shinfo(skb)->frags[i];

	pkt_info->desc_count++;
	len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
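
/* Each desc_count++ / len -= min(...) pair above reserves one descriptor
 * per XLGMAC_TX_MAX_BUF_SIZE chunk of a buffer. The loop below models
 * that estimate in isolation; DEMO_TX_MAX_BUF_SIZE is an illustrative
 * stand-in for the driver's constant.
 */
#include <stdio.h>

#define DEMO_TX_MAX_BUF_SIZE 16368	/* illustrative per-descriptor limit */

static unsigned int demo_desc_count(unsigned int len)
{
	unsigned int count = 0;

	while (len) {
		count++;
		len -= (len < DEMO_TX_MAX_BUF_SIZE) ? len : DEMO_TX_MAX_BUF_SIZE;
	}
	return count;	/* same result as DIV_ROUND_UP(len, max_buf_size) */
}

int main(void)
{
	printf("%u\n", demo_desc_count(40000));	/* 3 descriptors */
	return 0;
}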

/* xlgmac_calc_rx_buf_size() */
return -EINVAL;

rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
	      ~(XLGMAC_RX_BUF_ALIGN - 1);
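
/* The rounding above is the standard round-up-to-alignment idiom, valid
 * only when the alignment is a power of two. A runnable check with an
 * illustrative alignment value:
 */
#include <assert.h>

#define DEMO_RX_BUF_ALIGN 64	/* must be a power of two for this idiom */

static unsigned int round_up_align(unsigned int size)
{
	return (size + DEMO_RX_BUF_ALIGN - 1) & ~(DEMO_RX_BUF_ALIGN - 1);
}

int main(void)
{
	assert(round_up_align(1500) == 1536);	/* 24 * 64 */
	assert(round_up_align(1536) == 1536);	/* already aligned */
	assert(round_up_align(1537) == 1600);
	return 0;
}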

/* xlgmac_enable_rx_tx_ints() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {
	if (channel->tx_ring && channel->rx_ring)

	else if (channel->tx_ring)

	else if (channel->rx_ring)

	hw_ops->enable_int(channel, int_id);

/* xlgmac_disable_rx_tx_ints() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {
	if (channel->tx_ring && channel->rx_ring)

	else if (channel->tx_ring)

	else if (channel->rx_ring)

	hw_ops->disable_int(channel, int_id);

/* xlgmac_isr() */
hw_ops = &pdata->hw_ops;

 * this register to be non-zero

dma_isr = readl(pdata->mac_regs + DMA_ISR);

netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

for (i = 0; i < pdata->channel_count; i++) {

	channel = pdata->channel_head + i;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",

	if (!pdata->per_channel_irq && (ti || ri)) {
		if (napi_schedule_prep(&pdata->napi)) {

			pdata->stats.napi_poll_isr++;

			__napi_schedule_irqoff(&pdata->napi);

	pdata->stats.tx_process_stopped++;

	pdata->stats.rx_process_stopped++;

	pdata->stats.tx_buffer_unavailable++;

	pdata->stats.rx_buffer_unavailable++;

	pdata->stats.fatal_bus_error++;
	schedule_work(&pdata->restart_work);

mac_isr = readl(pdata->mac_regs + MAC_ISR);

	hw_ops->tx_mmc_int(pdata);

	hw_ops->rx_mmc_int(pdata);
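
/* The handler above defers the real Rx/Tx work to NAPI: it claims the
 * poll with napi_schedule_prep(), masks the ring interrupts, then
 * schedules the poller. A userspace model of that claim-then-schedule
 * handshake, with the kernel calls replaced by an atomic flag; every
 * name here is an illustrative stand-in.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag napi_scheduled = ATOMIC_FLAG_INIT;

/* stand-in for napi_schedule_prep(): true only for the first caller */
static bool demo_schedule_prep(void)
{
	return !atomic_flag_test_and_set(&napi_scheduled);
}

static void demo_isr(bool ti, bool ri)
{
	if (ti || ri) {
		if (demo_schedule_prep()) {
			/* the driver also disables the Tx/Rx ring
			 * interrupts here, before __napi_schedule_irqoff() */
			printf("poll scheduled\n");
		} else {
			printf("poll already pending, nothing to do\n");
		}
	}
}

int main(void)
{
	demo_isr(true, false);	/* schedules the poll */
	demo_isr(false, true);	/* coalesced into the pending poll */
	return 0;
}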

/* xlgmac_dma_isr() */
if (napi_schedule_prep(&channel->napi)) {

	disable_irq_nosync(channel->dma_irq);

	__napi_schedule_irqoff(&channel->napi);

/* xlgmac_tx_timer() */
struct xlgmac_pdata *pdata = channel->pdata;

napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

if (pdata->per_channel_irq)
	disable_irq_nosync(channel->dma_irq);

pdata->stats.napi_poll_txtimer++;

channel->tx_timer_active = 0;

/* xlgmac_init_timers() */
channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {
	if (!channel->tx_ring)

	timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);

/* xlgmac_stop_timers() */
channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {
	if (!channel->tx_ring)

	del_timer_sync(&channel->tx_timer);

/* xlgmac_napi_enable() */
if (pdata->per_channel_irq) {
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {

		netif_napi_add(pdata->netdev, &channel->napi,

		napi_enable(&channel->napi);

	netif_napi_add(pdata->netdev, &pdata->napi,

	napi_enable(&pdata->napi);

/* xlgmac_napi_disable() */
if (pdata->per_channel_irq) {
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		napi_disable(&channel->napi);

		netif_napi_del(&channel->napi);

	napi_disable(&pdata->napi);

	netif_napi_del(&pdata->napi);

/* xlgmac_request_irqs() */
struct net_device *netdev = pdata->netdev;

ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
		       IRQF_SHARED, netdev->name, pdata);

	pdata->dev_irq);

if (!pdata->per_channel_irq)

channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {
	snprintf(channel->dma_irq_name,
		 sizeof(channel->dma_irq_name) - 1,
		 "%s-TxRx-%u", netdev_name(netdev),
		 channel->queue_index);

	ret = devm_request_irq(pdata->dev, channel->dma_irq,

			       channel->dma_irq_name, channel);

		channel->dma_irq);

for (i--, channel--; i < pdata->channel_count; i--, channel--)
	devm_free_irq(pdata->dev, channel->dma_irq, channel);

devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
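
/* The error-unwind loop above walks backwards with an unsigned index:
 * when i decrements past zero it wraps to UINT_MAX, the i < channel_count
 * test fails, and the loop stops. A runnable model of just that trick
 * (counts and the printf are illustrative):
 */
#include <stdio.h>

int main(void)
{
	unsigned int count = 5;
	unsigned int i = 3;	/* request for i == 3 failed: free 2, 1, 0 */

	for (i--; i < count; i--)
		printf("freeing irq for channel %u\n", i);
	/* after channel 0, i-- wraps to UINT_MAX, terminating the loop */
	return 0;
}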

/* xlgmac_free_irqs() */
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

if (!pdata->per_channel_irq)

channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++)
	devm_free_irq(pdata->dev, channel->dma_irq, channel);

/* xlgmac_free_tx_data() */
struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;

channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {
	ring = channel->tx_ring;

	for (j = 0; j < ring->dma_desc_count; j++) {

		desc_ops->unmap_desc_data(pdata, desc_data);

/* xlgmac_free_rx_data() */
struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;

channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {
	ring = channel->rx_ring;

	for (j = 0; j < ring->dma_desc_count; j++) {

		desc_ops->unmap_desc_data(pdata, desc_data);

/* xlgmac_start() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
struct net_device *netdev = pdata->netdev;

hw_ops->init(pdata);

hw_ops->enable_tx(pdata);
hw_ops->enable_rx(pdata);

hw_ops->exit(pdata);

/* xlgmac_stop() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
struct net_device *netdev = pdata->netdev;

hw_ops->disable_tx(pdata);
hw_ops->disable_rx(pdata);

hw_ops->exit(pdata);

channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {
	if (!channel->tx_ring)

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

/* xlgmac_restart_dev() */
if (!netif_running(pdata->netdev))

/* xlgmac_open() */
desc_ops = &pdata->desc_ops;

ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);

pdata->rx_buf_size = ret;

ret = desc_ops->alloc_channels_and_rings(pdata);

INIT_WORK(&pdata->restart_work, xlgmac_restart);

desc_ops->free_channels_and_rings(pdata);

/* xlgmac_close() */
desc_ops = &pdata->desc_ops;

desc_ops->free_channels_and_rings(pdata);

/* xlgmac_tx_timeout() */
schedule_work(&pdata->restart_work);

/* xlgmac_xmit() */
desc_ops = &pdata->desc_ops;
hw_ops = &pdata->hw_ops;

XLGMAC_PR("skb->len = %d\n", skb->len);

channel = pdata->channel_head + skb->queue_mapping;
txq = netdev_get_tx_queue(netdev, channel->queue_index);
ring = channel->tx_ring;
tx_pkt_info = &ring->pkt_info;

if (skb->len == 0) {

	tx_pkt_info->desc_count);

if (!desc_ops->map_tx_skb(channel, skb)) {

netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

hw_ops->dev_xmit(channel);

/* xlgmac_get_stats64() */
struct xlgmac_stats *pstats = &pdata->stats;

pdata->hw_ops.read_mmc_stats(pdata);

s->rx_packets = pstats->rxframecount_gb;
s->rx_bytes = pstats->rxoctetcount_gb;
s->rx_errors = pstats->rxframecount_gb -
	       pstats->rxbroadcastframes_g -
	       pstats->rxmulticastframes_g -
	       pstats->rxunicastframes_g;
s->multicast = pstats->rxmulticastframes_g;
s->rx_length_errors = pstats->rxlengtherror;
s->rx_crc_errors = pstats->rxcrcerror;
s->rx_fifo_errors = pstats->rxfifooverflow;

s->tx_packets = pstats->txframecount_gb;
s->tx_bytes = pstats->txoctetcount_gb;
s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
s->tx_dropped = netdev->stats.tx_dropped;
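
/* rx_errors above is derived rather than read from one counter: total
 * good-plus-bad frames ("_gb") minus the good ("_g") unicast, multicast,
 * and broadcast frames leaves the bad ones. A worked model with
 * illustrative counter values:
 */
#include <stdio.h>

int main(void)
{
	/* illustrative MMC counter snapshot */
	unsigned long long rxframecount_gb = 1000000;	/* good + bad */
	unsigned long long rxunicastframes_g = 980000;	/* good only */
	unsigned long long rxmulticastframes_g = 15000;
	unsigned long long rxbroadcastframes_g = 4000;

	unsigned long long rx_errors = rxframecount_gb -
				       rxbroadcastframes_g -
				       rxmulticastframes_g -
				       rxunicastframes_g;

	printf("rx_errors=%llu\n", rx_errors);	/* 1000 bad frames */
	return 0;
}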

/* xlgmac_set_mac_address() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

if (!is_valid_ether_addr(saddr->sa_data))
	return -EADDRNOTAVAIL;

eth_hw_addr_set(netdev, saddr->sa_data);

hw_ops->set_mac_address(pdata, netdev->dev_addr);

/* xlgmac_ioctl() */
return -ENODEV;

/* xlgmac_change_mtu() */
pdata->rx_buf_size = ret;
WRITE_ONCE(netdev->mtu, mtu);

/* xlgmac_vlan_rx_add_vid() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

set_bit(vid, pdata->active_vlans);
hw_ops->update_vlan_hash_table(pdata);

/* xlgmac_vlan_rx_kill_vid() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

clear_bit(vid, pdata->active_vlans);
hw_ops->update_vlan_hash_table(pdata);

/* xlgmac_poll_controller() */
if (pdata->per_channel_irq) {
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xlgmac_dma_isr(channel->dma_irq, channel);

	disable_irq(pdata->dev_irq);
	xlgmac_isr(pdata->dev_irq, pdata);
	enable_irq(pdata->dev_irq);

/* xlgmac_set_features() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

rxhash = pdata->netdev_features & NETIF_F_RXHASH;
rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	ret = hw_ops->enable_rss(pdata);

	ret = hw_ops->disable_rss(pdata);

	hw_ops->enable_rx_csum(pdata);

	hw_ops->disable_rx_csum(pdata);

	hw_ops->enable_rx_vlan_stripping(pdata);

	hw_ops->disable_rx_vlan_stripping(pdata);

	hw_ops->enable_rx_vlan_filtering(pdata);

	hw_ops->disable_rx_vlan_filtering(pdata);

pdata->netdev_features = features;

/* xlgmac_set_rx_mode() */
struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

hw_ops->config_rx_mode(pdata);

/* xlgmac_rx_refresh() */
struct xlgmac_pdata *pdata = channel->pdata;
struct xlgmac_ring *ring = channel->rx_ring;

desc_ops = &pdata->desc_ops;
hw_ops = &pdata->hw_ops;

while (ring->dirty != ring->cur) {
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

	desc_ops->unmap_desc_data(pdata, desc_data);

	if (desc_ops->map_rx_buffer(pdata, ring, desc_data))

	hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

	ring->dirty++;

desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
writel(lower_32_bits(desc_data->dma_desc_addr),
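
/* The refresh loop indexes the ring with a free-running dirty counter,
 * and XLGMAC_GET_DESC_DATA masks it down to a slot; that is why
 * ring->dirty - 1 is safe even when dirty has just wrapped to 0. A
 * sketch of that masking idea, assuming (as the driver's ring macros
 * appear to) a power-of-two descriptor count; the exact macro may
 * differ.
 */
#include <assert.h>

#define DEMO_DESC_COUNT 512u	/* power of two, illustrative */

/* free-running index -> ring slot */
static unsigned int demo_slot(unsigned int idx)
{
	return idx & (DEMO_DESC_COUNT - 1);
}

int main(void)
{
	unsigned int dirty = 0;

	/* dirty - 1 wraps to UINT_MAX, which masks to the last slot */
	assert(demo_slot(dirty - 1) == DEMO_DESC_COUNT - 1);
	assert(demo_slot(513) == 1);
	return 0;
}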

/* xlgmac_create_skb() */
skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);

dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
			      desc_data->rx.hdr.dma_off,
			      desc_data->rx.hdr.dma_len,

packet = page_address(desc_data->rx.hdr.pa.pages) +
	 desc_data->rx.hdr.pa.pages_offset;
copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
copy_len = min(desc_data->rx.hdr.dma_len, copy_len);

len -= copy_len;

dma_sync_single_range_for_cpu(pdata->dev,
			      desc_data->rx.buf.dma_base,
			      desc_data->rx.buf.dma_off,
			      desc_data->rx.buf.dma_len,

skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
		desc_data->rx.buf.pa.pages,
		desc_data->rx.buf.pa.pages_offset,
		len, desc_data->rx.buf.dma_len);
desc_data->rx.buf.pa.pages = NULL;
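
/* xlgmac_create_skb() copies at most the header into the skb's linear
 * area and attaches the rest of the buffer as a page fragment; the
 * copy_len arithmetic above decides the split. A runnable model of that
 * decision (demo_split() and its values are illustrative):
 */
#include <assert.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* returns bytes for the linear area; *frag_len gets the remainder that
 * would be attached with skb_add_rx_frag() */
static unsigned int demo_split(unsigned int hdr_len, unsigned int hdr_dma_len,
			       unsigned int len, unsigned int *frag_len)
{
	unsigned int copy_len = hdr_len ? hdr_len : len;

	copy_len = min_u(hdr_dma_len, copy_len);
	*frag_len = len - copy_len;
	return copy_len;
}

int main(void)
{
	unsigned int frag_len;

	/* hardware split a 54-byte header out of a 1500-byte frame */
	assert(demo_split(54, 256, 1500, &frag_len) == 54 && frag_len == 1446);
	/* no split header: copy what fits in the header buffer */
	assert(demo_split(0, 256, 128, &frag_len) == 128 && frag_len == 0);
	return 0;
}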

/* xlgmac_tx_poll() */
struct xlgmac_pdata *pdata = channel->pdata;
struct xlgmac_ring *ring = channel->tx_ring;
struct net_device *netdev = pdata->netdev;

desc_ops = &pdata->desc_ops;
hw_ops = &pdata->hw_ops;

cur = ring->cur;

/* Be sure we get ring->cur before accessing descriptor data */

txq = netdev_get_tx_queue(netdev, channel->queue_index);

       (ring->dirty != cur)) {
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
	dma_desc = desc_data->dma_desc;

	if (!hw_ops->tx_complete(dma_desc))

	xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

	if (hw_ops->is_last_desc(dma_desc)) {
		tx_packets += desc_data->tx.packets;
		tx_bytes += desc_data->tx.bytes;

	/* Free the SKB and reset the descriptor for re-use */
	desc_ops->unmap_desc_data(pdata, desc_data);
	hw_ops->tx_desc_reset(desc_data);

	ring->dirty++;

if ((ring->tx.queue_stopped == 1) &&

	ring->tx.queue_stopped = 0;
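
/* The Tx poll reclaims descriptors from dirty up to a snapshot of cur,
 * stopping early at the first descriptor the hardware has not completed.
 * A compact model of that reclaim walk; the completion flags stand in
 * for hw_ops->tx_complete(), and all values are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_RING 8

int main(void)
{
	bool hw_done[DEMO_RING] = { true, true, true, false };
	unsigned int dirty = 0, cur = 4;	/* 4 descriptors in flight */
	unsigned int processed = 0;

	while (dirty != cur) {
		if (!hw_done[dirty & (DEMO_RING - 1)])
			break;	/* hardware still owns this one */
		/* here the driver unmaps the buffer and resets the
		 * descriptor before advancing */
		dirty++;
		processed++;
	}
	printf("reclaimed %u, dirty now %u\n", processed, dirty);	/* 3, 3 */
	return 0;
}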

/* xlgmac_rx_poll() */
struct xlgmac_pdata *pdata = channel->pdata;
struct xlgmac_ring *ring = channel->rx_ring;
struct net_device *netdev = pdata->netdev;

hw_ops = &pdata->hw_ops;

napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
pkt_info = &ring->pkt_info;

if (!received && desc_data->state_saved) {
	skb = desc_data->state.skb;
	error = desc_data->state.error;
	len = desc_data->state.len;

desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

if (hw_ops->dev_read(channel))

ring->cur++;

	pkt_info->attributes,

	pkt_info->attributes,

	pkt_info->attributes,

if (error || pkt_info->errors) {
	if (pkt_info->errors)

dma_desc_len = desc_data->rx.len - len;

	pdata->dev,
	desc_data->rx.buf.dma_base,
	desc_data->rx.buf.dma_off,
	desc_data->rx.buf.dma_len,

	skb, skb_shinfo(skb)->nr_frags,
	desc_data->rx.buf.pa.pages,
	desc_data->rx.buf.pa.pages_offset,

	desc_data->rx.buf.dma_len);
desc_data->rx.buf.pa.pages = NULL;

max_len = netdev->mtu + ETH_HLEN;
if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
    (skb->protocol == htons(ETH_P_8021Q)))

if (skb->len > max_len) {

if (XLGMAC_GET_REG_BITS(pkt_info->attributes,

	skb->ip_summed = CHECKSUM_UNNECESSARY;

if (XLGMAC_GET_REG_BITS(pkt_info->attributes,

		pkt_info->vlan_ctag);
	pdata->stats.rx_vlan_packets++;

if (XLGMAC_GET_REG_BITS(pkt_info->attributes,

	skb_set_hash(skb, pkt_info->rss_hash,
		     pkt_info->rss_hash_type);

skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, channel->queue_index);

desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
desc_data->state_saved = 1;
desc_data->state.skb = skb;
desc_data->state.len = len;
desc_data->state.error = error;

/* xlgmac_one_poll() */
enable_irq(channel->dma_irq);

/* xlgmac_all_poll() */
ring_budget = budget / pdata->rx_ring_count;

channel = pdata->channel_head;
for (i = 0; i < pdata->channel_count; i++, channel++) {

	if (ring_budget > (budget - processed))
		ring_budget = budget - processed;
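
/* xlgmac_all_poll() splits the NAPI budget evenly across Rx rings and
 * clamps each share to whatever budget remains. A runnable model of the
 * split as shown in the matched lines above (the budget and ring count
 * are illustrative, and here every ring pretends to consume its full
 * share):
 */
#include <stdio.h>

int main(void)
{
	unsigned int budget = 64, rx_ring_count = 3;
	unsigned int processed = 0;
	unsigned int ring_budget = budget / rx_ring_count;	/* 21 each */

	for (unsigned int i = 0; i < rx_ring_count; i++) {
		if (ring_budget > budget - processed)
			ring_budget = budget - processed;
		processed += ring_budget;	/* ring consumed its share */
	}
	printf("processed %u of %u\n", processed, budget);	/* 63 of 64 */
	return 0;
}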