Lines matching +full:frc +full:-shared
(Search results from the NXP/Freescale DPAA2 Ethernet driver, dpaa2-eth; each entry shows the source line number, the matched line, and the enclosing function.)

1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3 * Copyright 2016-2022 NXP
23 #include "dpaa2-eth.h"
29 #include "dpaa2-eth-trace.h"
40 priv->features = 0; in dpaa2_eth_detect_features()
44 priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT; in dpaa2_eth_detect_features()
57 if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg)) in dpaa2_update_ptp_onestep_indirect()
72 if (priv->onestep_reg_base) in dpaa2_update_ptp_onestep_direct()
73 writel(val, priv->onestep_reg_base); in dpaa2_update_ptp_onestep_direct()
78 struct device *dev = priv->net_dev->dev.parent; in dpaa2_ptp_onestep_reg_update_method()
81 priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect; in dpaa2_ptp_onestep_reg_update_method()
83 if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT)) in dpaa2_ptp_onestep_reg_update_method()
86 if (dpni_get_single_step_cfg(priv->mc_io, 0, in dpaa2_ptp_onestep_reg_update_method()
87 priv->mc_token, &ptp_cfg)) { in dpaa2_ptp_onestep_reg_update_method()
97 priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base, in dpaa2_ptp_onestep_reg_update_method()
99 if (!priv->onestep_reg_base) { in dpaa2_ptp_onestep_reg_update_method()
104 priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct; in dpaa2_ptp_onestep_reg_update_method()
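The matched lines above show the driver picking between two ways of updating its one-step PTP parameters: an indirect path that issues a firmware command (dpni_set_single_step_cfg) and a direct path that writes an ioremap'd register, selected in dpaa2_ptp_onestep_reg_update_method(). Below is a minimal, standalone C sketch of that callback-selection pattern; every name in it (fake_priv, has_direct_reg, and so on) is a placeholder, not driver API.

/* Illustrative only: pick an update callback based on a capability flag,
 * mirroring the indirect (firmware command) vs. direct (register write)
 * one-step PTP paths matched above.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_priv {
	bool has_direct_reg;	/* stands in for the ONESTEP_CFG_DIRECT feature bit */
	void (*set_onestep)(struct fake_priv *p, unsigned int offset, bool udp);
};

static void update_indirect(struct fake_priv *p, unsigned int offset, bool udp)
{
	(void)p;
	printf("indirect: firmware command, offset=%u udp=%d\n", offset, udp);
}

static void update_direct(struct fake_priv *p, unsigned int offset, bool udp)
{
	(void)p;
	printf("direct: single register write, offset=%u udp=%d\n", offset, udp);
}

static void pick_update_method(struct fake_priv *p)
{
	p->set_onestep = update_indirect;	/* safe default */
	if (p->has_direct_reg)
		p->set_onestep = update_direct;	/* faster path when supported */
}

int main(void)
{
	struct fake_priv p = { .has_direct_reg = true };

	pick_update_method(&p);
	p.set_onestep(&p, 22, true);
	return 0;
}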
124 if (!(priv->net_dev->features & NETIF_F_RXCSUM)) in dpaa2_eth_validate_rx_csum()
133 skb->ip_summed = CHECKSUM_UNNECESSARY; in dpaa2_eth_validate_rx_csum()
143 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_free_rx_fd()
163 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); in dpaa2_eth_free_rx_fd()
164 dma_unmap_page(dev, addr, priv->rx_buf_size, in dpaa2_eth_free_rx_fd()
176 /* Build a linear skb based on a single-buffer frame descriptor */
185 ch->buf_count--; in dpaa2_eth_build_linear_skb()
203 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_build_frag_skb()
221 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); in dpaa2_eth_build_frag_skb()
222 dma_unmap_page(dev, sg_addr, priv->rx_buf_size, in dpaa2_eth_build_frag_skb()
259 (PAGE_SIZE - 1)) + in dpaa2_eth_build_frag_skb()
260 (page_address(page) - page_address(head_page)); in dpaa2_eth_build_frag_skb()
262 skb_add_rx_frag(skb, i - 1, head_page, page_offset, in dpaa2_eth_build_frag_skb()
263 sg_length, priv->rx_buf_size); in dpaa2_eth_build_frag_skb()
273 ch->buf_count -= i + 2; in dpaa2_eth_build_frag_skb()
284 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_free_bufs()
291 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); in dpaa2_eth_free_bufs()
294 dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, in dpaa2_eth_free_bufs()
300 xdp_buff = swa->xsk.xdp_buff; in dpaa2_eth_free_bufs()
313 ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr; in dpaa2_eth_recycle_buf()
314 if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD) in dpaa2_eth_recycle_buf()
317 while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid, in dpaa2_eth_recycle_buf()
318 ch->recycled_bufs, in dpaa2_eth_recycle_buf()
319 ch->recycled_bufs_cnt)) == -EBUSY) { in dpaa2_eth_recycle_buf()
326 dpaa2_eth_free_bufs(priv, ch->recycled_bufs, in dpaa2_eth_recycle_buf()
327 ch->recycled_bufs_cnt, ch->xsk_zc); in dpaa2_eth_recycle_buf()
328 ch->buf_count -= ch->recycled_bufs_cnt; in dpaa2_eth_recycle_buf()
331 ch->recycled_bufs_cnt = 0; in dpaa2_eth_recycle_buf()
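dpaa2_eth_recycle_buf() above batches buffer addresses and releases them back to the pool in one call, retrying while the portal reports -EBUSY and freeing the whole batch if the retries run out. A compilable toy version of that batch-and-retry pattern, with invented constants and a stubbed release function:

/* Illustrative batch-and-retry release; not the driver's code. */
#include <errno.h>
#include <stdio.h>

#define BUFS_PER_CMD	7
#define MAX_RETRIES	3

static int fake_release(const unsigned long *bufs, int count)
{
	static int calls;

	(void)bufs;
	if (calls++ == 0)
		return -EBUSY;	/* pretend the portal was busy once */
	printf("released %d buffers\n", count);
	return 0;
}

static unsigned long recycled[BUFS_PER_CMD];
static int recycled_cnt;

static void recycle_buf(unsigned long addr)
{
	int retries = 0;

	recycled[recycled_cnt++] = addr;
	if (recycled_cnt < BUFS_PER_CMD)
		return;		/* keep batching until a full command's worth */

	while (fake_release(recycled, recycled_cnt) == -EBUSY) {
		if (retries++ >= MAX_RETRIES) {
			printf("giving up, freeing %d buffers\n", recycled_cnt);
			break;
		}
	}
	recycled_cnt = 0;
}

int main(void)
{
	for (unsigned long a = 0; a < 2 * BUFS_PER_CMD; a++)
		recycle_buf(a);
	return 0;
}

Batching up to a command's worth of buffers amortizes the cost of touching the hardware portal, while the retry bound keeps a busy portal from stalling the datapath indefinitely.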
343 percpu_extras = this_cpu_ptr(priv->percpu_extras); in dpaa2_eth_xdp_flush()
346 fds = xdp_fds->fds; in dpaa2_eth_xdp_flush()
347 num_fds = xdp_fds->num; in dpaa2_eth_xdp_flush()
350 err = priv->enqueue(priv, fq, &fds[total_enqueued], in dpaa2_eth_xdp_flush()
351 0, num_fds - total_enqueued, &enqueued); in dpaa2_eth_xdp_flush()
352 if (err == -EBUSY) { in dpaa2_eth_xdp_flush()
353 percpu_extras->tx_portal_busy += ++retries; in dpaa2_eth_xdp_flush()
358 xdp_fds->num = 0; in dpaa2_eth_xdp_flush()
371 percpu_stats = this_cpu_ptr(priv->percpu_stats); in dpaa2_eth_xdp_tx_flush()
374 enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds); in dpaa2_eth_xdp_tx_flush()
377 percpu_stats->tx_packets += enqueued; in dpaa2_eth_xdp_tx_flush()
378 fds = fq->xdp_tx_fds.fds; in dpaa2_eth_xdp_tx_flush()
380 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); in dpaa2_eth_xdp_tx_flush()
381 ch->stats.xdp_tx++; in dpaa2_eth_xdp_tx_flush()
383 for (i = enqueued; i < fq->xdp_tx_fds.num; i++) { in dpaa2_eth_xdp_tx_flush()
385 percpu_stats->tx_errors++; in dpaa2_eth_xdp_tx_flush()
386 ch->stats.xdp_tx_err++; in dpaa2_eth_xdp_tx_flush()
388 fq->xdp_tx_fds.num = 0; in dpaa2_eth_xdp_tx_flush()
399 u32 ctrl, frc; in dpaa2_eth_xdp_enqueue() local
402 frc = dpaa2_fd_get_frc(fd); in dpaa2_eth_xdp_enqueue()
403 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); in dpaa2_eth_xdp_enqueue()
412 faead->ctrl = cpu_to_le32(ctrl); in dpaa2_eth_xdp_enqueue()
413 faead->conf_fqid = 0; in dpaa2_eth_xdp_enqueue()
415 fq = &priv->fq[queue_id]; in dpaa2_eth_xdp_enqueue()
416 dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++]; in dpaa2_eth_xdp_enqueue()
419 if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE) in dpaa2_eth_xdp_enqueue()
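dpaa2_eth_xdp_enqueue() above ORs the DPAA2_FD_FRC_FAEADV flag into the frame descriptor's frame context (FRC) before filling the FAEAD annotation fields, and batches descriptors per queue until DEV_MAP_BULK_SIZE. The sketch below shows the same read-modify-write shape on a fake FD; the bit value and accessors are placeholders, not the real DPAA2 layout.

/* Illustrative FRC read-modify-write; layout and bit are made up. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_FRC_FAEADV	(1u << 14)	/* placeholder bit */

struct fake_fd { uint32_t frc; };

static uint32_t fd_get_frc(const struct fake_fd *fd) { return fd->frc; }
static void fd_set_frc(struct fake_fd *fd, uint32_t frc) { fd->frc = frc; }

int main(void)
{
	struct fake_fd fd = { .frc = 0x3 };

	/* fetch the flags word, set the annotation-valid bit, write it back */
	fd_set_frc(&fd, fd_get_frc(&fd) | FAKE_FRC_FAEADV);
	printf("frc = 0x%08x\n", fd.frc);
	return 0;
}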
436 xdp_prog = READ_ONCE(ch->xdp.prog); in dpaa2_eth_run_xdp()
440 offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM; in dpaa2_eth_run_xdp()
441 xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq); in dpaa2_eth_run_xdp()
448 dpaa2_fd_set_offset(fd, xdp.data - vaddr); in dpaa2_eth_run_xdp()
449 dpaa2_fd_set_len(fd, xdp.data_end - xdp.data); in dpaa2_eth_run_xdp()
455 dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); in dpaa2_eth_run_xdp()
458 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); in dpaa2_eth_run_xdp()
461 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); in dpaa2_eth_run_xdp()
465 ch->stats.xdp_drop++; in dpaa2_eth_run_xdp()
468 dma_unmap_page(priv->net_dev->dev.parent, addr, in dpaa2_eth_run_xdp()
469 priv->rx_buf_size, DMA_BIDIRECTIONAL); in dpaa2_eth_run_xdp()
470 ch->buf_count--; in dpaa2_eth_run_xdp()
476 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); in dpaa2_eth_run_xdp()
478 addr = dma_map_page(priv->net_dev->dev.parent, in dpaa2_eth_run_xdp()
480 priv->rx_buf_size, DMA_BIDIRECTIONAL); in dpaa2_eth_run_xdp()
481 if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) { in dpaa2_eth_run_xdp()
484 ch->buf_count++; in dpaa2_eth_run_xdp()
487 ch->stats.xdp_drop++; in dpaa2_eth_run_xdp()
489 ch->stats.xdp_redirect++; in dpaa2_eth_run_xdp()
494 ch->xdp.res |= xdp_act; in dpaa2_eth_run_xdp()
510 skb = napi_alloc_skb(&ch->napi, skb_len); in dpaa2_eth_alloc_skb()
517 memcpy(skb->data, fd_vaddr + fd_offset, fd_length); in dpaa2_eth_alloc_skb()
526 struct dpaa2_eth_priv *priv = ch->priv; in dpaa2_eth_copybreak()
529 if (fd_length > priv->rx_copybreak) in dpaa2_eth_copybreak()
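dpaa2_eth_copybreak() above copies short frames into a freshly allocated skb when their length is at or below priv->rx_copybreak, so the original DMA buffer can be recycled immediately. A self-contained userspace sketch of that decision, with an invented threshold:

/* Illustrative rx copybreak decision; threshold and helpers are invented. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK	512

static unsigned char *rx_copybreak(const unsigned char *frame, size_t len)
{
	unsigned char *copy;

	if (len > COPYBREAK)
		return NULL;		/* too big: hand the original buffer up */

	copy = malloc(len);
	if (!copy)
		return NULL;
	memcpy(copy, frame, len);	/* original buffer can now be recycled */
	return copy;
}

int main(void)
{
	unsigned char frame[64] = "small frame";
	unsigned char *skb = rx_copybreak(frame, sizeof(frame));

	printf("copied: %s\n", skb ? "yes" : "no");
	free(skb);
	return 0;
}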
547 prefetch(skb->data); in dpaa2_eth_receive_skb()
550 if (priv->rx_tstamp) { in dpaa2_eth_receive_skb()
558 shhwtstamps->hwtstamp = ns_to_ktime(ns); in dpaa2_eth_receive_skb()
563 status = le32_to_cpu(fas->status); in dpaa2_eth_receive_skb()
567 skb->protocol = eth_type_trans(skb, priv->net_dev); in dpaa2_eth_receive_skb()
568 skb_record_rx_queue(skb, fq->flowid); in dpaa2_eth_receive_skb()
570 percpu_stats->rx_packets++; in dpaa2_eth_receive_skb()
571 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); in dpaa2_eth_receive_skb()
572 ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); in dpaa2_eth_receive_skb()
574 list_add_tail(&skb->list, ch->rx_list); in dpaa2_eth_receive_skb()
589 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_rx()
595 trace_dpaa2_rx_fd(priv->net_dev, fd); in dpaa2_eth_rx()
597 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); in dpaa2_eth_rx()
598 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, in dpaa2_eth_rx()
604 percpu_stats = this_cpu_ptr(priv->percpu_stats); in dpaa2_eth_rx()
605 percpu_extras = this_cpu_ptr(priv->percpu_extras); in dpaa2_eth_rx()
610 percpu_stats->rx_packets++; in dpaa2_eth_rx()
611 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); in dpaa2_eth_rx()
617 dma_unmap_page(dev, addr, priv->rx_buf_size, in dpaa2_eth_rx()
624 WARN_ON(priv->xdp_prog); in dpaa2_eth_rx()
626 dma_unmap_page(dev, addr, priv->rx_buf_size, in dpaa2_eth_rx()
630 percpu_extras->rx_sg_frames++; in dpaa2_eth_rx()
631 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); in dpaa2_eth_rx()
649 percpu_stats->rx_dropped++; in dpaa2_eth_rx()
660 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_rx_err()
670 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); in dpaa2_eth_rx_err()
671 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, in dpaa2_eth_rx_err()
677 dma_unmap_page(dev, addr, priv->rx_buf_size, in dpaa2_eth_rx_err()
681 dma_unmap_page(dev, addr, priv->rx_buf_size, in dpaa2_eth_rx_err()
694 devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx, in dpaa2_eth_rx_err()
695 &priv->devlink_port, NULL); in dpaa2_eth_rx_err()
699 percpu_stats = this_cpu_ptr(priv->percpu_stats); in dpaa2_eth_rx_err()
700 percpu_stats->rx_errors++; in dpaa2_eth_rx_err()
701 ch->buf_count--; in dpaa2_eth_rx_err()
704 /* Consume all frames pull-dequeued into the store. This is the simplest way to
713 struct dpaa2_eth_priv *priv = ch->priv; in dpaa2_eth_consume_frames()
721 dq = dpaa2_io_store_next(ch->store, &is_last); in dpaa2_eth_consume_frames()
729 netdev_err_once(priv->net_dev, in dpaa2_eth_consume_frames()
731 return -ETIMEDOUT; in dpaa2_eth_consume_frames()
739 fq->consume(priv, ch, fd, fq); in dpaa2_eth_consume_frames()
747 fq->stats.frames += cleaned; in dpaa2_eth_consume_frames()
748 ch->stats.frames += cleaned; in dpaa2_eth_consume_frames()
749 ch->stats.frames_per_cdan += cleaned; in dpaa2_eth_consume_frames()
772 return -EINVAL; in dpaa2_eth_ptp_parse()
776 return -EINVAL; in dpaa2_eth_ptp_parse()
779 *twostep = hdr->flag_field[0] & 0x2; in dpaa2_eth_ptp_parse()
789 *correction_offset = (u8 *)&hdr->correction - base; in dpaa2_eth_ptp_parse()
790 *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; in dpaa2_eth_ptp_parse()
807 u32 ctrl, frc; in dpaa2_eth_enable_tx_tstamp() local
812 frc = dpaa2_fd_get_frc(fd); in dpaa2_eth_enable_tx_tstamp()
813 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); in dpaa2_eth_enable_tx_tstamp()
824 faead->ctrl = cpu_to_le32(ctrl); in dpaa2_eth_enable_tx_tstamp()
826 if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { in dpaa2_eth_enable_tx_tstamp()
830 WARN_ONCE(1, "Bad packet for one-step timestamping\n"); in dpaa2_eth_enable_tx_tstamp()
835 frc = dpaa2_fd_get_frc(fd); in dpaa2_eth_enable_tx_tstamp()
836 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV); in dpaa2_eth_enable_tx_tstamp()
840 fas->status = cpu_to_le32(DPAA2_FAS_PTP); in dpaa2_eth_enable_tx_tstamp()
842 dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts); in dpaa2_eth_enable_tx_tstamp()
855 if (priv->ptp_correction_off == offset1) in dpaa2_eth_enable_tx_tstamp()
858 priv->dpaa2_set_onestep_params_cb(priv, offset1, udp); in dpaa2_eth_enable_tx_tstamp()
859 priv->ptp_correction_off = offset1; in dpaa2_eth_enable_tx_tstamp()
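dpaa2_eth_enable_tx_tstamp() above caches the last correction-field offset it programmed for one-step timestamping (priv->ptp_correction_off) and skips the hardware update when the offset has not changed. The standalone sketch below models that cache-and-skip check with stand-in names:

/* Illustrative "only reprogram when changed" check; names are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct onestep_state {
	unsigned int cached_offset;
	bool valid;
};

static void hw_update(unsigned int offset, bool udp)
{
	printf("programmed correction offset %u (udp=%d)\n", offset, udp);
}

static void maybe_update(struct onestep_state *st, unsigned int offset, bool udp)
{
	if (st->valid && st->cached_offset == offset)
		return;			/* nothing changed, skip the slow path */
	hw_update(offset, udp);
	st->cached_offset = offset;
	st->valid = true;
}

int main(void)
{
	struct onestep_state st = { 0 };

	maybe_update(&st, 22, true);	/* programs */
	maybe_update(&st, 22, true);	/* skipped */
	maybe_update(&st, 30, true);	/* programs again */
	return 0;
}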
870 sgt_cache = this_cpu_ptr(priv->sgt_cache); in dpaa2_eth_sgt_get()
871 sgt_buf_size = priv->tx_data_offset + in dpaa2_eth_sgt_get()
874 if (sgt_cache->count == 0) in dpaa2_eth_sgt_get()
877 sgt_buf = sgt_cache->buf[--sgt_cache->count]; in dpaa2_eth_sgt_get()
890 sgt_cache = this_cpu_ptr(priv->sgt_cache); in dpaa2_eth_sgt_recycle()
891 if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE) in dpaa2_eth_sgt_recycle()
894 sgt_cache->buf[sgt_cache->count++] = sgt_buf; in dpaa2_eth_sgt_recycle()
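dpaa2_eth_sgt_get() and dpaa2_eth_sgt_recycle() above keep a small per-CPU cache of scatter-gather table buffers: get pops a cached buffer or falls back to allocation, recycle pushes the buffer back unless the cache is full. A simplified single-threaded model, with invented sizes:

/* Illustrative fixed-size free-list cache; sizes are made up. */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE	16
#define SGT_BUF_SIZE	256

struct sgt_cache {
	void *buf[CACHE_SIZE];
	int count;
};

static void *sgt_get(struct sgt_cache *c)
{
	if (c->count == 0)
		return calloc(1, SGT_BUF_SIZE);	/* cache miss: allocate */
	return c->buf[--c->count];		/* cache hit: pop */
}

static void sgt_recycle(struct sgt_cache *c, void *buf)
{
	if (c->count >= CACHE_SIZE) {
		free(buf);			/* cache full: really free it */
		return;
	}
	c->buf[c->count++] = buf;		/* push back for reuse */
}

int main(void)
{
	struct sgt_cache cache = { .count = 0 };
	void *b = sgt_get(&cache);

	sgt_recycle(&cache, b);
	printf("cached buffers: %d\n", cache.count);
	while (cache.count)			/* drain before exit */
		free(cache.buf[--cache.count]);
	return 0;
}

Keeping the fixed-size SGT buffers in a per-CPU cache avoids an allocator round-trip on every scatter-gather transmit.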
903 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_build_sg_fd()
906 int nr_frags = skb_shinfo(skb)->nr_frags; in dpaa2_eth_build_sg_fd()
921 return -EINVAL; in dpaa2_eth_build_sg_fd()
925 return -ENOMEM; in dpaa2_eth_build_sg_fd()
928 num_sg = skb_to_sgvec(skb, scl, 0, skb->len); in dpaa2_eth_build_sg_fd()
930 err = -ENOMEM; in dpaa2_eth_build_sg_fd()
935 err = -ENOMEM; in dpaa2_eth_build_sg_fd()
940 sgt_buf_size = priv->tx_data_offset + in dpaa2_eth_build_sg_fd()
944 err = -ENOMEM; in dpaa2_eth_build_sg_fd()
948 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); in dpaa2_eth_build_sg_fd()
954 * - offset is 0 in dpaa2_eth_build_sg_fd()
955 * - format is 'dpaa2_sg_single' in dpaa2_eth_build_sg_fd()
961 dpaa2_sg_set_final(&sgt[i - 1], true); in dpaa2_eth_build_sg_fd()
970 swa->type = DPAA2_ETH_SWA_SG; in dpaa2_eth_build_sg_fd()
971 swa->sg.skb = skb; in dpaa2_eth_build_sg_fd()
972 swa->sg.scl = scl; in dpaa2_eth_build_sg_fd()
973 swa->sg.num_sg = num_sg; in dpaa2_eth_build_sg_fd()
974 swa->sg.sgt_size = sgt_buf_size; in dpaa2_eth_build_sg_fd()
979 err = -ENOMEM; in dpaa2_eth_build_sg_fd()
983 dpaa2_fd_set_offset(fd, priv->tx_data_offset); in dpaa2_eth_build_sg_fd()
986 dpaa2_fd_set_len(fd, skb->len); in dpaa2_eth_build_sg_fd()
1003 * enough for the HW requirements, thus instead of realloc-ing the skb we
1011 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_build_sg_fd_single_buf()
1020 sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry); in dpaa2_eth_build_sg_fd_single_buf()
1023 return -ENOMEM; in dpaa2_eth_build_sg_fd_single_buf()
1024 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); in dpaa2_eth_build_sg_fd_single_buf()
1026 addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL); in dpaa2_eth_build_sg_fd_single_buf()
1028 err = -ENOMEM; in dpaa2_eth_build_sg_fd_single_buf()
1034 dpaa2_sg_set_len(sgt, skb->len); in dpaa2_eth_build_sg_fd_single_buf()
1040 swa->type = DPAA2_ETH_SWA_SINGLE; in dpaa2_eth_build_sg_fd_single_buf()
1041 swa->single.skb = skb; in dpaa2_eth_build_sg_fd_single_buf()
1042 swa->single.sgt_size = sgt_buf_size; in dpaa2_eth_build_sg_fd_single_buf()
1047 err = -ENOMEM; in dpaa2_eth_build_sg_fd_single_buf()
1052 dpaa2_fd_set_offset(fd, priv->tx_data_offset); in dpaa2_eth_build_sg_fd_single_buf()
1055 dpaa2_fd_set_len(fd, skb->len); in dpaa2_eth_build_sg_fd_single_buf()
1061 dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL); in dpaa2_eth_build_sg_fd_single_buf()
1074 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_build_single_fd()
1079 buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); in dpaa2_eth_build_single_fd()
1080 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, in dpaa2_eth_build_single_fd()
1082 if (aligned_start >= skb->head) in dpaa2_eth_build_single_fd()
1085 return -ENOMEM; in dpaa2_eth_build_single_fd()
1093 swa->type = DPAA2_ETH_SWA_SINGLE; in dpaa2_eth_build_single_fd()
1094 swa->single.skb = skb; in dpaa2_eth_build_single_fd()
1097 skb_tail_pointer(skb) - buffer_start, in dpaa2_eth_build_single_fd()
1100 return -ENOMEM; in dpaa2_eth_build_single_fd()
1104 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); in dpaa2_eth_build_single_fd()
1105 dpaa2_fd_set_len(fd, skb->len); in dpaa2_eth_build_single_fd()
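dpaa2_eth_build_single_fd() above only uses the single-buffer FD format when an aligned buffer start still fits inside the skb head, and returns an error otherwise. The sketch below reproduces that headroom/alignment test on plain pointers, with an illustrative alignment value:

/* Illustrative headroom/alignment check; alignment and sizes are made up. */
#include <stdint.h>
#include <stdio.h>

#define TX_BUF_ALIGN	64

static unsigned char *aligned_tx_start(unsigned char *start)
{
	/* mirror "align(start - ALIGN, ALIGN)": pull back, then round up */
	uintptr_t p = (uintptr_t)(start - TX_BUF_ALIGN);

	return (unsigned char *)((p + TX_BUF_ALIGN - 1) & ~(uintptr_t)(TX_BUF_ALIGN - 1));
}

int main(void)
{
	unsigned char buf[512];
	unsigned char *head = buf;
	unsigned char *data = buf + 200;	/* pretend 200 bytes of headroom */
	unsigned char *start = data - 128;	/* headroom needed for annotations */
	unsigned char *aligned = aligned_tx_start(start);

	if (aligned >= head)
		printf("aligned start usable, offset %td from head\n", aligned - head);
	else
		printf("not enough headroom for the single-buffer format\n");
	return 0;
}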
1114 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
1115 * back-pointed to is also freed.
1124 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_free_tx_fd()
1137 buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); in dpaa2_eth_free_tx_fd()
1141 if (swa->type == DPAA2_ETH_SWA_SINGLE) { in dpaa2_eth_free_tx_fd()
1142 skb = swa->single.skb; in dpaa2_eth_free_tx_fd()
1147 skb_tail_pointer(skb) - buffer_start, in dpaa2_eth_free_tx_fd()
1150 WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type"); in dpaa2_eth_free_tx_fd()
1151 dma_unmap_single(dev, fd_addr, swa->xdp.dma_size, in dpaa2_eth_free_tx_fd()
1155 if (swa->type == DPAA2_ETH_SWA_SG) { in dpaa2_eth_free_tx_fd()
1156 skb = swa->sg.skb; in dpaa2_eth_free_tx_fd()
1159 dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, in dpaa2_eth_free_tx_fd()
1161 kfree(swa->sg.scl); in dpaa2_eth_free_tx_fd()
1164 dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, in dpaa2_eth_free_tx_fd()
1166 } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) { in dpaa2_eth_free_tx_fd()
1167 skb = swa->tso.skb; in dpaa2_eth_free_tx_fd()
1170 priv->tx_data_offset); in dpaa2_eth_free_tx_fd()
1173 dma_unmap_single(dev, fd_addr, swa->tso.sgt_size, in dpaa2_eth_free_tx_fd()
1177 tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt)); in dpaa2_eth_free_tx_fd()
1183 for (i = 1; i < swa->tso.num_sg; i++) in dpaa2_eth_free_tx_fd()
1187 if (!swa->tso.is_last_fd) in dpaa2_eth_free_tx_fd()
1189 } else if (swa->type == DPAA2_ETH_SWA_XSK) { in dpaa2_eth_free_tx_fd()
1191 dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size, in dpaa2_eth_free_tx_fd()
1194 skb = swa->single.skb; in dpaa2_eth_free_tx_fd()
1197 dma_unmap_single(dev, fd_addr, swa->single.sgt_size, in dpaa2_eth_free_tx_fd()
1201 priv->tx_data_offset); in dpaa2_eth_free_tx_fd()
1203 dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL); in dpaa2_eth_free_tx_fd()
1206 netdev_dbg(priv->net_dev, "Invalid FD format\n"); in dpaa2_eth_free_tx_fd()
1210 if (swa->type == DPAA2_ETH_SWA_XSK) { in dpaa2_eth_free_tx_fd()
1211 ch->xsk_tx_pkts_sent++; in dpaa2_eth_free_tx_fd()
1216 if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { in dpaa2_eth_free_tx_fd()
1217 fq->dq_frames++; in dpaa2_eth_free_tx_fd()
1218 fq->dq_bytes += fd_len; in dpaa2_eth_free_tx_fd()
1221 if (swa->type == DPAA2_ETH_SWA_XDP) { in dpaa2_eth_free_tx_fd()
1222 xdp_return_frame(swa->xdp.xdpf); in dpaa2_eth_free_tx_fd()
1227 if (swa->type != DPAA2_ETH_SWA_SW_TSO) { in dpaa2_eth_free_tx_fd()
1228 if (skb->cb[0] == TX_TSTAMP) { in dpaa2_eth_free_tx_fd()
1238 } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { in dpaa2_eth_free_tx_fd()
1239 mutex_unlock(&priv->onestep_tstamp_lock); in dpaa2_eth_free_tx_fd()
1259 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_build_gso_fd()
1276 total_len = skb->len - hdr_len; in dpaa2_eth_build_gso_fd()
1281 netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n"); in dpaa2_eth_build_gso_fd()
1282 err = -ENOMEM; in dpaa2_eth_build_gso_fd()
1285 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); in dpaa2_eth_build_gso_fd()
1288 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); in dpaa2_eth_build_gso_fd()
1289 total_len -= data_left; in dpaa2_eth_build_gso_fd()
1295 err = -ENOMEM; in dpaa2_eth_build_gso_fd()
1302 netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n"); in dpaa2_eth_build_gso_fd()
1303 err = -ENOMEM; in dpaa2_eth_build_gso_fd()
1323 netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n"); in dpaa2_eth_build_gso_fd()
1324 err = -ENOMEM; in dpaa2_eth_build_gso_fd()
1334 data_left -= size; in dpaa2_eth_build_gso_fd()
1339 sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry); in dpaa2_eth_build_gso_fd()
1341 swa->type = DPAA2_ETH_SWA_SW_TSO; in dpaa2_eth_build_gso_fd()
1342 swa->tso.skb = skb; in dpaa2_eth_build_gso_fd()
1343 swa->tso.num_sg = num_sge; in dpaa2_eth_build_gso_fd()
1344 swa->tso.sgt_size = sgt_buf_size; in dpaa2_eth_build_gso_fd()
1345 swa->tso.is_last_fd = total_len == 0 ? 1 : 0; in dpaa2_eth_build_gso_fd()
1350 netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n"); in dpaa2_eth_build_gso_fd()
1351 err = -ENOMEM; in dpaa2_eth_build_gso_fd()
1357 dpaa2_fd_set_offset(fd, priv->tx_data_offset); in dpaa2_eth_build_gso_fd()
1376 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); in dpaa2_eth_build_gso_fd()
1413 percpu_stats = this_cpu_ptr(priv->percpu_stats); in __dpaa2_eth_tx()
1414 percpu_extras = this_cpu_ptr(priv->percpu_extras); in __dpaa2_eth_tx()
1415 fd = (this_cpu_ptr(priv->fd))->array; in __dpaa2_eth_tx()
1419 /* We'll be holding a back-reference to the skb until Tx Confirmation; in __dpaa2_eth_tx()
1425 percpu_stats->tx_dropped++; in __dpaa2_eth_tx()
1433 percpu_extras->tx_sg_frames += num_fds; in __dpaa2_eth_tx()
1434 percpu_extras->tx_sg_bytes += fd_len; in __dpaa2_eth_tx()
1435 percpu_extras->tx_tso_frames += num_fds; in __dpaa2_eth_tx()
1436 percpu_extras->tx_tso_bytes += fd_len; in __dpaa2_eth_tx()
1439 percpu_extras->tx_sg_frames++; in __dpaa2_eth_tx()
1440 percpu_extras->tx_sg_bytes += skb->len; in __dpaa2_eth_tx()
1444 percpu_extras->tx_sg_frames++; in __dpaa2_eth_tx()
1445 percpu_extras->tx_sg_bytes += skb->len; in __dpaa2_eth_tx()
1446 percpu_extras->tx_converted_sg_frames++; in __dpaa2_eth_tx()
1447 percpu_extras->tx_converted_sg_bytes += skb->len; in __dpaa2_eth_tx()
1455 percpu_stats->tx_dropped++; in __dpaa2_eth_tx()
1459 if (swa && skb->cb[0]) in __dpaa2_eth_tx()
1472 if (net_dev->num_tc) { in __dpaa2_eth_tx()
1477 prio = net_dev->num_tc - prio - 1; in __dpaa2_eth_tx()
1483 fq = &priv->fq[queue_mapping]; in __dpaa2_eth_tx()
1492 err = priv->enqueue(priv, fq, &fd[total_enqueued], in __dpaa2_eth_tx()
1493 prio, num_fds - total_enqueued, &enqueued); in __dpaa2_eth_tx()
1494 if (err == -EBUSY) { in __dpaa2_eth_tx()
1501 percpu_extras->tx_portal_busy += retries; in __dpaa2_eth_tx()
1504 percpu_stats->tx_errors++; in __dpaa2_eth_tx()
1509 percpu_stats->tx_packets += total_enqueued; in __dpaa2_eth_tx()
1510 percpu_stats->tx_bytes += fd_len; in __dpaa2_eth_tx()
1528 skb = skb_dequeue(&priv->tx_skbs); in dpaa2_eth_tx_onestep_tstamp()
1532 /* Lock just before TX one-step timestamping packet, in dpaa2_eth_tx_onestep_tstamp()
1537 mutex_lock(&priv->onestep_tstamp_lock); in dpaa2_eth_tx_onestep_tstamp()
1538 __dpaa2_eth_tx(skb, priv->net_dev); in dpaa2_eth_tx_onestep_tstamp()
1548 /* Utilize skb->cb[0] for timestamping request per skb */ in dpaa2_eth_tx()
1549 skb->cb[0] = 0; in dpaa2_eth_tx()
1551 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) { in dpaa2_eth_tx()
1552 if (priv->tx_tstamp_type == HWTSTAMP_TX_ON) in dpaa2_eth_tx()
1553 skb->cb[0] = TX_TSTAMP; in dpaa2_eth_tx()
1554 else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) in dpaa2_eth_tx()
1555 skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC; in dpaa2_eth_tx()
1558 /* TX for one-step timestamping PTP Sync packet */ in dpaa2_eth_tx()
1559 if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { in dpaa2_eth_tx()
1563 skb_queue_tail(&priv->tx_skbs, skb); in dpaa2_eth_tx()
1564 queue_work(priv->dpaa2_ptp_wq, in dpaa2_eth_tx()
1565 &priv->tx_onestep_tstamp); in dpaa2_eth_tx()
1568 /* Use two-step timestamping if not one-step timestamping in dpaa2_eth_tx()
1571 skb->cb[0] = TX_TSTAMP; in dpaa2_eth_tx()
1590 trace_dpaa2_tx_conf_fd(priv->net_dev, fd); in dpaa2_eth_tx_conf()
1592 percpu_extras = this_cpu_ptr(priv->percpu_extras); in dpaa2_eth_tx_conf()
1593 percpu_extras->tx_conf_frames++; in dpaa2_eth_tx_conf()
1594 percpu_extras->tx_conf_bytes += fd_len; in dpaa2_eth_tx_conf()
1595 ch->stats.bytes_per_cdan += fd_len; in dpaa2_eth_tx_conf()
1605 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", in dpaa2_eth_tx_conf()
1608 percpu_stats = this_cpu_ptr(priv->percpu_stats); in dpaa2_eth_tx_conf()
1609 /* Tx-conf logically pertains to the egress path. */ in dpaa2_eth_tx_conf()
1610 percpu_stats->tx_errors++; in dpaa2_eth_tx_conf()
1618 err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable); in dpaa2_eth_set_rx_vlan_filtering()
1621 netdev_err(priv->net_dev, in dpaa2_eth_set_rx_vlan_filtering()
1633 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_rx_csum()
1636 netdev_err(priv->net_dev, in dpaa2_eth_set_rx_csum()
1641 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_rx_csum()
1644 netdev_err(priv->net_dev, in dpaa2_eth_set_rx_csum()
1656 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_tx_csum()
1659 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); in dpaa2_eth_set_tx_csum()
1663 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_tx_csum()
1666 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); in dpaa2_eth_set_tx_csum()
1680 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_add_bufs()
1690 if (!ch->xsk_zc) { in dpaa2_eth_add_bufs()
1692 /* Also allocate skb shared info and alignment padding. in dpaa2_eth_add_bufs()
1695 * skb shared info in dpaa2_eth_add_bufs()
1701 addr = dma_map_page(dev, page, 0, priv->rx_buf_size, in dpaa2_eth_add_bufs()
1709 trace_dpaa2_eth_buf_seed(priv->net_dev, in dpaa2_eth_add_bufs()
1712 addr, priv->rx_buf_size, in dpaa2_eth_add_bufs()
1713 ch->bp->bpid); in dpaa2_eth_add_bufs()
1715 } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) { in dpaa2_eth_add_bufs()
1720 batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs, in dpaa2_eth_add_bufs()
1726 swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start + in dpaa2_eth_add_bufs()
1728 swa->xsk.xdp_buff = xdp_buffs[i]; in dpaa2_eth_add_bufs()
1736 trace_dpaa2_xsk_buf_seed(priv->net_dev, in dpaa2_eth_add_bufs()
1737 xdp_buffs[i]->data_hard_start, in dpaa2_eth_add_bufs()
1739 addr, priv->rx_buf_size, in dpaa2_eth_add_bufs()
1740 ch->bp->bpid); in dpaa2_eth_add_bufs()
1746 while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid, in dpaa2_eth_add_bufs()
1747 buf_array, i)) == -EBUSY) { in dpaa2_eth_add_bufs()
1757 dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc); in dpaa2_eth_add_bufs()
1764 if (!ch->xsk_zc) { in dpaa2_eth_add_bufs()
1788 ch->buf_count += new_count; in dpaa2_eth_seed_pool()
1791 return -ENOMEM; in dpaa2_eth_seed_pool()
1799 struct net_device *net_dev = priv->net_dev; in dpaa2_eth_seed_pools()
1803 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_seed_pools()
1804 channel = priv->channel[i]; in dpaa2_eth_seed_pools()
1814 channel->bp->dev->obj_desc.id, in dpaa2_eth_seed_pools()
1815 channel->bp->bpid); in dpaa2_eth_seed_pools()
1832 for (i = 0; i < priv->num_channels; i++) in dpaa2_eth_drain_bufs()
1833 if (priv->channel[i]->bp->bpid == bpid) in dpaa2_eth_drain_bufs()
1834 xsk_zc = priv->channel[i]->xsk_zc; in dpaa2_eth_drain_bufs()
1839 if (ret == -EBUSY && in dpaa2_eth_drain_bufs()
1842 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); in dpaa2_eth_drain_bufs()
1861 for (i = 0; i < priv->num_channels; i++) in dpaa2_eth_drain_pool()
1862 if (priv->channel[i]->bp->bpid == bpid) in dpaa2_eth_drain_pool()
1863 priv->channel[i]->buf_count = 0; in dpaa2_eth_drain_pool()
1870 for (i = 0; i < priv->num_bps; i++) in dpaa2_eth_drain_pools()
1871 dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid); in dpaa2_eth_drain_pools()
1882 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) in dpaa2_eth_refill_pool()
1891 ch->buf_count += new_count; in dpaa2_eth_refill_pool()
1892 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); in dpaa2_eth_refill_pool()
1894 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) in dpaa2_eth_refill_pool()
1895 return -ENOMEM; in dpaa2_eth_refill_pool()
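dpaa2_eth_refill_pool() above is a low-water-mark refill: nothing happens while the channel still holds at least DPAA2_ETH_REFILL_THRESH buffers; otherwise buffers are added in batches until DPAA2_ETH_NUM_BUFS, returning -ENOMEM if that target cannot be reached. A toy version with made-up numbers:

/* Illustrative refill-below-threshold loop; constants are placeholders. */
#include <errno.h>
#include <stdio.h>

#define NUM_BUFS	1024
#define REFILL_THRESH	(NUM_BUFS / 2)
#define BUFS_PER_CMD	7

static int add_bufs(void)
{
	return BUFS_PER_CMD;	/* pretend a full batch was seeded */
}

static int refill_pool(int *buf_count)
{
	if (*buf_count >= REFILL_THRESH)
		return 0;	/* still above the low-water mark */

	do {
		int added = add_bufs();

		if (!added)
			break;
		*buf_count += added;
	} while (*buf_count < NUM_BUFS);

	return *buf_count < NUM_BUFS ? -ENOMEM : 0;
}

int main(void)
{
	int count = 100;

	printf("refill returned %d, count now %d\n", refill_pool(&count), count);
	return 0;
}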
1907 sgt_cache = per_cpu_ptr(priv->sgt_cache, k); in dpaa2_eth_sgt_cache_drain()
1908 count = sgt_cache->count; in dpaa2_eth_sgt_cache_drain()
1911 skb_free_frag(sgt_cache->buf[i]); in dpaa2_eth_sgt_cache_drain()
1912 sgt_cache->count = 0; in dpaa2_eth_sgt_cache_drain()
1919 int dequeues = -1; in dpaa2_eth_pull_channel()
1923 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, in dpaa2_eth_pull_channel()
1924 ch->store); in dpaa2_eth_pull_channel()
1927 } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES); in dpaa2_eth_pull_channel()
1929 ch->stats.dequeue_portal_busy += dequeues; in dpaa2_eth_pull_channel()
1931 ch->stats.pull_err++; in dpaa2_eth_pull_channel()
1957 ch->xdp.res = 0; in dpaa2_eth_poll()
1958 priv = ch->priv; in dpaa2_eth_poll()
1961 ch->rx_list = &rx_list; in dpaa2_eth_poll()
1963 if (ch->xsk_zc) { in dpaa2_eth_poll()
1983 if (fq->type == DPAA2_RX_FQ) { in dpaa2_eth_poll()
1985 flowid = fq->flowid; in dpaa2_eth_poll()
1998 if (ch->xdp.res & XDP_REDIRECT) in dpaa2_eth_poll()
2004 if (ch->xdp.res & XDP_REDIRECT) in dpaa2_eth_poll()
2008 dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan, in dpaa2_eth_poll()
2009 ch->stats.bytes_per_cdan); in dpaa2_eth_poll()
2010 ch->stats.frames_per_cdan = 0; in dpaa2_eth_poll()
2011 ch->stats.bytes_per_cdan = 0; in dpaa2_eth_poll()
2014 * re-enable data availability notifications in dpaa2_eth_poll()
2018 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); in dpaa2_eth_poll()
2020 } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES); in dpaa2_eth_poll()
2022 ch->nctx.desired_cpu); in dpaa2_eth_poll()
2027 netif_receive_skb_list(ch->rx_list); in dpaa2_eth_poll()
2029 if (ch->xsk_tx_pkts_sent) { in dpaa2_eth_poll()
2030 xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent); in dpaa2_eth_poll()
2031 ch->xsk_tx_pkts_sent = 0; in dpaa2_eth_poll()
2034 if (txc_fq && txc_fq->dq_frames) { in dpaa2_eth_poll()
2035 nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); in dpaa2_eth_poll()
2036 netdev_tx_completed_queue(nq, txc_fq->dq_frames, in dpaa2_eth_poll()
2037 txc_fq->dq_bytes); in dpaa2_eth_poll()
2038 txc_fq->dq_frames = 0; in dpaa2_eth_poll()
2039 txc_fq->dq_bytes = 0; in dpaa2_eth_poll()
2042 if (rx_cleaned && ch->xdp.res & XDP_TX) in dpaa2_eth_poll()
2043 dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]); in dpaa2_eth_poll()
2053 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_enable_ch_napi()
2054 ch = priv->channel[i]; in dpaa2_eth_enable_ch_napi()
2055 napi_enable(&ch->napi); in dpaa2_eth_enable_ch_napi()
2064 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_disable_ch_napi()
2065 ch = priv->channel[i]; in dpaa2_eth_disable_ch_napi()
2066 napi_disable(&ch->napi); in dpaa2_eth_disable_ch_napi()
2083 if (priv->rx_fqtd_enabled == td.enable) in dpaa2_eth_set_rx_taildrop()
2089 for (i = 0; i < priv->num_fqs; i++) { in dpaa2_eth_set_rx_taildrop()
2090 fq = &priv->fq[i]; in dpaa2_eth_set_rx_taildrop()
2091 if (fq->type != DPAA2_RX_FQ) in dpaa2_eth_set_rx_taildrop()
2093 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_rx_taildrop()
2095 fq->tc, fq->flowid, &td); in dpaa2_eth_set_rx_taildrop()
2097 netdev_err(priv->net_dev, in dpaa2_eth_set_rx_taildrop()
2103 priv->rx_fqtd_enabled = td.enable; in dpaa2_eth_set_rx_taildrop()
2111 * want frames in non-PFC enabled traffic classes to be kept in check) in dpaa2_eth_set_rx_taildrop()
2114 if (priv->rx_cgtd_enabled == td.enable) in dpaa2_eth_set_rx_taildrop()
2120 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_rx_taildrop()
2124 netdev_err(priv->net_dev, in dpaa2_eth_set_rx_taildrop()
2130 priv->rx_cgtd_enabled = td.enable; in dpaa2_eth_set_rx_taildrop()
2139 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); in dpaa2_eth_link_state_update()
2141 netdev_err(priv->net_dev, in dpaa2_eth_link_state_update()
2151 dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled); in dpaa2_eth_link_state_update()
2157 * (the writer to priv->mac), so we cannot race with it. in dpaa2_eth_link_state_update()
2159 if (dpaa2_mac_is_type_phy(priv->mac)) in dpaa2_eth_link_state_update()
2163 if (priv->link_state.up == state.up) in dpaa2_eth_link_state_update()
2167 netif_carrier_on(priv->net_dev); in dpaa2_eth_link_state_update()
2168 netif_tx_start_all_queues(priv->net_dev); in dpaa2_eth_link_state_update()
2170 netif_tx_stop_all_queues(priv->net_dev); in dpaa2_eth_link_state_update()
2171 netif_carrier_off(priv->net_dev); in dpaa2_eth_link_state_update()
2174 netdev_info(priv->net_dev, "Link Event: state %s\n", in dpaa2_eth_link_state_update()
2178 priv->link_state = state; in dpaa2_eth_link_state_update()
2190 mutex_lock(&priv->mac_lock); in dpaa2_eth_open()
2208 err = dpni_enable(priv->mc_io, 0, priv->mc_token); in dpaa2_eth_open()
2210 mutex_unlock(&priv->mac_lock); in dpaa2_eth_open()
2216 dpaa2_mac_start(priv->mac); in dpaa2_eth_open()
2218 mutex_unlock(&priv->mac_lock); in dpaa2_eth_open()
2228 /* Total number of in-flight frames on ingress queues */
2235 for (i = 0; i < priv->num_fqs; i++) { in dpaa2_eth_ingress_fq_count()
2236 fq = &priv->fq[i]; in dpaa2_eth_ingress_fq_count()
2237 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); in dpaa2_eth_ingress_fq_count()
2239 netdev_warn(priv->net_dev, "query_fq_count failed"); in dpaa2_eth_ingress_fq_count()
2257 } while (pending && --retries); in dpaa2_eth_wait_for_ingress_fq_empty()
2273 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, in dpaa2_eth_wait_for_egress_fq_empty()
2279 } while (--retries); in dpaa2_eth_wait_for_egress_fq_empty()
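The dpaa2_eth_wait_for_*_fq_empty() helpers above poll the in-flight frame counts with a bounded retry budget while the interface is being torn down. A minimal drain-wait loop in the same spirit, with a stubbed counter and an invented retry limit:

/* Illustrative bounded drain-wait; the counter is a stub. */
#include <stdio.h>
#include <unistd.h>

#define DRAIN_RETRIES	10

static unsigned int pending_frames(void)
{
	static unsigned int left = 3;	/* pretend three polls are needed */

	return left ? left-- : 0;
}

int main(void)
{
	int retries = DRAIN_RETRIES;
	unsigned int pending;

	do {
		pending = pending_frames();
		if (!pending)
			break;
		usleep(1000);		/* give hardware time to drain */
	} while (--retries);

	printf("drained: %s\n", pending ? "no (timed out)" : "yes");
	return 0;
}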
2291 mutex_lock(&priv->mac_lock); in dpaa2_eth_stop()
2294 dpaa2_mac_stop(priv->mac); in dpaa2_eth_stop()
2300 mutex_unlock(&priv->mac_lock); in dpaa2_eth_stop()
2303 * - stop MAC Rx and wait for all Rx frames to be enqueued to software in dpaa2_eth_stop()
2304 * - cut off WRIOP dequeues from egress FQs and wait until transmission in dpaa2_eth_stop()
2315 dpni_disable(priv->mc_io, 0, priv->mc_token); in dpaa2_eth_stop()
2316 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); in dpaa2_eth_stop()
2320 } while (dpni_enabled && --retries); in dpaa2_eth_stop()
2334 /* Empty the Scatter-Gather Buffer cache */ in dpaa2_eth_stop()
2343 struct device *dev = net_dev->dev.parent; in dpaa2_eth_set_addr()
2352 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_addr()
2353 net_dev->dev_addr); in dpaa2_eth_set_addr()
2376 percpu_stats = per_cpu_ptr(priv->percpu_stats, i); in dpaa2_eth_get_stats()
2393 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_add_uc_hw_addr()
2394 ha->addr); in dpaa2_eth_add_uc_hw_addr()
2396 netdev_warn(priv->net_dev, in dpaa2_eth_add_uc_hw_addr()
2398 ha->addr, err); in dpaa2_eth_add_uc_hw_addr()
2412 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_add_mc_hw_addr()
2413 ha->addr); in dpaa2_eth_add_mc_hw_addr()
2415 netdev_warn(priv->net_dev, in dpaa2_eth_add_mc_hw_addr()
2417 ha->addr, err); in dpaa2_eth_add_mc_hw_addr()
2427 err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_rx_add_vid()
2431 netdev_warn(priv->net_dev, in dpaa2_eth_rx_add_vid()
2446 err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid); in dpaa2_eth_rx_kill_vid()
2449 netdev_warn(priv->net_dev, in dpaa2_eth_rx_kill_vid()
2463 u8 max_mac = priv->dpni_attrs.mac_filter_entries; in dpaa2_eth_set_rx_mode()
2464 u32 options = priv->dpni_attrs.options; in dpaa2_eth_set_rx_mode()
2465 u16 mc_token = priv->mc_token; in dpaa2_eth_set_rx_mode()
2466 struct fsl_mc_io *mc_io = priv->mc_io; in dpaa2_eth_set_rx_mode()
2490 if (net_dev->flags & IFF_PROMISC) in dpaa2_eth_set_rx_mode()
2492 if (net_dev->flags & IFF_ALLMULTI) { in dpaa2_eth_set_rx_mode()
2560 netdev_features_t changed = features ^ net_dev->features; in dpaa2_eth_set_features()
2594 return -EINVAL; in dpaa2_eth_ts_ioctl()
2596 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) in dpaa2_eth_ts_ioctl()
2597 return -EFAULT; in dpaa2_eth_ts_ioctl()
2603 priv->tx_tstamp_type = config.tx_type; in dpaa2_eth_ts_ioctl()
2606 return -ERANGE; in dpaa2_eth_ts_ioctl()
2610 priv->rx_tstamp = false; in dpaa2_eth_ts_ioctl()
2612 priv->rx_tstamp = true; in dpaa2_eth_ts_ioctl()
2617 if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) in dpaa2_eth_ts_ioctl()
2620 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? in dpaa2_eth_ts_ioctl()
2621 -EFAULT : 0; in dpaa2_eth_ts_ioctl()
2632 mutex_lock(&priv->mac_lock); in dpaa2_eth_ioctl()
2635 err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd); in dpaa2_eth_ioctl()
2636 mutex_unlock(&priv->mac_lock); in dpaa2_eth_ioctl()
2640 mutex_unlock(&priv->mac_lock); in dpaa2_eth_ioctl()
2642 return -EOPNOTSUPP; in dpaa2_eth_ioctl()
2650 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - in xdp_mtu_valid()
2651 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; in xdp_mtu_valid()
2654 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", in xdp_mtu_valid()
2655 linear_mfl - VLAN_ETH_HLEN); in xdp_mtu_valid()
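xdp_mtu_valid() above derives the largest MTU usable with XDP from the Rx buffer size minus the hardware annotation area, the driver's Rx headroom and XDP_PACKET_HEADROOM, then checks mtu + VLAN_ETH_HLEN against that bound. A worked example with stand-in sizes (the real values differ):

/* Illustrative XDP MTU bound; all sizes below are placeholders. */
#include <stdbool.h>
#include <stdio.h>

#define RX_BUF_SIZE		2048
#define RX_HWA_SIZE		64
#define RX_HEAD_ROOM		128
#define XDP_PACKET_HEADROOM	256
#define VLAN_ETH_HLEN		18

static bool xdp_mtu_valid(int mtu)
{
	int linear_mfl = RX_BUF_SIZE - RX_HWA_SIZE - RX_HEAD_ROOM -
			 XDP_PACKET_HEADROOM;

	if (mtu + VLAN_ETH_HLEN > linear_mfl) {
		printf("maximum MTU for XDP is %d\n", linear_mfl - VLAN_ETH_HLEN);
		return false;
	}
	return true;
}

int main(void)
{
	printf("MTU 1500 valid: %d\n", xdp_mtu_valid(1500));	/* 1518 <= 1600 */
	printf("MTU 1600 valid: %d\n", xdp_mtu_valid(1600));	/* 1618 >  1600 */
	return 0;
}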
2676 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl); in dpaa2_eth_set_rx_mfl()
2678 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n"); in dpaa2_eth_set_rx_mfl()
2690 if (!priv->xdp_prog) in dpaa2_eth_change_mtu()
2694 return -EINVAL; in dpaa2_eth_change_mtu()
2701 WRITE_ONCE(dev->mtu, new_mtu); in dpaa2_eth_change_mtu()
2710 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_update_rx_buffer_headroom()
2713 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n"); in dpaa2_eth_update_rx_buffer_headroom()
2721 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_update_rx_buffer_headroom()
2724 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n"); in dpaa2_eth_update_rx_buffer_headroom()
2739 if (prog && !xdp_mtu_valid(priv, dev->mtu)) in dpaa2_eth_setup_xdp()
2740 return -EINVAL; in dpaa2_eth_setup_xdp()
2743 bpf_prog_add(prog, priv->num_channels); in dpaa2_eth_setup_xdp()
2746 need_update = (!!priv->xdp_prog != !!prog); in dpaa2_eth_setup_xdp()
2752 * Also, when switching between xdp/non-xdp modes we need to reconfigure in dpaa2_eth_setup_xdp()
2757 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog); in dpaa2_eth_setup_xdp()
2765 old = xchg(&priv->xdp_prog, prog); in dpaa2_eth_setup_xdp()
2769 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_setup_xdp()
2770 ch = priv->channel[i]; in dpaa2_eth_setup_xdp()
2771 old = xchg(&ch->xdp.prog, prog); in dpaa2_eth_setup_xdp()
2786 bpf_prog_sub(prog, priv->num_channels); in dpaa2_eth_setup_xdp()
2795 switch (xdp->command) { in dpaa2_eth_xdp()
2797 return dpaa2_eth_setup_xdp(dev, xdp->prog); in dpaa2_eth_xdp()
2799 return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id); in dpaa2_eth_xdp()
2801 return -EINVAL; in dpaa2_eth_xdp()
2811 struct device *dev = net_dev->dev.parent; in dpaa2_eth_xdp_create_fd()
2821 if (xdpf->headroom < needed_headroom) in dpaa2_eth_xdp_create_fd()
2822 return -EINVAL; in dpaa2_eth_xdp_create_fd()
2828 buffer_start = xdpf->data - needed_headroom; in dpaa2_eth_xdp_create_fd()
2829 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, in dpaa2_eth_xdp_create_fd()
2831 if (aligned_start >= xdpf->data - xdpf->headroom) in dpaa2_eth_xdp_create_fd()
2836 swa->type = DPAA2_ETH_SWA_XDP; in dpaa2_eth_xdp_create_fd()
2837 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; in dpaa2_eth_xdp_create_fd()
2838 swa->xdp.xdpf = xdpf; in dpaa2_eth_xdp_create_fd()
2841 swa->xdp.dma_size, in dpaa2_eth_xdp_create_fd()
2844 return -ENOMEM; in dpaa2_eth_xdp_create_fd()
2847 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start); in dpaa2_eth_xdp_create_fd()
2848 dpaa2_fd_set_len(fd, xdpf->len); in dpaa2_eth_xdp_create_fd()
2866 return -EINVAL; in dpaa2_eth_xdp_xmit()
2869 return -ENETDOWN; in dpaa2_eth_xdp_xmit()
2871 fq = &priv->fq[smp_processor_id()]; in dpaa2_eth_xdp_xmit()
2872 xdp_redirect_fds = &fq->xdp_redirect_fds; in dpaa2_eth_xdp_xmit()
2873 fds = xdp_redirect_fds->fds; in dpaa2_eth_xdp_xmit()
2875 percpu_stats = this_cpu_ptr(priv->percpu_stats); in dpaa2_eth_xdp_xmit()
2883 xdp_redirect_fds->num = i; in dpaa2_eth_xdp_xmit()
2889 percpu_stats->tx_packets += enqueued; in dpaa2_eth_xdp_xmit()
2891 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); in dpaa2_eth_xdp_xmit()
2898 struct net_device *net_dev = priv->net_dev; in update_xps()
2905 return -ENOMEM; in update_xps()
2908 netdev_queues = (net_dev->num_tc ? : 1) * num_queues; in update_xps()
2910 /* The first <num_queues> entries in priv->fq array are Tx/Tx conf in update_xps()
2914 fq = &priv->fq[i % num_queues]; in update_xps()
2917 cpumask_set_cpu(fq->target_cpu, xps_mask); in update_xps()
2937 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in dpaa2_eth_setup_mqprio()
2939 num_tc = mqprio->num_tc; in dpaa2_eth_setup_mqprio()
2941 if (num_tc == net_dev->num_tc) in dpaa2_eth_setup_mqprio()
2947 return -EOPNOTSUPP; in dpaa2_eth_setup_mqprio()
2972 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params; in dpaa2_eth_setup_tbf()
2978 if (p->command == TC_TBF_STATS) in dpaa2_eth_setup_tbf()
2979 return -EOPNOTSUPP; in dpaa2_eth_setup_tbf()
2982 if (p->parent != TC_H_ROOT) in dpaa2_eth_setup_tbf()
2983 return -EOPNOTSUPP; in dpaa2_eth_setup_tbf()
2985 if (p->command == TC_TBF_REPLACE) { in dpaa2_eth_setup_tbf()
2986 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) { in dpaa2_eth_setup_tbf()
2989 return -EINVAL; in dpaa2_eth_setup_tbf()
2992 tx_cr_shaper.max_burst_size = cfg->max_size; in dpaa2_eth_setup_tbf()
2996 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps); in dpaa2_eth_setup_tbf()
2999 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper, in dpaa2_eth_setup_tbf()
3018 return -EOPNOTSUPP; in dpaa2_eth_setup_tc()
3047 ch->stats.cdan++; in dpaa2_eth_cdan_cb()
3052 if (!napi_if_scheduled_mark_missed(&ch->napi)) in dpaa2_eth_cdan_cb()
3053 napi_schedule(&ch->napi); in dpaa2_eth_cdan_cb()
3060 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_setup_dpcon()
3066 if (err == -ENXIO) { in dpaa2_eth_setup_dpcon()
3068 err = -EPROBE_DEFER; in dpaa2_eth_setup_dpcon()
3070 dev_info(dev, "Not enough DPCONs, will go on as-is\n"); in dpaa2_eth_setup_dpcon()
3075 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); in dpaa2_eth_setup_dpcon()
3081 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); in dpaa2_eth_setup_dpcon()
3087 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); in dpaa2_eth_setup_dpcon()
3096 dpcon_close(priv->mc_io, 0, dpcon->mc_handle); in dpaa2_eth_setup_dpcon()
3106 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); in dpaa2_eth_free_dpcon()
3107 dpcon_close(priv->mc_io, 0, dpcon->mc_handle); in dpaa2_eth_free_dpcon()
3115 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_alloc_channel()
3122 channel->dpcon = dpaa2_eth_setup_dpcon(priv); in dpaa2_eth_alloc_channel()
3123 if (IS_ERR(channel->dpcon)) { in dpaa2_eth_alloc_channel()
3124 err = PTR_ERR(channel->dpcon); in dpaa2_eth_alloc_channel()
3128 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, in dpaa2_eth_alloc_channel()
3135 channel->dpcon_id = attr.id; in dpaa2_eth_alloc_channel()
3136 channel->ch_id = attr.qbman_ch_id; in dpaa2_eth_alloc_channel()
3137 channel->priv = priv; in dpaa2_eth_alloc_channel()
3142 dpaa2_eth_free_dpcon(priv, channel->dpcon); in dpaa2_eth_alloc_channel()
3151 dpaa2_eth_free_dpcon(priv, channel->dpcon); in dpaa2_eth_free_channel()
3163 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_setup_dpio()
3175 cpumask_clear(&priv->dpio_cpumask); in dpaa2_eth_setup_dpio()
3181 if (err == -EPROBE_DEFER) in dpaa2_eth_setup_dpio()
3189 priv->channel[priv->num_channels] = channel; in dpaa2_eth_setup_dpio()
3191 nctx = &channel->nctx; in dpaa2_eth_setup_dpio()
3192 nctx->is_cdan = 1; in dpaa2_eth_setup_dpio()
3193 nctx->cb = dpaa2_eth_cdan_cb; in dpaa2_eth_setup_dpio()
3194 nctx->id = channel->ch_id; in dpaa2_eth_setup_dpio()
3195 nctx->desired_cpu = i; in dpaa2_eth_setup_dpio()
3198 channel->dpio = dpaa2_io_service_select(i); in dpaa2_eth_setup_dpio()
3199 err = dpaa2_io_service_register(channel->dpio, nctx, dev); in dpaa2_eth_setup_dpio()
3207 err = -EPROBE_DEFER; in dpaa2_eth_setup_dpio()
3212 dpcon_notif_cfg.dpio_id = nctx->dpio_id; in dpaa2_eth_setup_dpio()
3214 dpcon_notif_cfg.user_ctx = nctx->qman64; in dpaa2_eth_setup_dpio()
3215 err = dpcon_set_notification(priv->mc_io, 0, in dpaa2_eth_setup_dpio()
3216 channel->dpcon->mc_handle, in dpaa2_eth_setup_dpio()
3226 cpumask_set_cpu(i, &priv->dpio_cpumask); in dpaa2_eth_setup_dpio()
3227 priv->num_channels++; in dpaa2_eth_setup_dpio()
3232 if (priv->num_channels == priv->dpni_attrs.num_queues) in dpaa2_eth_setup_dpio()
3239 dpaa2_io_service_deregister(channel->dpio, nctx, dev); in dpaa2_eth_setup_dpio()
3243 if (err == -EPROBE_DEFER) { in dpaa2_eth_setup_dpio()
3244 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_setup_dpio()
3245 channel = priv->channel[i]; in dpaa2_eth_setup_dpio()
3246 nctx = &channel->nctx; in dpaa2_eth_setup_dpio()
3247 dpaa2_io_service_deregister(channel->dpio, nctx, dev); in dpaa2_eth_setup_dpio()
3250 priv->num_channels = 0; in dpaa2_eth_setup_dpio()
3254 if (cpumask_empty(&priv->dpio_cpumask)) { in dpaa2_eth_setup_dpio()
3256 return -ENODEV; in dpaa2_eth_setup_dpio()
3260 cpumask_pr_args(&priv->dpio_cpumask)); in dpaa2_eth_setup_dpio()
3267 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_free_dpio()
3272 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_free_dpio()
3273 ch = priv->channel[i]; in dpaa2_eth_free_dpio()
3274 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); in dpaa2_eth_free_dpio()
3282 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_get_affine_channel()
3285 for (i = 0; i < priv->num_channels; i++) in dpaa2_eth_get_affine_channel()
3286 if (priv->channel[i]->nctx.desired_cpu == cpu) in dpaa2_eth_get_affine_channel()
3287 return priv->channel[i]; in dpaa2_eth_get_affine_channel()
3294 return priv->channel[0]; in dpaa2_eth_get_affine_channel()
3299 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_set_fq_affinity()
3308 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); in dpaa2_eth_set_fq_affinity()
3310 for (i = 0; i < priv->num_fqs; i++) { in dpaa2_eth_set_fq_affinity()
3311 fq = &priv->fq[i]; in dpaa2_eth_set_fq_affinity()
3312 switch (fq->type) { in dpaa2_eth_set_fq_affinity()
3315 fq->target_cpu = rx_cpu; in dpaa2_eth_set_fq_affinity()
3316 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); in dpaa2_eth_set_fq_affinity()
3318 rx_cpu = cpumask_first(&priv->dpio_cpumask); in dpaa2_eth_set_fq_affinity()
3321 fq->target_cpu = txc_cpu; in dpaa2_eth_set_fq_affinity()
3322 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); in dpaa2_eth_set_fq_affinity()
3324 txc_cpu = cpumask_first(&priv->dpio_cpumask); in dpaa2_eth_set_fq_affinity()
3327 dev_err(dev, "Unknown FQ type: %d\n", fq->type); in dpaa2_eth_set_fq_affinity()
3329 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu); in dpaa2_eth_set_fq_affinity()
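dpaa2_eth_set_fq_affinity() above spreads Rx and Tx-confirmation queues over the CPUs in dpio_cpumask in round-robin order, wrapping back to the first CPU when the mask is exhausted. A trivial model of that assignment, with example CPU ids:

/* Illustrative round-robin queue-to-CPU assignment; ids are examples. */
#include <stdio.h>

int main(void)
{
	int cpus[] = { 0, 2, 4, 6 };		/* pretend affine DPIO CPUs */
	int num_cpus = 4, num_queues = 10;
	int next = 0;

	for (int q = 0; q < num_queues; q++) {
		printf("queue %d -> cpu %d\n", q, cpus[next]);
		next = (next + 1) % num_cpus;	/* wrap to the first CPU */
	}
	return 0;
}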
3344 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; in dpaa2_eth_setup_fqs()
3345 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; in dpaa2_eth_setup_fqs()
3346 priv->fq[priv->num_fqs++].flowid = (u16)i; in dpaa2_eth_setup_fqs()
3351 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; in dpaa2_eth_setup_fqs()
3352 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; in dpaa2_eth_setup_fqs()
3353 priv->fq[priv->num_fqs].tc = (u8)j; in dpaa2_eth_setup_fqs()
3354 priv->fq[priv->num_fqs++].flowid = (u16)i; in dpaa2_eth_setup_fqs()
3359 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; in dpaa2_eth_setup_fqs()
3360 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; in dpaa2_eth_setup_fqs()
3369 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_allocate_dpbp()
3378 if (err == -ENXIO) in dpaa2_eth_allocate_dpbp()
3379 err = -EPROBE_DEFER; in dpaa2_eth_allocate_dpbp()
3387 err = -ENOMEM; in dpaa2_eth_allocate_dpbp()
3391 err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id, in dpaa2_eth_allocate_dpbp()
3392 &dpbp_dev->mc_handle); in dpaa2_eth_allocate_dpbp()
3398 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); in dpaa2_eth_allocate_dpbp()
3404 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); in dpaa2_eth_allocate_dpbp()
3410 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, in dpaa2_eth_allocate_dpbp()
3417 bp->dev = dpbp_dev; in dpaa2_eth_allocate_dpbp()
3418 bp->bpid = dpbp_attrs.bpid; in dpaa2_eth_allocate_dpbp()
3423 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); in dpaa2_eth_allocate_dpbp()
3426 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); in dpaa2_eth_allocate_dpbp()
3444 priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp; in dpaa2_eth_setup_default_dpbp()
3445 priv->num_bps++; in dpaa2_eth_setup_default_dpbp()
3447 for (i = 0; i < priv->num_channels; i++) in dpaa2_eth_setup_default_dpbp()
3448 priv->channel[i]->bp = bp; in dpaa2_eth_setup_default_dpbp()
3458 for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++) in dpaa2_eth_free_dpbp()
3459 if (priv->bp[idx_bp] == bp) in dpaa2_eth_free_dpbp()
3463 dpaa2_eth_drain_pool(priv, bp->bpid); in dpaa2_eth_free_dpbp()
3464 dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle); in dpaa2_eth_free_dpbp()
3465 dpbp_close(priv->mc_io, 0, bp->dev->mc_handle); in dpaa2_eth_free_dpbp()
3466 fsl_mc_object_free(bp->dev); in dpaa2_eth_free_dpbp()
3470 priv->bp[idx_bp] = priv->bp[priv->num_bps - 1]; in dpaa2_eth_free_dpbp()
3471 priv->num_bps--; in dpaa2_eth_free_dpbp()
3478 for (i = 0; i < priv->num_bps; i++) in dpaa2_eth_free_dpbps()
3479 dpaa2_eth_free_dpbp(priv, priv->bp[i]); in dpaa2_eth_free_dpbps()
3484 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_set_buffer_layout()
3493 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || in dpaa2_eth_set_buffer_layout()
3494 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) in dpaa2_eth_set_buffer_layout()
3502 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); in dpaa2_eth_set_buffer_layout()
3511 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_buffer_layout()
3518 /* tx-confirm buffer */ in dpaa2_eth_set_buffer_layout()
3521 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_buffer_layout()
3531 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_buffer_layout()
3532 &priv->tx_data_offset); in dpaa2_eth_set_buffer_layout()
3538 if ((priv->tx_data_offset % 64) != 0) in dpaa2_eth_set_buffer_layout()
3540 priv->tx_data_offset); in dpaa2_eth_set_buffer_layout()
3553 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_buffer_layout()
3574 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, in dpaa2_eth_enqueue_qd()
3575 priv->tx_qdid, prio, in dpaa2_eth_enqueue_qd()
3576 fq->tx_qdbin, fd); in dpaa2_eth_enqueue_qd()
3590 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio, in dpaa2_eth_enqueue_fq_multiple()
3591 fq->tx_fqid[prio], in dpaa2_eth_enqueue_fq_multiple()
3595 return -EBUSY; in dpaa2_eth_enqueue_fq_multiple()
3606 priv->enqueue = dpaa2_eth_enqueue_qd; in dpaa2_eth_set_enqueue_mode()
3608 priv->enqueue = dpaa2_eth_enqueue_fq_multiple; in dpaa2_eth_set_enqueue_mode()
3613 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_set_pause()
3618 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); in dpaa2_eth_set_pause()
3627 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); in dpaa2_eth_set_pause()
3633 priv->link_state.options = link_cfg.options; in dpaa2_eth_set_pause()
3645 /* We only use Tx FQIDs for FQID-based enqueue, so check in dpaa2_eth_update_tx_fqids()
3652 for (i = 0; i < priv->num_fqs; i++) { in dpaa2_eth_update_tx_fqids()
3653 fq = &priv->fq[i]; in dpaa2_eth_update_tx_fqids()
3654 if (fq->type != DPAA2_TX_CONF_FQ) in dpaa2_eth_update_tx_fqids()
3657 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_update_tx_fqids()
3658 DPNI_QUEUE_TX, j, fq->flowid, in dpaa2_eth_update_tx_fqids()
3663 fq->tx_fqid[j] = qid.fqid; in dpaa2_eth_update_tx_fqids()
3664 if (fq->tx_fqid[j] == 0) in dpaa2_eth_update_tx_fqids()
3669 priv->enqueue = dpaa2_eth_enqueue_fq_multiple; in dpaa2_eth_update_tx_fqids()
3674 netdev_info(priv->net_dev, in dpaa2_eth_update_tx_fqids()
3675 "Error reading Tx FQID, fallback to QDID-based enqueue\n"); in dpaa2_eth_update_tx_fqids()
3676 priv->enqueue = dpaa2_eth_enqueue_qd; in dpaa2_eth_update_tx_fqids()
3682 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_set_vlan_qos()
3690 /* VLAN-based classification only makes sense if we have multiple in dpaa2_eth_set_vlan_qos()
3692 * Also, we need to extract just the 3-bit PCP field from the VLAN in dpaa2_eth_set_vlan_qos()
3696 dev_dbg(dev, "VLAN-based QoS classification not supported\n"); in dpaa2_eth_set_vlan_qos()
3697 return -EOPNOTSUPP; in dpaa2_eth_set_vlan_qos()
3702 return -ENOMEM; in dpaa2_eth_set_vlan_qos()
3724 err = -ENOMEM; in dpaa2_eth_set_vlan_qos()
3728 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); in dpaa2_eth_set_vlan_qos()
3737 err = -ENOMEM; in dpaa2_eth_set_vlan_qos()
3747 err = -ENOMEM; in dpaa2_eth_set_vlan_qos()
3754 /* We add rules for PCP-based distribution starting with highest in dpaa2_eth_set_vlan_qos()
3759 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) { in dpaa2_eth_set_vlan_qos()
3764 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_vlan_qos()
3768 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token); in dpaa2_eth_set_vlan_qos()
3773 priv->vlan_cls_enabled = true; in dpaa2_eth_set_vlan_qos()
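dpaa2_eth_set_vlan_qos() above installs QoS table entries that pair VLAN PCP values with traffic classes, starting from the highest priority (PCP 7) and walking down. The loop shape, with an illustrative class count and a mapping that is only an example:

/* Illustrative descending PCP-to-TC pairing; the mapping is an example. */
#include <stdio.h>

int main(void)
{
	int num_tcs = 4;

	for (int i = num_tcs - 1, pcp = 7; i >= 0; i--, pcp--)
		printf("VLAN PCP %d -> traffic class %d\n", pcp, i);
	return 0;
}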
3794 struct device *dev = &ls_dev->dev; in dpaa2_eth_setup_dpni()
3803 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); in dpaa2_eth_setup_dpni()
3810 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, in dpaa2_eth_setup_dpni()
3811 &priv->dpni_ver_minor); in dpaa2_eth_setup_dpni()
3818 priv->dpni_ver_major, priv->dpni_ver_minor, in dpaa2_eth_setup_dpni()
3820 err = -EOPNOTSUPP; in dpaa2_eth_setup_dpni()
3824 ls_dev->mc_io = priv->mc_io; in dpaa2_eth_setup_dpni()
3825 ls_dev->mc_handle = priv->mc_token; in dpaa2_eth_setup_dpni()
3827 err = dpni_reset(priv->mc_io, 0, priv->mc_token); in dpaa2_eth_setup_dpni()
3833 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_setup_dpni()
3834 &priv->dpni_attrs); in dpaa2_eth_setup_dpni()
3854 if (err && err != -EOPNOTSUPP) in dpaa2_eth_setup_dpni()
3857 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv), in dpaa2_eth_setup_dpni()
3860 if (!priv->cls_rules) { in dpaa2_eth_setup_dpni()
3861 err = -ENOMEM; in dpaa2_eth_setup_dpni()
3868 dpni_close(priv->mc_io, 0, priv->mc_token); in dpaa2_eth_setup_dpni()
3877 err = dpni_reset(priv->mc_io, 0, priv->mc_token); in dpaa2_eth_free_dpni()
3879 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", in dpaa2_eth_free_dpni()
3882 dpni_close(priv->mc_io, 0, priv->mc_token); in dpaa2_eth_free_dpni()
3888 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_setup_rx_flow()
3893 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_setup_rx_flow()
3894 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid); in dpaa2_eth_setup_rx_flow()
3900 fq->fqid = qid.fqid; in dpaa2_eth_setup_rx_flow()
3902 queue.destination.id = fq->channel->dpcon_id; in dpaa2_eth_setup_rx_flow()
3906 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_setup_rx_flow()
3907 DPNI_QUEUE_RX, fq->tc, fq->flowid, in dpaa2_eth_setup_rx_flow()
3917 if (fq->tc > 0) in dpaa2_eth_setup_rx_flow()
3920 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, in dpaa2_eth_setup_rx_flow()
3921 fq->flowid, 0); in dpaa2_eth_setup_rx_flow()
3927 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, in dpaa2_eth_setup_rx_flow()
3940 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_setup_tx_flow()
3946 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_setup_tx_flow()
3947 DPNI_QUEUE_TX, i, fq->flowid, in dpaa2_eth_setup_tx_flow()
3953 fq->tx_fqid[i] = qid.fqid; in dpaa2_eth_setup_tx_flow()
3957 fq->tx_qdbin = qid.qdbin; in dpaa2_eth_setup_tx_flow()
3959 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_setup_tx_flow()
3960 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, in dpaa2_eth_setup_tx_flow()
3967 fq->fqid = qid.fqid; in dpaa2_eth_setup_tx_flow()
3969 queue.destination.id = fq->channel->dpcon_id; in dpaa2_eth_setup_tx_flow()
3973 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_setup_tx_flow()
3974 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, in dpaa2_eth_setup_tx_flow()
3988 struct device *dev = priv->net_dev->dev.parent; in setup_rx_err_flow()
3994 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, in setup_rx_err_flow()
4001 fq->fqid = qid.fqid; in setup_rx_err_flow()
4003 q.destination.id = fq->channel->dpcon_id; in setup_rx_err_flow()
4007 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, in setup_rx_err_flow()
4087 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_config_legacy_hash_key()
4098 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_config_legacy_hash_key()
4112 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_config_hash_key()
4124 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_config_hash_key()
4131 /* If the flow steering / hashing key is shared between all in dpaa2_eth_config_hash_key()
4134 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) in dpaa2_eth_config_hash_key()
4144 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_config_cls_key()
4156 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_config_cls_key()
4163 /* If the flow steering / hashing key is shared between all in dpaa2_eth_config_cls_key()
4166 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) in dpaa2_eth_config_cls_key()
4227 struct device *dev = net_dev->dev.parent; in dpaa2_eth_set_dist_key()
4252 return -E2BIG; in dpaa2_eth_set_dist_key()
4255 key->type = DPKG_EXTRACT_FROM_HDR; in dpaa2_eth_set_dist_key()
4256 key->extract.from_hdr.prot = dist_fields[i].cls_prot; in dpaa2_eth_set_dist_key()
4257 key->extract.from_hdr.type = DPKG_FULL_FIELD; in dpaa2_eth_set_dist_key()
4258 key->extract.from_hdr.field = dist_fields[i].cls_field; in dpaa2_eth_set_dist_key()
4264 return -ENOMEM; in dpaa2_eth_set_dist_key()
4277 err = -ENOMEM; in dpaa2_eth_set_dist_key()
4293 priv->rx_hash_fields = rx_hash_fields; in dpaa2_eth_set_dist_key()
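/* The distribution key itself is described through a dpkg_profile_cfg: each
 * header field to hash or steer on becomes one DPKG_EXTRACT_FROM_HDR
 * extract, and the profile is serialized into a DMA-able buffer with
 * dpni_prepare_key_cfg() before being handed to the MC. A minimal sketch
 * for a 2-tuple (IP source + destination) key; the field choice and helper
 * name are illustrative.
 */
static int example_build_dist_key(u8 *key_cfg_buf)
{
	struct dpkg_profile_cfg cls_cfg;
	struct dpkg_extract *key;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	key = &cls_cfg.extracts[cls_cfg.num_extracts++];
	key->type = DPKG_EXTRACT_FROM_HDR;
	key->extract.from_hdr.prot = NET_PROT_IP;
	key->extract.from_hdr.type = DPKG_FULL_FIELD;
	key->extract.from_hdr.field = NH_FLD_IP_SRC;

	key = &cls_cfg.extracts[cls_cfg.num_extracts++];
	key->type = DPKG_EXTRACT_FROM_HDR;
	key->extract.from_hdr.prot = NET_PROT_IP;
	key->extract.from_hdr.type = DPKG_FULL_FIELD;
	key->extract.from_hdr.field = NH_FLD_IP_DST;

	/* key_cfg_buf is expected to be DPAA2_CLASSIFIER_DMA_SIZE bytes and
	 * gets DMA-mapped by the caller before the dpni_set_rx_*_dist() call
	 */
	return dpni_prepare_key_cfg(&cls_cfg, key_cfg_buf);
}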
4307 return -EOPNOTSUPP; in dpaa2_eth_set_hash()
4323 struct device *dev = priv->net_dev->dev.parent; in dpaa2_eth_set_default_cls()
4329 return -EOPNOTSUPP; in dpaa2_eth_set_default_cls()
4334 return -EOPNOTSUPP; in dpaa2_eth_set_default_cls()
4339 return -EOPNOTSUPP; in dpaa2_eth_set_default_cls()
4349 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL); in dpaa2_eth_set_default_cls()
4354 priv->rx_cls_enabled = 1; in dpaa2_eth_set_default_cls()
4364 struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; in dpaa2_eth_bind_dpni()
4365 struct net_device *net_dev = priv->net_dev; in dpaa2_eth_bind_dpni()
4367 struct device *dev = net_dev->dev.parent; in dpaa2_eth_bind_dpni()
4373 pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id; in dpaa2_eth_bind_dpni()
4375 pools_params.pools[0].buffer_size = priv->rx_buf_size; in dpaa2_eth_bind_dpni()
4376 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); in dpaa2_eth_bind_dpni()
4386 if (err && err != -EOPNOTSUPP) in dpaa2_eth_bind_dpni()
4393 if (err && err != -EOPNOTSUPP) in dpaa2_eth_bind_dpni()
4400 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_bind_dpni()
4408 for (i = 0; i < priv->num_fqs; i++) { in dpaa2_eth_bind_dpni()
4409 switch (priv->fq[i].type) { in dpaa2_eth_bind_dpni()
4411 err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]); in dpaa2_eth_bind_dpni()
4414 err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]); in dpaa2_eth_bind_dpni()
4417 err = setup_rx_err_flow(priv, &priv->fq[i]); in dpaa2_eth_bind_dpni()
4420 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); in dpaa2_eth_bind_dpni()
4421 return -EINVAL; in dpaa2_eth_bind_dpni()
4427 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_bind_dpni()
4428 DPNI_QUEUE_TX, &priv->tx_qdid); in dpaa2_eth_bind_dpni()
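/* dpaa2_eth_bind_dpni() above glues the pieces together: attach the buffer
 * pool, pick the error-frame behaviour, configure every frame queue
 * according to its type and finally cache the Tx queuing destination ID.
 * A sketch of just the buffer-pool step, assuming a single default DPBP as
 * in the listing; the helper name is illustrative.
 */
static int example_bind_buffer_pool(struct dpaa2_eth_priv *priv,
				    struct dpaa2_eth_bp *bp)
{
	struct dpni_pools_cfg pools_params;

	memset(&pools_params, 0, sizeof(pools_params));
	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = priv->rx_buf_size;

	return dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
}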
4440 struct net_device *net_dev = priv->net_dev; in dpaa2_eth_alloc_rings()
4441 struct device *dev = net_dev->dev.parent; in dpaa2_eth_alloc_rings()
4444 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_alloc_rings()
4445 priv->channel[i]->store = in dpaa2_eth_alloc_rings()
4447 if (!priv->channel[i]->store) { in dpaa2_eth_alloc_rings()
4456 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_alloc_rings()
4457 if (!priv->channel[i]->store) in dpaa2_eth_alloc_rings()
4459 dpaa2_io_store_destroy(priv->channel[i]->store); in dpaa2_eth_alloc_rings()
4462 return -ENOMEM; in dpaa2_eth_alloc_rings()
4469 for (i = 0; i < priv->num_channels; i++) in dpaa2_eth_free_rings()
4470 dpaa2_io_store_destroy(priv->channel[i]->store); in dpaa2_eth_free_rings()
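/* Each channel needs a dpaa2_io_store to hold dequeue results pulled from
 * its software portal; when one allocation fails, the stores created so far
 * are torn down again. A sketch of that create/unwind pairing, assuming the
 * DPAA2_ETH_STORE_SIZE sizing used elsewhere in the driver; the helper name
 * is illustrative.
 */
static int example_alloc_stores(struct dpaa2_eth_priv *priv,
				struct device *dev)
{
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store)
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* Destroy only the stores that were successfully created */
	while (--i >= 0)
		dpaa2_io_store_destroy(priv->channel[i]->store);
	return -ENOMEM;
}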
4475 struct net_device *net_dev = priv->net_dev; in dpaa2_eth_set_mac_addr()
4476 struct device *dev = net_dev->dev.parent; in dpaa2_eth_set_mac_addr()
4481 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); in dpaa2_eth_set_mac_addr()
4488 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_mac_addr()
4499 err = dpni_set_primary_mac_addr(priv->mc_io, 0, in dpaa2_eth_set_mac_addr()
4500 priv->mc_token, in dpaa2_eth_set_mac_addr()
4509 /* No MAC address configured, fill in net_dev->dev_addr in dpaa2_eth_set_mac_addr()
4510  * with a random one in dpaa2_eth_set_mac_addr()
4511  */ in dpaa2_eth_set_mac_addr()
4512 eth_hw_addr_random(net_dev); in dpaa2_eth_set_mac_addr()
4513 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); in dpaa2_eth_set_mac_addr()
4515 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_set_mac_addr()
4516 net_dev->dev_addr); in dpaa2_eth_set_mac_addr()
4525  * register_netdevice() to properly fill up net_dev->perm_addr. in dpaa2_eth_set_mac_addr()
4526  */ in dpaa2_eth_set_mac_addr()
4527 net_dev->addr_assign_type = NET_ADDR_PERM; in dpaa2_eth_set_mac_addr()
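/* MAC address selection above falls through three sources: the address
 * burned into the physical port (if any), then an address already
 * programmed on the DPNI, and finally a randomly generated one that is
 * written back to the DPNI so it behaves like a permanent address. A
 * condensed sketch with error handling trimmed; the helper name is
 * illustrative.
 */
static void example_pick_mac(struct dpaa2_eth_priv *priv,
			     struct net_device *net_dev)
{
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];

	dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
				  dpni_mac_addr);

	if (!is_zero_ether_addr(mac_addr)) {
		/* The port has a factory MAC: make it the DPNI's primary
		 * address as well
		 */
		dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					  mac_addr);
		eth_hw_addr_set(net_dev, mac_addr);
	} else if (!is_zero_ether_addr(dpni_mac_addr)) {
		/* Keep whatever was pre-provisioned on the DPNI */
		eth_hw_addr_set(net_dev, dpni_mac_addr);
	} else {
		/* Nothing configured anywhere: generate a random address and
		 * write it back so it persists as the "permanent" one
		 */
		eth_hw_addr_random(net_dev);
		dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					  net_dev->dev_addr);
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}
}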
4540 struct device *dev = net_dev->dev.parent; in dpaa2_eth_netdev_init()
4542 u32 options = priv->dpni_attrs.options; in dpaa2_eth_netdev_init()
4548 net_dev->netdev_ops = &dpaa2_eth_ops; in dpaa2_eth_netdev_init()
4549 net_dev->ethtool_ops = &dpaa2_ethtool_ops; in dpaa2_eth_netdev_init()
4557 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); in dpaa2_eth_netdev_init()
4564 net_dev->max_mtu = DPAA2_ETH_MAX_MTU; in dpaa2_eth_netdev_init()
4565 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, in dpaa2_eth_netdev_init()
4595 net_dev->priv_flags |= supported; in dpaa2_eth_netdev_init()
4596 net_dev->priv_flags &= ~not_supported; in dpaa2_eth_netdev_init()
4597 net_dev->lltx = true; in dpaa2_eth_netdev_init()
4600 net_dev->features = NETIF_F_RXCSUM | in dpaa2_eth_netdev_init()
4604 net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS; in dpaa2_eth_netdev_init()
4605 net_dev->hw_features = net_dev->features; in dpaa2_eth_netdev_init()
4606 net_dev->xdp_features = NETDEV_XDP_ACT_BASIC | in dpaa2_eth_netdev_init()
4609 if (priv->dpni_attrs.wriop_version >= DPAA2_WRIOP_VERSION(3, 0, 0) && in dpaa2_eth_netdev_init()
4610 priv->dpni_attrs.num_queues <= 8) in dpaa2_eth_netdev_init()
4611 net_dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; in dpaa2_eth_netdev_init()
4613 if (priv->dpni_attrs.vlan_filter_entries) in dpaa2_eth_netdev_init()
4614 net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; in dpaa2_eth_netdev_init()
4641 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); in dpaa2_eth_connect_mac()
4644 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) { in dpaa2_eth_connect_mac()
4645 netdev_dbg(priv->net_dev, "waiting for mac\n"); in dpaa2_eth_connect_mac()
4649 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) in dpaa2_eth_connect_mac()
4654 return -ENOMEM; in dpaa2_eth_connect_mac()
4656 mac->mc_dev = dpmac_dev; in dpaa2_eth_connect_mac()
4657 mac->mc_io = priv->mc_io; in dpaa2_eth_connect_mac()
4658 mac->net_dev = priv->net_dev; in dpaa2_eth_connect_mac()
4667 if (err == -EPROBE_DEFER) in dpaa2_eth_connect_mac()
4668 netdev_dbg(priv->net_dev, in dpaa2_eth_connect_mac()
4671 netdev_err(priv->net_dev, in dpaa2_eth_connect_mac()
4678 mutex_lock(&priv->mac_lock); in dpaa2_eth_connect_mac()
4679 priv->mac = mac; in dpaa2_eth_connect_mac()
4680 mutex_unlock(&priv->mac_lock); in dpaa2_eth_connect_mac()
4695 mutex_lock(&priv->mac_lock); in dpaa2_eth_disconnect_mac()
4696 mac = priv->mac; in dpaa2_eth_disconnect_mac()
4697 priv->mac = NULL; in dpaa2_eth_disconnect_mac()
4698 mutex_unlock(&priv->mac_lock); in dpaa2_eth_disconnect_mac()
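/* priv->mac is read from several contexts (ethtool, link handling), so the
 * pointer is only published or cleared under priv->mac_lock once the
 * dpaa2_mac object is fully set up, and readers take the same mutex. A
 * sketch of that publish/retire pairing; the helper names are illustrative.
 */
static void example_publish_mac(struct dpaa2_eth_priv *priv,
				struct dpaa2_mac *mac)
{
	mutex_lock(&priv->mac_lock);
	priv->mac = mac;		/* visible to readers from here on */
	mutex_unlock(&priv->mac_lock);
}

static struct dpaa2_mac *example_retire_mac(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_mac *mac;

	mutex_lock(&priv->mac_lock);
	mac = priv->mac;
	priv->mac = NULL;		/* stop new readers before teardown */
	mutex_unlock(&priv->mac_lock);

	return mac;
}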
4720 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, in dpni_irq0_handler_thread()
4735  * handler is the only one who changes priv->mac at runtime, in dpni_irq0_handler_thread()
4736  * so we are not racing with anyone. in dpni_irq0_handler_thread()
4737  */ in dpni_irq0_handler_thread()
4738 had_mac = !!priv->mac; in dpni_irq0_handler_thread()
4755 dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); in dpaa2_eth_setup_irqs()
4759 irq = ls_dev->irqs[0]; in dpaa2_eth_setup_irqs()
4760 err = devm_request_threaded_irq(&ls_dev->dev, irq->virq, in dpaa2_eth_setup_irqs()
4763 dev_name(&ls_dev->dev), &ls_dev->dev); in dpaa2_eth_setup_irqs()
4765 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err); in dpaa2_eth_setup_irqs()
4769 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, in dpaa2_eth_setup_irqs()
4773 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); in dpaa2_eth_setup_irqs()
4777 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, in dpaa2_eth_setup_irqs()
4780 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); in dpaa2_eth_setup_irqs()
4787 devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev); in dpaa2_eth_setup_irqs()
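/* DPNI interrupts are wired up in three steps: allocate the MC IRQs,
 * request a threaded handler on the mapped virq, then unmask and enable the
 * events of interest on the firmware side, undoing the earlier steps if a
 * later one fails. A sketch of that ordering with illustrative mask bits;
 * the helper name is illustrative.
 */
static int example_setup_dpni_irq(struct fsl_mc_device *ls_dev)
{
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err)
		return err;

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err)
		goto free_mc_irq;

	/* Unmask and enable link / endpoint change events */
	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX,
				DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err)
		goto free_irq;

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err)
		goto free_irq;

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);
	return err;
}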
4799 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_add_ch_napi()
4800 ch = priv->channel[i]; in dpaa2_eth_add_ch_napi()
4802 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll); in dpaa2_eth_add_ch_napi()
4811 for (i = 0; i < priv->num_channels; i++) { in dpaa2_eth_del_ch_napi()
4812 ch = priv->channel[i]; in dpaa2_eth_del_ch_napi()
4813 netif_napi_del(&ch->napi); in dpaa2_eth_del_ch_napi()
4824 dev = &dpni_dev->dev; in dpaa2_eth_probe()
4830 return -ENOMEM; in dpaa2_eth_probe()
4837 priv->net_dev = net_dev; in dpaa2_eth_probe()
4838 SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port); in dpaa2_eth_probe()
4840 mutex_init(&priv->mac_lock); in dpaa2_eth_probe()
4842 priv->iommu_domain = iommu_get_domain_for_dev(dev); in dpaa2_eth_probe()
4844 priv->tx_tstamp_type = HWTSTAMP_TX_OFF; in dpaa2_eth_probe()
4845 priv->rx_tstamp = false; in dpaa2_eth_probe()
4847 priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0); in dpaa2_eth_probe()
4848 if (!priv->dpaa2_ptp_wq) { in dpaa2_eth_probe()
4849 err = -ENOMEM; in dpaa2_eth_probe()
4853 INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp); in dpaa2_eth_probe()
4854 mutex_init(&priv->onestep_tstamp_lock); in dpaa2_eth_probe()
4855 skb_queue_head_init(&priv->tx_skbs); in dpaa2_eth_probe()
4857 priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK; in dpaa2_eth_probe()
4861 &priv->mc_io); in dpaa2_eth_probe()
4863 if (err == -ENXIO) { in dpaa2_eth_probe()
4865 err = -EPROBE_DEFER; in dpaa2_eth_probe()
4895 priv->percpu_stats = alloc_percpu(*priv->percpu_stats); in dpaa2_eth_probe()
4896 if (!priv->percpu_stats) { in dpaa2_eth_probe()
4898 err = -ENOMEM; in dpaa2_eth_probe()
4901 priv->percpu_extras = alloc_percpu(*priv->percpu_extras); in dpaa2_eth_probe()
4902 if (!priv->percpu_extras) { in dpaa2_eth_probe()
4904 err = -ENOMEM; in dpaa2_eth_probe()
4908 priv->sgt_cache = alloc_percpu(*priv->sgt_cache); in dpaa2_eth_probe()
4909 if (!priv->sgt_cache) { in dpaa2_eth_probe()
4911 err = -ENOMEM; in dpaa2_eth_probe()
4915 priv->fd = alloc_percpu(*priv->fd); in dpaa2_eth_probe()
4916 if (!priv->fd) { in dpaa2_eth_probe()
4918 err = -ENOMEM; in dpaa2_eth_probe()
4927 err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); in dpaa2_eth_probe()
4932 !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); in dpaa2_eth_probe()
4941 if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) { in dpaa2_eth_probe()
4942 priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; in dpaa2_eth_probe()
4943 net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops; in dpaa2_eth_probe()
4956 priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv, in dpaa2_eth_probe()
4957 "%s_poll_link", net_dev->name); in dpaa2_eth_probe()
4958 if (IS_ERR(priv->poll_thread)) { in dpaa2_eth_probe()
4962 priv->do_link_poll = true; in dpaa2_eth_probe()
4977 net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN; in dpaa2_eth_probe()
4990 dev_info(dev, "Probed interface %s\n", net_dev->name); in dpaa2_eth_probe()
5000 if (priv->do_link_poll) in dpaa2_eth_probe()
5001 kthread_stop(priv->poll_thread); in dpaa2_eth_probe()
5011 free_percpu(priv->fd); in dpaa2_eth_probe()
5013 free_percpu(priv->sgt_cache); in dpaa2_eth_probe()
5015 free_percpu(priv->percpu_extras); in dpaa2_eth_probe()
5017 free_percpu(priv->percpu_stats); in dpaa2_eth_probe()
5027 fsl_mc_portal_free(priv->mc_io); in dpaa2_eth_probe()
5029 destroy_workqueue(priv->dpaa2_ptp_wq); in dpaa2_eth_probe()
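/* The probe path above keeps four per-CPU areas (stats, extra stats, a SGT
 * cache and a frame-descriptor scratch area); each successful allocation
 * adds one more free_percpu() to the error unwind, run in reverse order
 * from the matching error labels. A trimmed sketch of that pattern; the
 * helper name is illustrative.
 */
static int example_alloc_percpu(struct dpaa2_eth_priv *priv)
{
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats)
		return -ENOMEM;

	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras)
		goto err_free_stats;

	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
	if (!priv->sgt_cache)
		goto err_free_extras;

	priv->fd = alloc_percpu(*priv->fd);
	if (!priv->fd)
		goto err_free_sgt;

	return 0;

err_free_sgt:
	free_percpu(priv->sgt_cache);
err_free_extras:
	free_percpu(priv->percpu_extras);
err_free_stats:
	free_percpu(priv->percpu_stats);
	return -ENOMEM;
}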
5043 dev = &ls_dev->dev; in dpaa2_eth_remove()
5059 if (priv->do_link_poll) in dpaa2_eth_remove()
5060 kthread_stop(priv->poll_thread); in dpaa2_eth_remove()
5066 free_percpu(priv->fd); in dpaa2_eth_remove()
5067 free_percpu(priv->sgt_cache); in dpaa2_eth_remove()
5068 free_percpu(priv->percpu_stats); in dpaa2_eth_remove()
5069 free_percpu(priv->percpu_extras); in dpaa2_eth_remove()
5075 if (priv->onestep_reg_base) in dpaa2_eth_remove()
5076 iounmap(priv->onestep_reg_base); in dpaa2_eth_remove()
5078 fsl_mc_portal_free(priv->mc_io); in dpaa2_eth_remove()
5080 destroy_workqueue(priv->dpaa2_ptp_wq); in dpaa2_eth_remove()
5082 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); in dpaa2_eth_remove()