Lines matching +full:synquacer +full:netsec

1 // SPDX-License-Identifier: GPL-2.0+
247 #define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
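For context on line 247: each RX buffer is one page carved into headroom, payload, and a tail region, with NETSEC_RX_BUF_NON_DATA being everything that is not payload. A minimal userspace sketch of that arithmetic, assuming the usual layout where the non-data part is XDP headroom plus the skb_shared_info tail (the constants below are illustrative stand-ins, not the driver's real values):

#include <stdio.h>

/* Illustrative stand-ins; the real values live in netsec.c and skbuff.h. */
#define PAGE_SIZE        4096
#define XDP_HEADROOM     256   /* assumed NETSEC_RXBUF_HEADROOM */
#define SHARED_INFO_TAIL 320   /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#define RX_BUF_NON_DATA  (XDP_HEADROOM + SHARED_INFO_TAIL)
#define RX_BUF_SIZE      (PAGE_SIZE - RX_BUF_NON_DATA)

int main(void)
{
	/* One page per RX buffer: headroom, then payload, then tail. */
	printf("page %d = headroom %d + payload %d + tail %d\n",
	       PAGE_SIZE, XDP_HEADROOM, RX_BUF_SIZE, SHARED_INFO_TAIL);
	return 0;
}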
312 struct netsec_de { /* Netsec Descriptor layout */
333 writel(val, priv->ioaddr + reg_addr); in netsec_write()
338 return readl(priv->ioaddr + reg_addr); in netsec_read()
366 while (--timeout && netsec_read(priv, addr) & mask) in netsec_wait_while_busy()
372 while (--timeout && netsec_read(priv, addr) & mask) in netsec_wait_while_busy()
378 netdev_WARN(priv->ndev, "%s: timeout\n", __func__); in netsec_wait_while_busy()
380 return -ETIMEDOUT; in netsec_wait_while_busy()
417 } while (--timeout && (data & mask)); in netsec_mac_wait_while_busy()
430 } while (--timeout && (data & mask)); in netsec_mac_wait_while_busy()
435 netdev_WARN(priv->ndev, "%s: timeout\n", __func__); in netsec_mac_wait_while_busy()
437 return -ETIMEDOUT; in netsec_mac_wait_while_busy()
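Lines 366-437 all follow the same bounded-poll idiom: decrement a retry budget, re-read the status register, and report -ETIMEDOUT (after a warning) if the budget runs out. A self-contained sketch of the pattern, with a stubbed register read standing in for netsec_read():

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Stub: pretend the busy bit clears after a few reads. */
static uint32_t fake_read(void)
{
	static int reads;

	return (++reads < 5) ? 0x1 : 0x0;	/* bit 0 = busy */
}

/* Poll until (reg & mask) clears or the retry budget is exhausted. */
static int wait_while_busy(uint32_t mask, int timeout)
{
	while (--timeout && (fake_read() & mask))
		;	/* the driver delays/sleeps here between reads */

	if (timeout)
		return 0;

	fprintf(stderr, "%s: timeout\n", __func__);
	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait: %d\n", wait_while_busy(0x1, 1000));
	return 0;
}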
442 struct phy_device *phydev = priv->ndev->phydev; in netsec_mac_update_to_phy_state()
445 value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON : in netsec_mac_update_to_phy_state()
448 if (phydev->speed != SPEED_1000) in netsec_mac_update_to_phy_state()
451 if (priv->phy_interface != PHY_INTERFACE_MODE_GMII && in netsec_mac_update_to_phy_state()
452 phydev->speed == SPEED_100) in netsec_mac_update_to_phy_state()
457 if (phy_interface_mode_is_rgmii(priv->phy_interface)) in netsec_mac_update_to_phy_state()
461 return -ETIMEDOUT; in netsec_mac_update_to_phy_state()
472 struct netsec_priv *priv = bus->priv; in netsec_phy_write()
475 return -ETIMEDOUT; in netsec_phy_write()
480 (netsec_clk_type(priv->freq) << in netsec_phy_write()
482 return -ETIMEDOUT; in netsec_phy_write()
491 * To meet this requirement, netsec driver needs to issue dummy in netsec_phy_write()
501 struct netsec_priv *priv = bus->priv; in netsec_phy_read()
508 (netsec_clk_type(priv->freq) << in netsec_phy_read()
510 return -ETIMEDOUT; in netsec_phy_read()
529 strscpy(info->driver, "netsec", sizeof(info->driver)); in netsec_et_get_drvinfo()
530 strscpy(info->bus_info, dev_name(net_device->dev.parent), in netsec_et_get_drvinfo()
531 sizeof(info->bus_info)); in netsec_et_get_drvinfo()
541 *et_coalesce = priv->et_coalesce; in netsec_et_get_coalesce()
553 priv->et_coalesce = *et_coalesce; in netsec_et_set_coalesce()
555 if (priv->et_coalesce.tx_coalesce_usecs < 50) in netsec_et_set_coalesce()
556 priv->et_coalesce.tx_coalesce_usecs = 50; in netsec_et_set_coalesce()
557 if (priv->et_coalesce.tx_max_coalesced_frames < 1) in netsec_et_set_coalesce()
558 priv->et_coalesce.tx_max_coalesced_frames = 1; in netsec_et_set_coalesce()
561 priv->et_coalesce.tx_max_coalesced_frames); in netsec_et_set_coalesce()
563 priv->et_coalesce.tx_coalesce_usecs); in netsec_et_set_coalesce()
567 if (priv->et_coalesce.rx_coalesce_usecs < 50) in netsec_et_set_coalesce()
568 priv->et_coalesce.rx_coalesce_usecs = 50; in netsec_et_set_coalesce()
569 if (priv->et_coalesce.rx_max_coalesced_frames < 1) in netsec_et_set_coalesce()
570 priv->et_coalesce.rx_max_coalesced_frames = 1; in netsec_et_set_coalesce()
573 priv->et_coalesce.rx_max_coalesced_frames); in netsec_et_set_coalesce()
575 priv->et_coalesce.rx_coalesce_usecs); in netsec_et_set_coalesce()
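netsec_et_set_coalesce() (lines 553-575) floors each coalesce knob before programming the hardware: at least 50 usecs and at least one frame per interrupt, for both TX and RX. A hedged sketch of that clamping step:

#include <stdio.h>

struct coalesce {
	unsigned int usecs;
	unsigned int max_frames;
};

/* Enforce the driver's minimums: >= 50 usecs, >= 1 frame. */
static void clamp_coalesce(struct coalesce *c)
{
	if (c->usecs < 50)
		c->usecs = 50;
	if (c->max_frames < 1)
		c->max_frames = 1;
}

int main(void)
{
	struct coalesce rx = { .usecs = 10, .max_frames = 0 };

	clamp_coalesce(&rx);
	printf("rx: %u usecs, %u frames\n", rx.usecs, rx.max_frames); /* 50, 1 */
	return 0;
}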
586 return priv->msg_enable; in netsec_et_get_msglevel()
593 priv->msg_enable = datum; in netsec_et_set_msglevel()
616 struct netsec_de *de = dring->vaddr + DESC_SZ * idx; in netsec_set_rx_de()
621 if (idx == DESC_NUM - 1) in netsec_set_rx_de()
624 de->data_buf_addr_up = upper_32_bits(desc->dma_addr); in netsec_set_rx_de()
625 de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); in netsec_set_rx_de()
626 de->buf_len_info = desc->len; in netsec_set_rx_de()
627 de->attr = attr; in netsec_set_rx_de()
630 dring->desc[idx].dma_addr = desc->dma_addr; in netsec_set_rx_de()
631 dring->desc[idx].addr = desc->addr; in netsec_set_rx_de()
632 dring->desc[idx].len = desc->len; in netsec_set_rx_de()
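netsec_set_rx_de() (lines 616-632) shows the descriptor handoff: split the 64-bit DMA address into two 32-bit words, set a ring-end attribute on the final slot, and mirror the software-visible fields into a shadow array for later unmap/refill. A standalone sketch of the same bookkeeping; the field layout and bit positions below are assumptions for illustration, not the hardware's actual format:

#include <stdio.h>
#include <stdint.h>

#define DESC_NUM 256

struct hw_desc {		/* what the NIC reads (illustrative layout) */
	uint32_t addr_up, addr_lw;
	uint32_t len;
	uint32_t attr;
};

struct sw_desc {		/* CPU-side shadow of the same slot */
	uint64_t dma_addr;
	uint32_t len;
};

static void set_rx_de(struct hw_desc *ring, struct sw_desc *shadow,
		      int idx, uint64_t dma, uint32_t len)
{
	uint32_t attr = 1u << 31;		/* assumed OWN bit position */

	if (idx == DESC_NUM - 1)
		attr |= 1u << 30;		/* assumed ring-end bit */

	ring[idx].addr_up = (uint32_t)(dma >> 32);	/* upper_32_bits() */
	ring[idx].addr_lw = (uint32_t)dma;		/* lower_32_bits() */
	ring[idx].len = len;
	ring[idx].attr = attr;		/* written last: hands the slot to HW */

	shadow[idx].dma_addr = dma;	/* kept for unmap/refill later */
	shadow[idx].len = len;
}

int main(void)
{
	static struct hw_desc ring[DESC_NUM];
	static struct sw_desc shadow[DESC_NUM];

	set_rx_de(ring, shadow, DESC_NUM - 1, 0x123456789aULL, 2048);
	printf("attr=0x%08x up=0x%08x lw=0x%08x\n",
	       ring[DESC_NUM - 1].attr, ring[DESC_NUM - 1].addr_up,
	       ring[DESC_NUM - 1].addr_lw);
	return 0;
}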
637 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; in netsec_clean_tx_dring()
640 int tail = dring->tail; in netsec_clean_tx_dring()
644 spin_lock(&dring->lock); in netsec_clean_tx_dring()
648 entry = dring->vaddr + DESC_SZ * tail; in netsec_clean_tx_dring()
652 while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) && in netsec_clean_tx_dring()
657 desc = &dring->desc[tail]; in netsec_clean_tx_dring()
658 eop = (entry->attr >> NETSEC_TX_LAST) & 1; in netsec_clean_tx_dring()
664 if (desc->buf_type != TYPE_NETSEC_XDP_TX) in netsec_clean_tx_dring()
665 dma_unmap_single(priv->dev, desc->dma_addr, desc->len, in netsec_clean_tx_dring()
671 if (desc->buf_type == TYPE_NETSEC_SKB) { in netsec_clean_tx_dring()
672 bytes += desc->skb->len; in netsec_clean_tx_dring()
673 dev_kfree_skb(desc->skb); in netsec_clean_tx_dring()
675 bytes += desc->xdpf->len; in netsec_clean_tx_dring()
676 if (desc->buf_type == TYPE_NETSEC_XDP_TX) in netsec_clean_tx_dring()
677 xdp_return_frame_rx_napi(desc->xdpf); in netsec_clean_tx_dring()
679 xdp_return_frame_bulk(desc->xdpf, &bq); in netsec_clean_tx_dring()
687 /* entry->attr is not going to be accessed by the NIC until in netsec_clean_tx_dring()
690 entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; in netsec_clean_tx_dring()
692 dring->tail = (tail + 1) % DESC_NUM; in netsec_clean_tx_dring()
694 tail = dring->tail; in netsec_clean_tx_dring()
695 entry = dring->vaddr + DESC_SZ * tail; in netsec_clean_tx_dring()
702 spin_unlock(&dring->lock); in netsec_clean_tx_dring()
710 priv->ndev->stats.tx_packets += cnt; in netsec_clean_tx_dring()
711 priv->ndev->stats.tx_bytes += bytes; in netsec_clean_tx_dring()
713 netdev_completed_queue(priv->ndev, cnt, bytes); in netsec_clean_tx_dring()
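The TX-completion path (lines 637-713) is a single-consumer ring walk: under the ring lock, keep consuming from tail while the hardware has cleared the ownership bit, free the buffer, give the slot back by restoring OWN (lines 687-690), and advance tail modulo DESC_NUM; the packet and byte totals then feed the netdev stats and BQL via netdev_completed_queue(). A minimal sketch of the loop shape, assuming bit 31 as the OWN flag:

#include <stdio.h>
#include <stdint.h>

#define DESC_NUM 256
#define OWN_BIT  (1u << 31)	/* assumed position; set = owned by HW */

struct slot {
	uint32_t attr;
	uint32_t len;
};

/* Reclaim completed TX slots; returns packets freed, sums bytes. */
static int clean_tx_ring(struct slot *ring, unsigned int *tail,
			 unsigned int *bytes)
{
	int cnt = 0;

	while (!(ring[*tail].attr & OWN_BIT)) {
		*bytes += ring[*tail].len;	/* for stats/BQL accounting */
		ring[*tail].attr = OWN_BIT;	/* hand the slot back */
		*tail = (*tail + 1) % DESC_NUM;
		cnt++;
	}
	return cnt;
}

int main(void)
{
	static struct slot ring[DESC_NUM];
	unsigned int tail = 0, bytes = 0;
	int i;

	for (i = 0; i < DESC_NUM; i++)
		ring[i].attr = OWN_BIT;		/* all owned by HW */
	for (i = 0; i < 3; i++) {		/* HW "completes" three */
		ring[i].attr = 0;
		ring[i].len = 64;
	}
	printf("freed %d pkts, %u bytes\n",
	       clean_tx_ring(ring, &tail, &bytes), bytes);
	return 0;
}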
720 struct net_device *ndev = priv->ndev; in netsec_process_tx()
739 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; in netsec_alloc_rx_data()
742 page = page_pool_dev_alloc_pages(dring->page_pool); in netsec_alloc_rx_data()
746 /* We allocate the same buffer length for XDP and non-XDP cases. in netsec_alloc_rx_data()
751 /* Make sure the incoming payload fits in the page for XDP and non-XDP in netsec_alloc_rx_data()
761 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; in netsec_rx_fill()
765 netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]); in netsec_rx_fill()
769 num--; in netsec_rx_fill()
794 int idx = dring->head; in netsec_set_tx_de()
798 de = dring->vaddr + (DESC_SZ * idx); in netsec_set_tx_de()
805 (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) | in netsec_set_tx_de()
806 (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) | in netsec_set_tx_de()
808 if (idx == DESC_NUM - 1) in netsec_set_tx_de()
811 de->data_buf_addr_up = upper_32_bits(desc->dma_addr); in netsec_set_tx_de()
812 de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); in netsec_set_tx_de()
813 de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len; in netsec_set_tx_de()
814 de->attr = attr; in netsec_set_tx_de()
816 dring->desc[idx] = *desc; in netsec_set_tx_de()
817 if (desc->buf_type == TYPE_NETSEC_SKB) in netsec_set_tx_de()
818 dring->desc[idx].skb = buf; in netsec_set_tx_de()
819 else if (desc->buf_type == TYPE_NETSEC_XDP_TX || in netsec_set_tx_de()
820 desc->buf_type == TYPE_NETSEC_XDP_NDO) in netsec_set_tx_de()
821 dring->desc[idx].xdpf = buf; in netsec_set_tx_de()
824 dring->head = (dring->head + 1) % DESC_NUM; in netsec_set_tx_de()
832 struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; in netsec_xdp_queue_one()
833 struct page *page = virt_to_page(xdpf->data); in netsec_xdp_queue_one()
839 if (tx_ring->head >= tx_ring->tail) in netsec_xdp_queue_one()
840 filled = tx_ring->head - tx_ring->tail; in netsec_xdp_queue_one()
842 filled = tx_ring->head + DESC_NUM - tx_ring->tail; in netsec_xdp_queue_one()
844 if (DESC_NUM - filled <= 1) in netsec_xdp_queue_one()
851 dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len, in netsec_xdp_queue_one()
853 if (dma_mapping_error(priv->dev, dma_handle)) in netsec_xdp_queue_one()
861 &priv->desc_ring[NETSEC_RING_RX]; in netsec_xdp_queue_one()
863 page_pool_get_dma_dir(rx_ring->page_pool); in netsec_xdp_queue_one()
865 dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom + in netsec_xdp_queue_one()
867 dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len, in netsec_xdp_queue_one()
873 tx_desc.addr = xdpf->data; in netsec_xdp_queue_one()
874 tx_desc.len = xdpf->len; in netsec_xdp_queue_one()
876 netdev_sent_queue(priv->ndev, xdpf->len); in netsec_xdp_queue_one()
884 struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; in netsec_xdp_xmit_back()
891 spin_lock(&tx_ring->lock); in netsec_xdp_xmit_back()
893 spin_unlock(&tx_ring->lock); in netsec_xdp_xmit_back()
901 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; in netsec_run_xdp()
902 unsigned int sync, len = xdp->data_end - xdp->data; in netsec_run_xdp()
911 sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM; in netsec_run_xdp()
921 page = virt_to_head_page(xdp->data); in netsec_run_xdp()
922 page_pool_put_page(dring->page_pool, page, sync, true); in netsec_run_xdp()
926 err = xdp_do_redirect(priv->ndev, xdp, prog); in netsec_run_xdp()
931 page = virt_to_head_page(xdp->data); in netsec_run_xdp()
932 page_pool_put_page(dring->page_pool, page, sync, true); in netsec_run_xdp()
936 bpf_warn_invalid_xdp_action(priv->ndev, prog, act); in netsec_run_xdp()
939 trace_xdp_exception(priv->ndev, prog, act); in netsec_run_xdp()
943 page = virt_to_head_page(xdp->data); in netsec_run_xdp()
944 page_pool_put_page(dring->page_pool, page, sync, true); in netsec_run_xdp()
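netsec_run_xdp() (lines 901-944) implements the standard XDP verdict handling: PASS falls through to the skb path, TX re-queues the frame on the device's own TX ring, REDIRECT hands it to xdp_do_redirect(), and anything else (or a failed TX/redirect) recycles the page back to the page_pool and counts as a drop. A hedged outline of that dispatch, with stubs in place of the kernel helpers:

#include <stdio.h>

enum verdict { XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };
enum result  { RX_OK, RX_DROP, RX_XDP_CONSUMED };

/* Stubs standing in for netsec_xdp_xmit_back() / xdp_do_redirect(). */
static int xmit_back(void)   { return 0; }	/* 0 = queued ok */
static int do_redirect(void) { return 0; }

static enum result run_xdp(enum verdict act)
{
	switch (act) {
	case XDP_PASS:
		return RX_OK;			/* continue to skb build */
	case XDP_TX:
		if (xmit_back())
			break;			/* failed: recycle + drop */
		return RX_XDP_CONSUMED;
	case XDP_REDIRECT:
		if (do_redirect())
			break;
		return RX_XDP_CONSUMED;
	default:
		break;				/* invalid action: warn + drop */
	}
	/* common failure path: return the page to the pool */
	return RX_DROP;
}

int main(void)
{
	printf("pass -> %d, tx -> %d, bogus -> %d\n",
	       run_xdp(XDP_PASS), run_xdp(XDP_TX), run_xdp((enum verdict)42));
	return 0;
}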
953 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; in netsec_process_rx()
954 struct net_device *ndev = priv->ndev; in netsec_process_rx()
963 xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq); in netsec_process_rx()
965 xdp_prog = READ_ONCE(priv->xdp_prog); in netsec_process_rx()
966 dma_dir = page_pool_get_dma_dir(dring->page_pool); in netsec_process_rx()
969 u16 idx = dring->tail; in netsec_process_rx()
970 struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); in netsec_process_rx()
971 struct netsec_desc *desc = &dring->desc[idx]; in netsec_process_rx()
972 struct page *page = virt_to_page(desc->addr); in netsec_process_rx()
979 if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { in netsec_process_rx()
992 pkt_len = de->buf_len_info >> 16; in netsec_process_rx()
993 rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) & in netsec_process_rx()
995 rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1; in netsec_process_rx()
997 netif_err(priv, drv, priv->ndev, in netsec_process_rx()
1000 ndev->stats.rx_dropped++; in netsec_process_rx()
1001 dring->tail = (dring->tail + 1) % DESC_NUM; in netsec_process_rx()
1007 (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3; in netsec_process_rx()
1017 dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len, in netsec_process_rx()
1019 prefetch(desc->addr); in netsec_process_rx()
1021 xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM, in netsec_process_rx()
1033 skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA); in netsec_process_rx()
1041 page_pool_put_page(dring->page_pool, page, pkt_len, in netsec_process_rx()
1043 netif_err(priv, drv, priv->ndev, in netsec_process_rx()
1049 skb_reserve(skb, xdp.data - xdp.data_hard_start); in netsec_process_rx()
1050 skb_put(skb, xdp.data_end - xdp.data); in netsec_process_rx()
1051 skb->protocol = eth_type_trans(skb, priv->ndev); in netsec_process_rx()
1053 if (priv->rx_cksum_offload_flag && in netsec_process_rx()
1055 skb->ip_summed = CHECKSUM_UNNECESSARY; in netsec_process_rx()
1059 napi_gro_receive(&priv->napi, skb); in netsec_process_rx()
1061 ndev->stats.rx_packets++; in netsec_process_rx()
1062 ndev->stats.rx_bytes += xdp.data_end - xdp.data; in netsec_process_rx()
1066 desc->len = desc_len; in netsec_process_rx()
1067 desc->dma_addr = dma_handle; in netsec_process_rx()
1068 desc->addr = buf_addr; in netsec_process_rx()
1071 dring->tail = (dring->tail + 1) % DESC_NUM; in netsec_process_rx()
1091 spin_lock_irqsave(&priv->reglock, flags); in netsec_napi_poll()
1094 spin_unlock_irqrestore(&priv->reglock, flags); in netsec_napi_poll()
1105 if (dring->head >= dring->tail) in netsec_desc_used()
1106 used = dring->head - dring->tail; in netsec_desc_used()
1108 used = dring->head + DESC_NUM - dring->tail; in netsec_desc_used()
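netsec_desc_used() (lines 1105-1108) and the fullness check in netsec_xdp_queue_one() (lines 839-844) compute the occupancy of a circular ring from its head and tail indices, handling wrap-around; netsec_check_stop_tx() (line 1118) then stops the queue when fewer than two free slots remain. A self-contained version of that arithmetic:

#include <stdio.h>

#define DESC_NUM 256

/* Slots in flight between consumer (tail) and producer (head). */
static int ring_used(int head, int tail)
{
	return (head >= tail) ? head - tail : head + DESC_NUM - tail;
}

/* Mirrors the driver's stop condition: fewer than two free slots. */
static int ring_full(int head, int tail)
{
	return DESC_NUM - ring_used(head, tail) < 2;
}

int main(void)
{
	printf("used(10,4)=%d used(4,10)=%d full(4,5)=%d\n",
	       ring_used(10, 4),	/* 6 */
	       ring_used(4, 10),	/* 250: producer wrapped */
	       ring_full(4, 5));	/* 255 used -> 1 */
	return 0;
}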
1115 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; in netsec_check_stop_tx()
1118 if (DESC_NUM - used < 2) { in netsec_check_stop_tx()
1119 netif_stop_queue(priv->ndev); in netsec_check_stop_tx()
1127 if (DESC_NUM - used < 2) in netsec_check_stop_tx()
1130 netif_wake_queue(priv->ndev); in netsec_check_stop_tx()
1140 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; in netsec_netdev_start_xmit()
1146 spin_lock_bh(&dring->lock); in netsec_netdev_start_xmit()
1149 spin_unlock_bh(&dring->lock); in netsec_netdev_start_xmit()
1151 dev_name(priv->dev), ndev->name); in netsec_netdev_start_xmit()
1155 if (skb->ip_summed == CHECKSUM_PARTIAL) in netsec_netdev_start_xmit()
1159 tso_seg_len = skb_shinfo(skb)->gso_size; in netsec_netdev_start_xmit()
1162 if (skb->protocol == htons(ETH_P_IP)) { in netsec_netdev_start_xmit()
1163 ip_hdr(skb)->tot_len = 0; in netsec_netdev_start_xmit()
1164 tcp_hdr(skb)->check = in netsec_netdev_start_xmit()
1165 ~tcp_v4_check(0, ip_hdr(skb)->saddr, in netsec_netdev_start_xmit()
1166 ip_hdr(skb)->daddr, 0); in netsec_netdev_start_xmit()
1175 tx_desc.dma_addr = dma_map_single(priv->dev, skb->data, in netsec_netdev_start_xmit()
1177 if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) { in netsec_netdev_start_xmit()
1178 spin_unlock_bh(&dring->lock); in netsec_netdev_start_xmit()
1179 netif_err(priv, drv, priv->ndev, in netsec_netdev_start_xmit()
1181 ndev->stats.tx_dropped++; in netsec_netdev_start_xmit()
1185 tx_desc.addr = skb->data; in netsec_netdev_start_xmit()
1190 netdev_sent_queue(priv->ndev, skb->len); in netsec_netdev_start_xmit()
1193 spin_unlock_bh(&dring->lock); in netsec_netdev_start_xmit()
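For TSO (lines 1159-1166), the driver zeroes tot_len and seeds tcp->check with the one's-complement pseudo-header sum over just the addresses and protocol, leaving the per-segment length for the hardware to fold in. A small userspace rendition of that seed, equivalent to ~tcp_v4_check(0, saddr, daddr, 0); byte-order details are elided for brevity (the kernel helpers operate on network-order words):

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit accumulator to 16 bits (kernel csum_fold() also inverts). */
static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Pseudo-header seed with length 0: saddr + daddr + proto, folded. */
static uint16_t tso_check_seed(uint32_t saddr, uint32_t daddr)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;	/* IPPROTO_TCP; segment length omitted (zero) */

	return fold16(sum);	/* == (u16)~tcp_v4_check(0, s, d, 0) */
}

int main(void)
{
	uint32_t s = 0xc0a80001;	/* 192.168.0.1 */
	uint32_t d = 0xc0a80002;	/* 192.168.0.2 */

	printf("tcp->check seed = 0x%04x\n", tso_check_seed(s, d));
	return 0;
}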
1201 struct netsec_desc_ring *dring = &priv->desc_ring[id]; in netsec_uninit_pkt_dring()
1205 if (!dring->vaddr || !dring->desc) in netsec_uninit_pkt_dring()
1208 desc = &dring->desc[idx]; in netsec_uninit_pkt_dring()
1209 if (!desc->addr) in netsec_uninit_pkt_dring()
1213 struct page *page = virt_to_page(desc->addr); in netsec_uninit_pkt_dring()
1215 page_pool_put_full_page(dring->page_pool, page, false); in netsec_uninit_pkt_dring()
1217 dma_unmap_single(priv->dev, desc->dma_addr, desc->len, in netsec_uninit_pkt_dring()
1219 dev_kfree_skb(desc->skb); in netsec_uninit_pkt_dring()
1225 if (xdp_rxq_info_is_reg(&dring->xdp_rxq)) in netsec_uninit_pkt_dring()
1226 xdp_rxq_info_unreg(&dring->xdp_rxq); in netsec_uninit_pkt_dring()
1227 page_pool_destroy(dring->page_pool); in netsec_uninit_pkt_dring()
1230 memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); in netsec_uninit_pkt_dring()
1231 memset(dring->vaddr, 0, DESC_SZ * DESC_NUM); in netsec_uninit_pkt_dring()
1233 dring->head = 0; in netsec_uninit_pkt_dring()
1234 dring->tail = 0; in netsec_uninit_pkt_dring()
1237 netdev_reset_queue(priv->ndev); in netsec_uninit_pkt_dring()
1242 struct netsec_desc_ring *dring = &priv->desc_ring[id]; in netsec_free_dring()
1244 if (dring->vaddr) { in netsec_free_dring()
1245 dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM, in netsec_free_dring()
1246 dring->vaddr, dring->desc_dma); in netsec_free_dring()
1247 dring->vaddr = NULL; in netsec_free_dring()
1250 kfree(dring->desc); in netsec_free_dring()
1251 dring->desc = NULL; in netsec_free_dring()
1256 struct netsec_desc_ring *dring = &priv->desc_ring[id]; in netsec_alloc_dring()
1258 dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, in netsec_alloc_dring()
1259 &dring->desc_dma, GFP_KERNEL); in netsec_alloc_dring()
1260 if (!dring->vaddr) in netsec_alloc_dring()
1263 dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL); in netsec_alloc_dring()
1264 if (!dring->desc) in netsec_alloc_dring()
1271 return -ENOMEM; in netsec_alloc_dring()
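netsec_alloc_dring() (lines 1256-1271) pairs one DMA-coherent block of DESC_SZ * DESC_NUM hardware descriptors with a kcalloc()'d shadow array of struct netsec_desc for CPU-side bookkeeping, unwinding with -ENOMEM if either allocation fails. A sketch of that two-array scheme, with plain calloc() standing in for both kernel allocators:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

#define DESC_NUM 256
#define DESC_SZ  16	/* assumed size of one hardware descriptor */

struct sw_desc { uint64_t dma_addr; void *addr; uint32_t len; };

struct ring {
	void *vaddr;		/* HW-visible descriptors (dma_alloc_coherent) */
	struct sw_desc *desc;	/* CPU-only shadow state (kcalloc) */
};

static int alloc_ring(struct ring *r)
{
	r->vaddr = calloc(DESC_NUM, DESC_SZ);
	if (!r->vaddr)
		goto err;

	r->desc = calloc(DESC_NUM, sizeof(*r->desc));
	if (!r->desc)
		goto err;

	return 0;
err:
	free(r->vaddr);	/* safe on NULL; kernel version frees coherent mem */
	r->vaddr = NULL;
	return -ENOMEM;
}

int main(void)
{
	struct ring r = { 0 };

	printf("alloc_ring: %d\n", alloc_ring(&r));
	free(r.desc);
	free(r.vaddr);
	return 0;
}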
1276 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; in netsec_setup_tx_dring()
1282 de = dring->vaddr + (DESC_SZ * i); in netsec_setup_tx_dring()
1283 /* de->attr is not going to be accessed by the NIC in netsec_setup_tx_dring()
1287 de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; in netsec_setup_tx_dring()
1293 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; in netsec_setup_rx_dring()
1294 struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog); in netsec_setup_rx_dring()
1301 .dev = priv->dev, in netsec_setup_rx_dring()
1305 .napi = &priv->napi, in netsec_setup_rx_dring()
1306 .netdev = priv->ndev, in netsec_setup_rx_dring()
1310 dring->page_pool = page_pool_create(&pp_params); in netsec_setup_rx_dring()
1311 if (IS_ERR(dring->page_pool)) { in netsec_setup_rx_dring()
1312 err = PTR_ERR(dring->page_pool); in netsec_setup_rx_dring()
1313 dring->page_pool = NULL; in netsec_setup_rx_dring()
1317 err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id); in netsec_setup_rx_dring()
1321 err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL, in netsec_setup_rx_dring()
1322 dring->page_pool); in netsec_setup_rx_dring()
1327 struct netsec_desc *desc = &dring->desc[i]; in netsec_setup_rx_dring()
1335 err = -ENOMEM; in netsec_setup_rx_dring()
1338 desc->dma_addr = dma_handle; in netsec_setup_rx_dring()
1339 desc->addr = buf; in netsec_setup_rx_dring()
1340 desc->len = len; in netsec_setup_rx_dring()
1361 return -ENOMEM; in netsec_netdev_load_ucode_region()
1375 addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H); in netsec_netdev_load_microcode()
1376 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L); in netsec_netdev_load_microcode()
1377 size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE); in netsec_netdev_load_microcode()
1383 addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H); in netsec_netdev_load_microcode()
1384 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L); in netsec_netdev_load_microcode()
1385 size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE); in netsec_netdev_load_microcode()
1392 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS); in netsec_netdev_load_microcode()
1393 size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE); in netsec_netdev_load_microcode()
1433 upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); in netsec_reset_hardware()
1435 lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); in netsec_reset_hardware()
1438 upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); in netsec_reset_hardware()
1440 lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); in netsec_reset_hardware()
1451 netif_err(priv, probe, priv->ndev, in netsec_reset_hardware()
1459 netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1); in netsec_reset_hardware()
1466 netif_err(priv, probe, priv->ndev, in netsec_reset_hardware()
1468 return -ENXIO; in netsec_reset_hardware()
1474 if (priv->ndev->mtu > ETH_DATA_LEN) in netsec_reset_hardware()
1496 struct phy_device *phydev = priv->ndev->phydev; in netsec_start_gmac()
1500 if (phydev->speed != SPEED_1000) in netsec_start_gmac()
1505 return -ETIMEDOUT; in netsec_start_gmac()
1508 return -ETIMEDOUT; in netsec_start_gmac()
1517 return -EAGAIN; in netsec_start_gmac()
1521 return -ETIMEDOUT; in netsec_start_gmac()
1525 return -ETIMEDOUT; in netsec_start_gmac()
1529 return -ETIMEDOUT; in netsec_start_gmac()
1532 return -ETIMEDOUT; in netsec_start_gmac()
1535 return -ETIMEDOUT; in netsec_start_gmac()
1537 return -ETIMEDOUT; in netsec_start_gmac()
1553 netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL); in netsec_start_gmac()
1556 return -ETIMEDOUT; in netsec_start_gmac()
1583 if (ndev->phydev->link) in netsec_phy_adjust_link()
1588 phy_print_status(ndev->phydev); in netsec_phy_adjust_link()
1607 spin_lock_irqsave(&priv->reglock, flags); in netsec_irq_handler()
1609 spin_unlock_irqrestore(&priv->reglock, flags); in netsec_irq_handler()
1611 napi_schedule(&priv->napi); in netsec_irq_handler()
1621 pm_runtime_get_sync(priv->dev); in netsec_netdev_open()
1626 netif_err(priv, probe, priv->ndev, in netsec_netdev_open()
1631 ret = request_irq(priv->ndev->irq, netsec_irq_handler, in netsec_netdev_open()
1632 IRQF_SHARED, "netsec", priv); in netsec_netdev_open()
1634 netif_err(priv, drv, priv->ndev, "request_irq failed\n"); in netsec_netdev_open()
1638 if (dev_of_node(priv->dev)) { in netsec_netdev_open()
1639 if (!of_phy_connect(priv->ndev, priv->phy_np, in netsec_netdev_open()
1641 priv->phy_interface)) { in netsec_netdev_open()
1642 netif_err(priv, link, priv->ndev, "missing PHY\n"); in netsec_netdev_open()
1643 ret = -ENODEV; in netsec_netdev_open()
1647 ret = phy_connect_direct(priv->ndev, priv->phydev, in netsec_netdev_open()
1649 priv->phy_interface); in netsec_netdev_open()
1651 netif_err(priv, link, priv->ndev, in netsec_netdev_open()
1657 phy_start(ndev->phydev); in netsec_netdev_open()
1660 napi_enable(&priv->napi); in netsec_netdev_open()
1668 free_irq(priv->ndev->irq, priv); in netsec_netdev_open()
1672 pm_runtime_put_sync(priv->dev); in netsec_netdev_open()
1681 netif_stop_queue(priv->ndev); in netsec_netdev_stop()
1684 napi_disable(&priv->napi); in netsec_netdev_stop()
1689 free_irq(priv->ndev->irq, priv); in netsec_netdev_stop()
1694 phy_stop(ndev->phydev); in netsec_netdev_stop()
1695 phy_disconnect(ndev->phydev); in netsec_netdev_stop()
1699 pm_runtime_put_sync(priv->dev); in netsec_netdev_stop()
1721 data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); in netsec_netdev_init()
1722 netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, in netsec_netdev_init()
1730 netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); in netsec_netdev_init()
1732 spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); in netsec_netdev_init()
1733 spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock); in netsec_netdev_init()
1756 priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM); in netsec_netdev_set_features()
1765 struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; in netsec_xdp_xmit()
1769 return -EINVAL; in netsec_xdp_xmit()
1771 spin_lock(&tx_ring->lock); in netsec_xdp_xmit()
1780 tx_ring->xdp_xmit++; in netsec_xdp_xmit()
1783 spin_unlock(&tx_ring->lock); in netsec_xdp_xmit()
1786 netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit); in netsec_xdp_xmit()
1787 tx_ring->xdp_xmit = 0; in netsec_xdp_xmit()
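netsec_xdp_xmit() (lines 1765-1787) queues a whole batch of frames under the TX ring lock, counts how many made it, and only then rings the hardware doorbell once for the batch: one MMIO write instead of one per frame. A sketch of that batching shape, with a stub for the doorbell write (netsec_xdp_ring_tx_db() in the driver):

#include <stdio.h>

/* Stub for the MMIO doorbell write. */
static void ring_tx_doorbell(int n)
{
	printf("doorbell: %d frame(s)\n", n);
}

/* Stub enqueue: pretend the ring fills up after 4 slots. */
static int queue_one(int i)
{
	return i < 4 ? 0 : -1;
}

static int xdp_xmit(int num_frames)
{
	int i, sent = 0;

	/* lock(tx_ring) */
	for (i = 0; i < num_frames; i++) {
		if (queue_one(i))
			break;		/* ring full: stop, report partial */
		sent++;
	}
	/* unlock(tx_ring) */

	if (sent)
		ring_tx_doorbell(sent);	/* single kick for the whole batch */
	return sent;
}

int main(void)
{
	printf("sent %d of 8\n", xdp_xmit(8));
	return 0;
}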
1796 struct net_device *dev = priv->ndev; in netsec_xdp_setup()
1800 if (prog && dev->mtu > 1500) { in netsec_xdp_setup()
1802 return -EOPNOTSUPP; in netsec_xdp_setup()
1809 old_prog = xchg(&priv->xdp_prog, prog); in netsec_xdp_setup()
1823 switch (xdp->command) { in netsec_xdp()
1825 return netsec_xdp_setup(priv, xdp->prog, xdp->extack); in netsec_xdp()
1827 return -EINVAL; in netsec_xdp()
1850 err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface); in netsec_of_probe()
1852 dev_err(&pdev->dev, "missing required property 'phy-mode'\n"); in netsec_of_probe()
1857 * SynQuacer is physically configured with TX and RX delays in netsec_of_probe()
1861 if (of_machine_is_compatible("socionext,developer-box") && in netsec_of_probe()
1862 priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) { in netsec_of_probe()
1863 dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n"); in netsec_of_probe()
1864 priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; in netsec_of_probe()
1867 priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); in netsec_of_probe()
1868 if (!priv->phy_np) { in netsec_of_probe()
1869 dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); in netsec_of_probe()
1870 return -EINVAL; in netsec_of_probe()
1873 *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); in netsec_of_probe()
1875 priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ in netsec_of_probe()
1876 if (IS_ERR(priv->clk)) in netsec_of_probe()
1877 return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), in netsec_of_probe()
1879 priv->freq = clk_get_rate(priv->clk); in netsec_of_probe()
1890 return -ENODEV; in netsec_acpi_probe()
1895 * PHY correctly but passes the wrong mode string in the phy-mode in netsec_acpi_probe()
1898 priv->phy_interface = PHY_INTERFACE_MODE_NA; in netsec_acpi_probe()
1900 ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); in netsec_acpi_probe()
1902 return dev_err_probe(&pdev->dev, ret, in netsec_acpi_probe()
1903 "missing required property 'phy-channel'\n"); in netsec_acpi_probe()
1905 ret = device_property_read_u32(&pdev->dev, in netsec_acpi_probe()
1906 "socionext,phy-clock-frequency", in netsec_acpi_probe()
1907 &priv->freq); in netsec_acpi_probe()
1909 return dev_err_probe(&pdev->dev, ret, in netsec_acpi_probe()
1910 "missing required property 'socionext,phy-clock-frequency'\n"); in netsec_acpi_probe()
1916 struct phy_device *phydev = priv->phydev; in netsec_unregister_mdio()
1918 if (!dev_of_node(priv->dev) && phydev) { in netsec_unregister_mdio()
1923 mdiobus_unregister(priv->mii_bus); in netsec_unregister_mdio()
1931 bus = devm_mdiobus_alloc(priv->dev); in netsec_register_mdio()
1933 return -ENOMEM; in netsec_register_mdio()
1935 snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev)); in netsec_register_mdio()
1936 bus->priv = priv; in netsec_register_mdio()
1937 bus->name = "SNI NETSEC MDIO"; in netsec_register_mdio()
1938 bus->read = netsec_phy_read; in netsec_register_mdio()
1939 bus->write = netsec_phy_write; in netsec_register_mdio()
1940 bus->parent = priv->dev; in netsec_register_mdio()
1941 priv->mii_bus = bus; in netsec_register_mdio()
1943 if (dev_of_node(priv->dev)) { in netsec_register_mdio()
1944 struct device_node *mdio_node, *parent = dev_of_node(priv->dev); in netsec_register_mdio()
1953 dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n"); in netsec_register_mdio()
1960 dev_err(priv->dev, "mdiobus register err(%d)\n", ret); in netsec_register_mdio()
1965 bus->phy_mask = ~0; in netsec_register_mdio()
1968 dev_err(priv->dev, "mdiobus register err(%d)\n", ret); in netsec_register_mdio()
1972 priv->phydev = get_phy_device(bus, phy_addr, false); in netsec_register_mdio()
1973 if (IS_ERR(priv->phydev)) { in netsec_register_mdio()
1974 ret = PTR_ERR(priv->phydev); in netsec_register_mdio()
1975 dev_err(priv->dev, "get_phy_device err(%d)\n", ret); in netsec_register_mdio()
1976 priv->phydev = NULL; in netsec_register_mdio()
1978 return -ENODEV; in netsec_register_mdio()
1981 ret = phy_device_register(priv->phydev); in netsec_register_mdio()
1983 phy_device_free(priv->phydev); in netsec_register_mdio()
1985 dev_err(priv->dev, in netsec_register_mdio()
2004 dev_err(&pdev->dev, "No MMIO resource found.\n"); in netsec_probe()
2005 return -ENODEV; in netsec_probe()
2010 dev_info(&pdev->dev, "No EEPROM resource found.\n"); in netsec_probe()
2011 return -ENODEV; in netsec_probe()
2020 return -ENOMEM; in netsec_probe()
2024 spin_lock_init(&priv->reglock); in netsec_probe()
2025 SET_NETDEV_DEV(ndev, &pdev->dev); in netsec_probe()
2027 ndev->irq = irq; in netsec_probe()
2028 priv->dev = &pdev->dev; in netsec_probe()
2029 priv->ndev = ndev; in netsec_probe()
2031 priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | in netsec_probe()
2034 priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start, in netsec_probe()
2036 if (!priv->ioaddr) { in netsec_probe()
2037 dev_err(&pdev->dev, "devm_ioremap() failed\n"); in netsec_probe()
2038 ret = -ENXIO; in netsec_probe()
2042 priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start, in netsec_probe()
2044 if (!priv->eeprom_base) { in netsec_probe()
2045 dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n"); in netsec_probe()
2046 ret = -ENXIO; in netsec_probe()
2050 ret = device_get_ethdev_address(&pdev->dev, ndev); in netsec_probe()
2051 if (ret && priv->eeprom_base) { in netsec_probe()
2052 void __iomem *macp = priv->eeprom_base + in netsec_probe()
2065 if (!is_valid_ether_addr(ndev->dev_addr)) { in netsec_probe()
2066 dev_warn(&pdev->dev, "No MAC address found, using random\n"); in netsec_probe()
2070 if (dev_of_node(&pdev->dev)) in netsec_probe()
2077 priv->phy_addr = phy_addr; in netsec_probe()
2079 if (!priv->freq) { in netsec_probe()
2080 dev_err(&pdev->dev, "missing PHY reference clock frequency\n"); in netsec_probe()
2081 ret = -ENODEV; in netsec_probe()
2086 priv->et_coalesce.rx_coalesce_usecs = 500; in netsec_probe()
2087 priv->et_coalesce.rx_max_coalesced_frames = 8; in netsec_probe()
2088 priv->et_coalesce.tx_coalesce_usecs = 500; in netsec_probe()
2089 priv->et_coalesce.tx_max_coalesced_frames = 8; in netsec_probe()
2091 ret = device_property_read_u32(&pdev->dev, "max-frame-size", in netsec_probe()
2092 &ndev->max_mtu); in netsec_probe()
2094 ndev->max_mtu = ETH_DATA_LEN; in netsec_probe()
2097 pm_runtime_enable(&pdev->dev); in netsec_probe()
2098 pm_runtime_get_sync(&pdev->dev); in netsec_probe()
2101 /* this driver only supports F_TAIKI style NETSEC */ in netsec_probe()
2104 ret = -ENODEV; in netsec_probe()
2108 dev_info(&pdev->dev, "hardware revision %d.%d\n", in netsec_probe()
2111 netif_napi_add(ndev, &priv->napi, netsec_napi_poll); in netsec_probe()
2113 ndev->netdev_ops = &netsec_netdev_ops; in netsec_probe()
2114 ndev->ethtool_ops = &netsec_ethtool_ops; in netsec_probe()
2116 ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO | in netsec_probe()
2118 ndev->hw_features = ndev->features; in netsec_probe()
2120 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in netsec_probe()
2123 priv->rx_cksum_offload_flag = true; in netsec_probe()
2129 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) in netsec_probe()
2130 dev_warn(&pdev->dev, "Failed to set DMA mask\n"); in netsec_probe()
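Line 2129 caps the device at 40-bit DMA addressing; DMA_BIT_MASK(n) expands to a mask with the low n bits set, and the 32-bit address splits in the descriptors (lines 624-625, 811-812) are what let the hardware consume addresses above 4 GiB. A one-liner showing the mask arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel's DMA_BIT_MASK(): low n bits set (n <= 64). */
#define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(40);

	printf("40-bit DMA mask = 0x%010llx\n",
	       (unsigned long long)mask);	/* 0xffffffffff */
	return 0;
}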
2138 pm_runtime_put_sync(&pdev->dev); in netsec_probe()
2144 netif_napi_del(&priv->napi); in netsec_probe()
2146 pm_runtime_put_sync(&pdev->dev); in netsec_probe()
2147 pm_runtime_disable(&pdev->dev); in netsec_probe()
2150 dev_err(&pdev->dev, "init failed\n"); in netsec_probe()
2159 unregister_netdev(priv->ndev); in netsec_remove()
2163 netif_napi_del(&priv->napi); in netsec_remove()
2165 pm_runtime_disable(&pdev->dev); in netsec_remove()
2166 free_netdev(priv->ndev); in netsec_remove()
2176 clk_disable_unprepare(priv->clk); in netsec_runtime_suspend()
2185 clk_prepare_enable(priv->clk); in netsec_runtime_resume()
2199 { .compatible = "socionext,synquacer-netsec" },
2216 .name = "netsec",
2226 MODULE_DESCRIPTION("NETSEC Ethernet driver");