Lines Matching +full:no +full:- +full:pbl +full:- +full:x8
1 // SPDX-License-Identifier: GPL-2.0+
23 tn40_write_reg(priv, TN40_REG_IMR, priv->isr_mask); in tn40_enable_interrupts()
43 f->va = dma_alloc_coherent(&priv->pdev->dev, in tn40_fifo_alloc()
44 memsz + TN40_FIFO_EXTRA_SPACE, &f->da, in tn40_fifo_alloc()
46 if (!f->va) in tn40_fifo_alloc()
47 return -ENOMEM; in tn40_fifo_alloc()
49 f->reg_cfg0 = reg_cfg0; in tn40_fifo_alloc()
50 f->reg_cfg1 = reg_cfg1; in tn40_fifo_alloc()
51 f->reg_rptr = reg_rptr; in tn40_fifo_alloc()
52 f->reg_wptr = reg_wptr; in tn40_fifo_alloc()
53 f->rptr = 0; in tn40_fifo_alloc()
54 f->wptr = 0; in tn40_fifo_alloc()
55 f->memsz = memsz; in tn40_fifo_alloc()
56 f->size_mask = memsz - 1; in tn40_fifo_alloc()
57 cfg_base = lower_32_bits((f->da & TN40_TX_RX_CFG0_BASE) | fsz_type); in tn40_fifo_alloc()
59 tn40_write_reg(priv, reg_cfg1, upper_32_bits(f->da)); in tn40_fifo_alloc()
65 dma_free_coherent(&priv->pdev->dev, in tn40_fifo_free()
66 f->memsz + TN40_FIFO_EXTRA_SPACE, f->va, f->da); in tn40_fifo_free()
78 db->stack = (int *)(db + 1); in tn40_rxdb_alloc()
79 db->elems = (void *)(db->stack + nelem); in tn40_rxdb_alloc()
80 db->nelem = nelem; in tn40_rxdb_alloc()
81 db->top = nelem; in tn40_rxdb_alloc()
84 db->stack[i] = nelem - i - 1; in tn40_rxdb_alloc()
96 return db->stack[--db->top]; in tn40_rxdb_alloc_elem()
101 return db->elems + n; in tn40_rxdb_addr_elem()
106 return db->top; in tn40_rxdb_available()
111 db->stack[db->top++] = n; in tn40_rxdb_free_elem()
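A minimal userspace sketch of the free-index stack built by the tn40_rxdb helpers above (lines 78-111): a single allocation holds the control block, a LIFO of free indices and the element array; allocating an element pops an index and freeing pushes it back. The names, the calloc-based allocation and the explicit elem_sz parameter are illustrative only.

#include <stdlib.h>

struct rxdb {
	int *stack;      /* LIFO of free element indices */
	char *elems;     /* per-element storage, laid out after the stack */
	int nelem;
	int top;         /* number of free indices currently on the stack */
	size_t elem_sz;
};

static struct rxdb *rxdb_alloc(int nelem, size_t elem_sz)
{
	size_t size = sizeof(struct rxdb) + nelem * sizeof(int) + nelem * elem_sz;
	struct rxdb *db = calloc(1, size);
	int i;

	if (!db)
		return NULL;
	db->stack = (int *)(db + 1);
	db->elems = (char *)(db->stack + nelem);
	db->nelem = nelem;
	db->top = nelem;
	db->elem_sz = elem_sz;
	for (i = 0; i < nelem; i++)	/* preload so index 0 is handed out first */
		db->stack[i] = nelem - i - 1;
	return db;
}

static int rxdb_alloc_elem(struct rxdb *db)         { return db->stack[--db->top]; }
static void *rxdb_addr_elem(struct rxdb *db, int n) { return db->elems + n * db->elem_sz; }
static int rxdb_available(struct rxdb *db)          { return db->top; }
static void rxdb_free_elem(struct rxdb *db, int n)  { db->stack[db->top++] = n; }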
115  * tn40_create_rx_ring - Initialize all RX-related HW and SW resources
124 * or generating interrupts. In this situation the host has no chance of
133 .dev = &priv->pdev->dev, in tn40_create_rx_ring()
134 .napi = &priv->napi, in tn40_create_rx_ring()
136 .netdev = priv->ndev, in tn40_create_rx_ring()
142 priv->page_pool = page_pool_create(&pp); in tn40_create_rx_ring()
143 if (IS_ERR(priv->page_pool)) in tn40_create_rx_ring()
144 return PTR_ERR(priv->page_pool); in tn40_create_rx_ring()
146 ret = tn40_fifo_alloc(priv, &priv->rxd_fifo0.m, priv->rxd_size, in tn40_create_rx_ring()
152 ret = tn40_fifo_alloc(priv, &priv->rxf_fifo0.m, priv->rxf_size, in tn40_create_rx_ring()
158 pkt_size = priv->ndev->mtu + VLAN_ETH_HLEN; in tn40_create_rx_ring()
159 priv->rxf_fifo0.m.pktsz = pkt_size; in tn40_create_rx_ring()
160 nr = priv->rxf_fifo0.m.memsz / sizeof(struct tn40_rxf_desc); in tn40_create_rx_ring()
161 priv->rxdb0 = tn40_rxdb_alloc(nr); in tn40_create_rx_ring()
162 if (!priv->rxdb0) { in tn40_create_rx_ring()
163 ret = -ENOMEM; in tn40_create_rx_ring()
168 tn40_fifo_free(priv, &priv->rxf_fifo0.m); in tn40_create_rx_ring()
170 tn40_fifo_free(priv, &priv->rxd_fifo0.m); in tn40_create_rx_ring()
172 page_pool_destroy(priv->page_pool); in tn40_create_rx_ring()
178 struct tn40_rxdb *db = priv->rxdb0; in tn40_rx_free_buffers()
182 netdev_dbg(priv->ndev, "total =%d free =%d busy =%d\n", db->nelem, in tn40_rx_free_buffers()
184 db->nelem - tn40_rxdb_available(db)); in tn40_rx_free_buffers()
186 for (i = 0; i < db->nelem; i++) { in tn40_rx_free_buffers()
188 if (dm->page) in tn40_rx_free_buffers()
189 page_pool_put_full_page(priv->page_pool, dm->page, in tn40_rx_free_buffers()
196 if (priv->rxdb0) { in tn40_destroy_rx_ring()
198 tn40_rxdb_free(priv->rxdb0); in tn40_destroy_rx_ring()
199 priv->rxdb0 = NULL; in tn40_destroy_rx_ring()
201 tn40_fifo_free(priv, &priv->rxf_fifo0.m); in tn40_destroy_rx_ring()
202 tn40_fifo_free(priv, &priv->rxd_fifo0.m); in tn40_destroy_rx_ring()
203 page_pool_destroy(priv->page_pool); in tn40_destroy_rx_ring()
208 struct tn40_rxf_fifo *f = &priv->rxf_fifo0; in tn40_set_rx_desc()
212 rxfd = (struct tn40_rxf_desc *)(f->m.va + f->m.wptr); in tn40_set_rx_desc()
213 rxfd->info = cpu_to_le32(0x10003); /* INFO =1 BC =3 */ in tn40_set_rx_desc()
214 rxfd->va_lo = cpu_to_le32(idx); in tn40_set_rx_desc()
215 rxfd->pa_lo = cpu_to_le32(lower_32_bits(dma)); in tn40_set_rx_desc()
216 rxfd->pa_hi = cpu_to_le32(upper_32_bits(dma)); in tn40_set_rx_desc()
217 rxfd->len = cpu_to_le32(f->m.pktsz); in tn40_set_rx_desc()
218 f->m.wptr += sizeof(struct tn40_rxf_desc); in tn40_set_rx_desc()
219 delta = f->m.wptr - f->m.memsz; in tn40_set_rx_desc()
221 f->m.wptr = delta; in tn40_set_rx_desc()
223 memcpy(f->m.va, f->m.va + f->m.memsz, delta); in tn40_set_rx_desc()
224 netdev_dbg(priv->ndev, in tn40_set_rx_desc()
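The lines above (218-223) show how the rxf fifo write pointer wraps: a descriptor is always written as one contiguous block, the fifo buffer carries extra head-room past memsz (the TN40_FIFO_EXTRA_SPACE seen in tn40_fifo_alloc(), lines 43-44), and when the write runs past the end the spill-over bytes are copied back to offset 0. A self-contained sketch of just that pointer handling; the struct layout and names are assumptions:

#include <string.h>

struct ring {
	char *va;    /* memsz bytes plus extra head-room after the end */
	int memsz;   /* ring size in bytes */
	int wptr;    /* software write offset */
};

static void ring_advance_wptr(struct ring *r, int desc_sz)
{
	int delta;

	r->wptr += desc_sz;
	delta = r->wptr - r->memsz;
	if (delta >= 0) {
		r->wptr = delta;       /* wrap */
		if (delta > 0)         /* copy the spill-over back to the start */
			memcpy(r->va, r->va + r->memsz, delta);
	}
}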
231 * tn40_rx_alloc_buffers - Fill rxf fifo with buffers.
242 struct tn40_rxf_fifo *f = &priv->rxf_fifo0; in tn40_rx_alloc_buffers()
243 struct tn40_rxdb *db = priv->rxdb0; in tn40_rx_alloc_buffers()
248 dno = tn40_rxdb_available(db) - 1; in tn40_rx_alloc_buffers()
249 for (i = dno; i > 0; i--) { in tn40_rx_alloc_buffers()
250 page = page_pool_dev_alloc_pages(priv->page_pool); in tn40_rx_alloc_buffers()
257 dm->page = page; in tn40_rx_alloc_buffers()
260 tn40_write_reg(priv, f->m.reg_wptr, in tn40_rx_alloc_buffers()
261 f->m.wptr & TN40_TXF_WPTR_WR_PTR); in tn40_rx_alloc_buffers()
262 netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr 0x%x\n", in tn40_rx_alloc_buffers()
263 f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); in tn40_rx_alloc_buffers()
264 netdev_dbg(priv->ndev, "read_reg 0x%04x f->m.reg_rptr=0x%x\n", in tn40_rx_alloc_buffers()
265 f->m.reg_rptr, tn40_read_reg(priv, f->m.reg_rptr)); in tn40_rx_alloc_buffers()
266 netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr=0x%x\n", in tn40_rx_alloc_buffers()
267 f->m.reg_wptr, tn40_read_reg(priv, f->m.reg_wptr)); in tn40_rx_alloc_buffers()
273 struct tn40_rxf_fifo *f = &priv->rxf_fifo0; in tn40_recycle_rx_buffer()
277 idx = le32_to_cpu(rxdd->va_lo); in tn40_recycle_rx_buffer()
278 dm = tn40_rxdb_addr_elem(priv->rxdb0, idx); in tn40_recycle_rx_buffer()
279 tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(dm->page)); in tn40_recycle_rx_buffer()
281 tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); in tn40_recycle_rx_buffer()
286 struct tn40_rxd_fifo *f = &priv->rxd_fifo0; in tn40_rx_receive()
296 f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_WR_PTR; in tn40_rx_receive()
297 size = f->m.wptr - f->m.rptr; in tn40_rx_receive()
299 size += f->m.memsz; /* Size is negative :-) */ in tn40_rx_receive()
302 rxdd = (struct tn40_rxd_desc *)(f->m.va + f->m.rptr); in tn40_rx_receive()
303 db = priv->rxdb0; in tn40_rx_receive()
323 rxd_val1 = le32_to_cpu(rxdd->rxd_val1); in tn40_rx_receive()
326 size -= tmp_len; in tn40_rx_receive()
329 netdev_dbg(priv->ndev, in tn40_rx_receive()
339 /* A special treatment is given to non-contiguous in tn40_rx_receive()
348 f->m.rptr += tmp_len; in tn40_rx_receive()
349 tmp_len = f->m.rptr - f->m.memsz; in tn40_rx_receive()
351 f->m.rptr = tmp_len; in tn40_rx_receive()
356 netdev_dbg(priv->ndev, in tn40_rx_receive()
358 f->m.rptr, tmp_len); in tn40_rx_receive()
359 memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len); in tn40_rx_receive()
362 idx = le32_to_cpu(rxdd->va_lo); in tn40_rx_receive()
366 len = le16_to_cpu(rxdd->len); in tn40_rx_receive()
367 rxd_vlan = le16_to_cpu(rxdd->rxd_vlan); in tn40_rx_receive()
371 u64_stats_update_begin(&priv->syncp); in tn40_rx_receive()
372 priv->stats.rx_errors++; in tn40_rx_receive()
373 u64_stats_update_end(&priv->syncp); in tn40_rx_receive()
378 skb = napi_build_skb(page_address(dm->page), PAGE_SIZE); in tn40_rx_receive()
380 u64_stats_update_begin(&priv->syncp); in tn40_rx_receive()
381 priv->stats.rx_dropped++; in tn40_rx_receive()
382 priv->alloc_fail++; in tn40_rx_receive()
383 u64_stats_update_end(&priv->syncp); in tn40_rx_receive()
389 skb->protocol = eth_type_trans(skb, priv->ndev); in tn40_rx_receive()
390 skb->ip_summed = in tn40_rx_receive()
396 dm->page = NULL; in tn40_rx_receive()
399 napi_gro_receive(&priv->napi, skb); in tn40_rx_receive()
401 u64_stats_update_begin(&priv->syncp); in tn40_rx_receive()
402 priv->stats.rx_bytes += len; in tn40_rx_receive()
403 u64_stats_update_end(&priv->syncp); in tn40_rx_receive()
408 u64_stats_update_begin(&priv->syncp); in tn40_rx_receive()
409 priv->stats.rx_packets += done; in tn40_rx_receive()
410 u64_stats_update_end(&priv->syncp); in tn40_rx_receive()
412 tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR); in tn40_rx_receive()
420 * 1) TX Free Fifo - TXF - Holds ack descriptors for sent packets.
421 * 2) TX Data Fifo - TXD - Holds descriptors of full buffers.
448 * (from a pre-calculated array) and subtracts it from tx level. The
462 if (unlikely(*pptr == db->end)) in tn40_do_tx_db_ptr_next()
463 *pptr = db->start; in tn40_do_tx_db_ptr_next()
468 tn40_do_tx_db_ptr_next(db, &db->rptr); in tn40_tx_db_inc_rptr()
473 tn40_do_tx_db_ptr_next(db, &db->wptr); in tn40_tx_db_inc_wptr()
480 d->start = vzalloc(memsz); in tn40_tx_db_init()
481 if (!d->start) in tn40_tx_db_init()
482 return -ENOMEM; in tn40_tx_db_init()
487 d->size = memsz / sizeof(struct tn40_tx_map) - 1; in tn40_tx_db_init()
488 d->end = d->start + d->size + 1; /* just after last element */ in tn40_tx_db_init()
491 d->rptr = d->start; in tn40_tx_db_init()
492 d->wptr = d->start; in tn40_tx_db_init()
498 if (d->start) { in tn40_tx_db_close()
499 vfree(d->start); in tn40_tx_db_close()
500 d->start = NULL; in tn40_tx_db_close()
506  * 7 - the number of lwords in a txd with one phys buffer
507  * 3 - the number of lwords used for every additional phys buffer
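The two lines above give the size rule for a tx descriptor: 7 lwords (32-bit words) for a txd carrying one physical buffer, plus 3 lwords for each additional buffer. A plausible sketch of how the pre-calculated size table used later (tn40_txd_sizes[nr_frags].bytes, lines 632 and 782) could be built from that rule; the real init function is not part of this listing, so the function name, the MAX_PBL bound and the qword padding are assumptions:

#define MAX_PBL 19	/* assumed bound; the driver uses TN40_MAX_PBL */

struct txd_size {
	unsigned short bytes;	/* descriptor size in bytes  */
	unsigned short qwords;	/* descriptor size in qwords */
};

static struct txd_size txd_sizes[MAX_PBL];

static void init_txd_sizes(void)
{
	int i, lwords;

	for (i = 0; i < MAX_PBL; i++) {
		/* 7 lwords for the head buffer, 3 per extra fragment */
		lwords = 7 + i * 3;
		if (lwords & 1)
			lwords++;	/* pad to a whole number of qwords */
		txd_sizes[i].qwords = lwords >> 1;
		txd_sizes[i].bytes  = lwords << 2;
	}
}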
541 static void tn40_pbl_set(struct tn40_pbl *pbl, dma_addr_t dma, int len) in tn40_pbl_set() argument
543 pbl->len = cpu_to_le32(len); in tn40_pbl_set()
544 pbl->pa_lo = cpu_to_le32(lower_32_bits(dma)); in tn40_pbl_set()
545 pbl->pa_hi = cpu_to_le32(upper_32_bits(dma)); in tn40_pbl_set()
550 db->wptr->len = len; in tn40_txdb_set()
551 db->wptr->addr.dma = dma; in tn40_txdb_set()
560 * tn40_tx_map_skb - create and store DMA mappings for skb's data blocks
567 * PBL of a new tx descriptor. It also stores them in the tx db, so they could
578 int nr_frags = skb_shinfo(skb)->nr_frags; in tn40_tx_map_skb()
579 struct tn40_pbl *pbl = &txdd->pbl[0]; in tn40_tx_map_skb() local
580 struct tn40_txdb *db = &priv->txdb; in tn40_tx_map_skb()
585 netdev_dbg(priv->ndev, "TX skb %p skbLen %d dataLen %d frags %d\n", skb, in tn40_tx_map_skb()
586 skb->len, skb->data_len, nr_frags); in tn40_tx_map_skb()
587 if (nr_frags > TN40_MAX_PBL - 1) { in tn40_tx_map_skb()
591 nr_frags = skb_shinfo(skb)->nr_frags; in tn40_tx_map_skb()
594 len = skb->len - skb->data_len; in tn40_tx_map_skb()
595 dma = dma_map_single(&priv->pdev->dev, skb->data, len, in tn40_tx_map_skb()
597 ret = dma_mapping_error(&priv->pdev->dev, dma); in tn40_tx_map_skb()
602 tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len); in tn40_tx_map_skb()
603 *pkt_len = db->wptr->len; in tn40_tx_map_skb()
606 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in tn40_tx_map_skb()
609 dma = skb_frag_dma_map(&priv->pdev->dev, frag, 0, in tn40_tx_map_skb()
612 ret = dma_mapping_error(&priv->pdev->dev, dma); in tn40_tx_map_skb()
622 tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len); in tn40_tx_map_skb()
623 *pkt_len += db->wptr->len; in tn40_tx_map_skb()
627 if (skb->len < TN40_SHORT_PACKET_SIZE) in tn40_tx_map_skb()
632 db->wptr->len = -tn40_txd_sizes[nr_frags].bytes; in tn40_tx_map_skb()
633 db->wptr->addr.skb = skb; in tn40_tx_map_skb()
638 dma_unmap_page(&priv->pdev->dev, db->wptr->addr.dma, db->wptr->len, in tn40_tx_map_skb()
640 for (; i > 0; i--) in tn40_tx_map_skb()
641 dma_unmap_page(&priv->pdev->dev, info[i - 1].dma, in tn40_tx_map_skb()
642 info[i - 1].size, DMA_TO_DEVICE); in tn40_tx_map_skb()
643 return -ENOMEM; in tn40_tx_map_skb()
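The mapping and cleanup code above relies on a convention in the tx db: every DMA-mapped block is stored with a positive length (lines 550-551), and the final record of each packet stores the skb pointer with a negative length equal to the descriptor size (lines 632-633), so the cleanup loop (lines 841-847) can unmap fragments until it hits a non-positive length, free the skb and credit the descriptor bytes back to tx_level. A simplified, self-contained sketch of that record layout and walk; the types and callbacks are stand-ins for the driver's own helpers, and the circular pointer increment is omitted:

struct tx_map {
	union {
		unsigned long long dma;	/* valid while len > 0: one mapped fragment */
		void *skb;		/* valid when len < 0: the packet's skb     */
	} addr;
	int len;	/* > 0: fragment length; < 0: minus the descriptor size */
};

/* Walk one packet's records from *rptr; returns the bytes to credit back. */
static int cleanup_one_packet(struct tx_map **rptr,
			      void (*unmap)(unsigned long long dma, int len),
			      void (*free_skb)(void *skb))
{
	int credit;

	while ((*rptr)->len > 0) {
		unmap((*rptr)->addr.dma, (*rptr)->len);
		(*rptr)++;
	}
	credit = -(*rptr)->len;		/* len is negative on the skb record */
	free_skb((*rptr)->addr.skb);
	(*rptr)++;
	return credit;
}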
650 ret = tn40_fifo_alloc(priv, &priv->txd_fifo0.m, priv->txd_size, in tn40_create_tx_ring()
656 ret = tn40_fifo_alloc(priv, &priv->txf_fifo0.m, priv->txf_size, in tn40_create_tx_ring()
665 ret = tn40_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)); in tn40_create_tx_ring()
670 priv->b0_len = 64; in tn40_create_tx_ring()
671 priv->b0_va = dma_alloc_coherent(&priv->pdev->dev, priv->b0_len, in tn40_create_tx_ring()
672 &priv->b0_dma, GFP_KERNEL); in tn40_create_tx_ring()
673 if (!priv->b0_va) in tn40_create_tx_ring()
676 priv->tx_level = TN40_MAX_TX_LEVEL; in tn40_create_tx_ring()
677 priv->tx_update_mark = priv->tx_level - 1024; in tn40_create_tx_ring()
680 tn40_tx_db_close(&priv->txdb); in tn40_create_tx_ring()
682 tn40_fifo_free(priv, &priv->txf_fifo0.m); in tn40_create_tx_ring()
684 tn40_fifo_free(priv, &priv->txd_fifo0.m); in tn40_create_tx_ring()
685 return -ENOMEM; in tn40_create_tx_ring()
689 * tn40_tx_space - Calculate the available space in the TX fifo.
696 struct tn40_txd_fifo *f = &priv->txd_fifo0; in tn40_tx_space()
699 f->m.rptr = tn40_read_reg(priv, f->m.reg_rptr) & TN40_TXF_WPTR_WR_PTR; in tn40_tx_space()
700 fsize = f->m.rptr - f->m.wptr; in tn40_tx_space()
702 fsize = f->m.memsz + fsize; in tn40_tx_space()
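Lines 699-702 compute the free space as the forward distance from the software write pointer to the hardware read pointer, adding one full lap when the raw difference is not positive (so an empty ring reports memsz bytes free, as in the original). The same arithmetic as a tiny standalone helper:

static int ring_free_space(int rptr, int wptr, int memsz)
{
	int fsize = rptr - wptr;	/* bytes from wptr forward to rptr */

	if (fsize <= 0)			/* wptr is at or ahead of rptr this lap */
		fsize += memsz;
	return fsize;
}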
711 struct tn40_txd_fifo *f = &priv->txd_fifo0; in tn40_start_xmit()
722 txdd = (struct tn40_txd_desc *)(f->m.va + f->m.wptr); in tn40_start_xmit()
725 u64_stats_update_begin(&priv->syncp); in tn40_start_xmit()
726 priv->stats.tx_dropped++; in tn40_start_xmit()
727 u64_stats_update_end(&priv->syncp); in tn40_start_xmit()
731 nr_frags = skb_shinfo(skb)->nr_frags; in tn40_start_xmit()
732 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) in tn40_start_xmit()
735 if (skb_shinfo(skb)->gso_size) { in tn40_start_xmit()
736 txd_mss = skb_shinfo(skb)->gso_size; in tn40_start_xmit()
738 netdev_dbg(priv->ndev, "skb %p pkt len %d gso size = %d\n", skb, in tn40_start_xmit()
746 txdd->va_hi = 0; in tn40_start_xmit()
747 txdd->va_lo = 0; in tn40_start_xmit()
748 txdd->length = cpu_to_le16(pkt_len); in tn40_start_xmit()
749 txdd->mss = cpu_to_le16(txd_mss); in tn40_start_xmit()
750 txdd->txd_val1 = in tn40_start_xmit()
754 netdev_dbg(priv->ndev, "=== w1 qwords[%d] %d =====\n", nr_frags, in tn40_start_xmit()
756 netdev_dbg(priv->ndev, "=== TxD desc =====================\n"); in tn40_start_xmit()
757 netdev_dbg(priv->ndev, "=== w1: 0x%x ================\n", in tn40_start_xmit()
758 txdd->txd_val1); in tn40_start_xmit()
759 netdev_dbg(priv->ndev, "=== w2: mss 0x%x len 0x%x\n", txdd->mss, in tn40_start_xmit()
760 txdd->length); in tn40_start_xmit()
763 struct tn40_pbl *pbl = &txdd->pbl[++nr_frags]; in tn40_start_xmit() local
765 txdd->length = cpu_to_le16(TN40_SHORT_PACKET_SIZE); in tn40_start_xmit()
766 txdd->txd_val1 = in tn40_start_xmit()
771 pbl->len = cpu_to_le32(TN40_SHORT_PACKET_SIZE - pkt_len); in tn40_start_xmit()
772 pbl->pa_lo = cpu_to_le32(lower_32_bits(priv->b0_dma)); in tn40_start_xmit()
773 pbl->pa_hi = cpu_to_le32(upper_32_bits(priv->b0_dma)); in tn40_start_xmit()
774 netdev_dbg(priv->ndev, "=== SHORT_PKT_FIX ==============\n"); in tn40_start_xmit()
775 netdev_dbg(priv->ndev, "=== nr_frags : %d ==============\n", in tn40_start_xmit()
782 f->m.wptr += tn40_txd_sizes[nr_frags].bytes; in tn40_start_xmit()
783 len = f->m.wptr - f->m.memsz; in tn40_start_xmit()
785 f->m.wptr = len; in tn40_start_xmit()
787 memcpy(f->m.va, f->m.va + f->m.memsz, len); in tn40_start_xmit()
794 priv->tx_level -= tn40_txd_sizes[nr_frags].bytes; in tn40_start_xmit()
795 if (priv->tx_level > priv->tx_update_mark) { in tn40_start_xmit()
796 tn40_write_reg(priv, f->m.reg_wptr, in tn40_start_xmit()
797 f->m.wptr & TN40_TXF_WPTR_WR_PTR); in tn40_start_xmit()
799 if (priv->tx_noupd++ > TN40_NO_UPD_PACKETS) { in tn40_start_xmit()
800 priv->tx_noupd = 0; in tn40_start_xmit()
801 tn40_write_reg(priv, f->m.reg_wptr, in tn40_start_xmit()
802 f->m.wptr & TN40_TXF_WPTR_WR_PTR); in tn40_start_xmit()
806 u64_stats_update_begin(&priv->syncp); in tn40_start_xmit()
807 priv->stats.tx_packets++; in tn40_start_xmit()
808 priv->stats.tx_bytes += pkt_len; in tn40_start_xmit()
809 u64_stats_update_end(&priv->syncp); in tn40_start_xmit()
810 if (priv->tx_level < TN40_MIN_TX_LEVEL) { in tn40_start_xmit()
811 netdev_dbg(priv->ndev, "TX Q STOP level %d\n", priv->tx_level); in tn40_start_xmit()
820 struct tn40_txf_fifo *f = &priv->txf_fifo0; in tn40_tx_cleanup()
821 struct tn40_txdb *db = &priv->txdb; in tn40_tx_cleanup()
824 f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_MASK; in tn40_tx_cleanup()
826 netif_tx_lock(priv->ndev); in tn40_tx_cleanup()
827 while (f->m.wptr != f->m.rptr) { in tn40_tx_cleanup()
828 f->m.rptr += TN40_TXF_DESC_SZ; in tn40_tx_cleanup()
829 f->m.rptr &= f->m.size_mask; in tn40_tx_cleanup()
833 dma_addr_t addr = db->rptr->addr.dma; in tn40_tx_cleanup()
834 size_t size = db->rptr->len; in tn40_tx_cleanup()
836 netif_tx_unlock(priv->ndev); in tn40_tx_cleanup()
837 dma_unmap_page(&priv->pdev->dev, addr, in tn40_tx_cleanup()
839 netif_tx_lock(priv->ndev); in tn40_tx_cleanup()
841 } while (db->rptr->len > 0); in tn40_tx_cleanup()
842  tx_level -= db->rptr->len; /* '-' because the len is negative */ in tn40_tx_cleanup()
844  /* The skb pointer comes next - free it */ in tn40_tx_cleanup()
845 dev_kfree_skb_any(db->rptr->addr.skb); in tn40_tx_cleanup()
846 netdev_dbg(priv->ndev, "dev_kfree_skb_any %p %d\n", in tn40_tx_cleanup()
847 db->rptr->addr.skb, -db->rptr->len); in tn40_tx_cleanup()
852 tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR); in tn40_tx_cleanup()
858 priv->tx_level += tx_level; in tn40_tx_cleanup()
859 if (priv->tx_noupd) { in tn40_tx_cleanup()
860 priv->tx_noupd = 0; in tn40_tx_cleanup()
861 tn40_write_reg(priv, priv->txd_fifo0.m.reg_wptr, in tn40_tx_cleanup()
862 priv->txd_fifo0.m.wptr & TN40_TXF_WPTR_WR_PTR); in tn40_tx_cleanup()
864 if (unlikely(netif_queue_stopped(priv->ndev) && in tn40_tx_cleanup()
865 netif_carrier_ok(priv->ndev) && in tn40_tx_cleanup()
866 (priv->tx_level >= TN40_MAX_TX_LEVEL / 2))) { in tn40_tx_cleanup()
867 netdev_dbg(priv->ndev, "TX Q WAKE level %d\n", priv->tx_level); in tn40_tx_cleanup()
868 netif_wake_queue(priv->ndev); in tn40_tx_cleanup()
870 netif_tx_unlock(priv->ndev); in tn40_tx_cleanup()
875 struct tn40_txdb *db = &priv->txdb; in tn40_tx_free_skbs()
877 while (db->rptr != db->wptr) { in tn40_tx_free_skbs()
878 if (likely(db->rptr->len)) in tn40_tx_free_skbs()
879 dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma, in tn40_tx_free_skbs()
880 db->rptr->len, DMA_TO_DEVICE); in tn40_tx_free_skbs()
882 dev_kfree_skb(db->rptr->addr.skb); in tn40_tx_free_skbs()
890 tn40_fifo_free(priv, &priv->txd_fifo0.m); in tn40_destroy_tx_ring()
891 tn40_fifo_free(priv, &priv->txf_fifo0.m); in tn40_destroy_tx_ring()
892 tn40_tx_db_close(&priv->txdb); in tn40_destroy_tx_ring()
894 if (priv->b0_len) { in tn40_destroy_tx_ring()
895 dma_free_coherent(&priv->pdev->dev, priv->b0_len, priv->b0_va, in tn40_destroy_tx_ring()
896 priv->b0_dma); in tn40_destroy_tx_ring()
897 priv->b0_len = 0; in tn40_destroy_tx_ring()
902 * tn40_tx_push_desc - Push a descriptor to TxD fifo.
916 struct tn40_txd_fifo *f = &priv->txd_fifo0; in tn40_tx_push_desc()
917 int i = f->m.memsz - f->m.wptr; in tn40_tx_push_desc()
923 memcpy(f->m.va + f->m.wptr, data, size); in tn40_tx_push_desc()
924 f->m.wptr += size; in tn40_tx_push_desc()
926 memcpy(f->m.va + f->m.wptr, data, i); in tn40_tx_push_desc()
927 f->m.wptr = size - i; in tn40_tx_push_desc()
928 memcpy(f->m.va, data + i, f->m.wptr); in tn40_tx_push_desc()
930 tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); in tn40_tx_push_desc()
934 * tn40_tx_push_desc_safe - push descriptor to TxD fifo in a safe way.
953 int avail = tn40_tx_space(priv) - 8; in tn40_tx_push_desc_safe()
963 netdev_dbg(priv->ndev, in tn40_tx_push_desc_safe()
967 size -= avail; in tn40_tx_push_desc_safe()
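Only fragments of tn40_tx_push_desc_safe() appear above (lines 953 and 963-967), but they imply a loop that repeatedly pushes min(available space - 8, remaining) bytes and retries while the fifo is full. A sketch of such a loop; free_space() and push() stand in for tn40_tx_space() and tn40_tx_push_desc(), and the retry limit is an assumption (the driver would also delay between retries):

static int push_desc_safe(char *data, int size,
			  int (*free_space)(void),
			  void (*push)(char *data, int len))
{
	int retries = 0;

	while (size > 0) {
		/* keep an 8-byte guard so the fifo never fills completely */
		int avail = free_space() - 8;

		if (avail <= 0) {
			if (retries++ > 300)
				return -1;	/* fifo stayed full; give up */
			continue;
		}
		if (avail > size)
			avail = size;
		push(data, avail);	/* writes and wraps, as in tn40_tx_push_desc() */
		data += avail;
		size -= avail;
	}
	return 0;
}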
977 netdev_dbg(priv->ndev, "speed %d\n", speed); in tn40_set_link_speed()
982 netdev_dbg(priv->ndev, "link_speed %d\n", speed); in tn40_set_link_speed()
1008 for (i = 1000; i; i--) { in tn40_set_link_speed()
1022 netdev_err(priv->ndev, "MAC init timeout!\n"); in tn40_set_link_speed()
1046 for (i = 1000; i; i--) { in tn40_set_link_speed()
1059 netdev_err(priv->ndev, "MAC init timeout!\n"); in tn40_set_link_speed()
1094 netdev_err(priv->ndev, in tn40_set_link_speed()
1107 netdev_dbg(priv->ndev, "link changed %u\n", link); in tn40_link_changed()
1113 netdev_dbg(priv->ndev, "isr = 0x%x\n", isr); in tn40_isr_extra()
1134 if (likely(napi_schedule_prep(&priv->napi))) { in tn40_isr_napi()
1135 __napi_schedule(&priv->napi); in tn40_isr_napi()
1180 ret = request_firmware(&fw, TN40_FIRMWARE_NAME, &priv->pdev->dev); in tn40_fw_load()
1186 netdev_dbg(priv->ndev, "Loading FW...\n"); in tn40_fw_load()
1187 tn40_tx_push_desc_safe(priv, (void *)fw->data, fw->size); in tn40_fw_load()
1196 netdev_err(priv->ndev, "firmware loading failed\n"); in tn40_fw_load()
1197  netdev_dbg(priv->ndev, "VPC: 0x%x VIC: 0x%x STATUS: 0x%x\n", in tn40_fw_load()
1201 ret = -EIO; in tn40_fw_load()
1203 netdev_dbg(priv->ndev, "firmware loading success\n"); in tn40_fw_load()
1213 netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n", in tn40_restore_mac()
1218 val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]); in tn40_restore_mac()
1220 val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]); in tn40_restore_mac()
1222 val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]); in tn40_restore_mac()
1227 (ndev->dev_addr[3] << 24) | (ndev->dev_addr[2] << 16) | in tn40_restore_mac()
1228 (ndev->dev_addr[1] << 8) | (ndev->dev_addr[0])); in tn40_restore_mac()
1230 (ndev->dev_addr[5] << 8) | (ndev->dev_addr[4])); in tn40_restore_mac()
1232 netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n", in tn40_restore_mac()
1255 priv->rxf_fifo0.m.pktsz & TN40_MAX_FRAME_AB_VAL); in tn40_hw_start()
1256 tn40_write_reg(priv, TN40_REG_RDINTCM0, priv->rdintcm); in tn40_hw_start()
1260 tn40_write_reg(priv, TN40_REG_TDINTCM0, priv->tdintcm); in tn40_hw_start()
1263 tn40_restore_mac(priv->ndev, priv); in tn40_hw_start()
1283 tn40_write_reg(priv, TN40_REG_CLKPLL, (val | TN40_CLKPLL_SFTRST) + 0x8); in tn40_hw_reset()
1293 return -EIO; in tn40_hw_reset()
1296 /* Do any PCI-E read transaction */ in tn40_hw_reset()
1318  netdev_err(priv->ndev, "SW reset timeout, continuing anyway\n"); in tn40_sw_reset()
1351 netdev_err(priv->ndev, "failed to tx init %d\n", ret); in tn40_start()
1357 netdev_err(priv->ndev, "failed to rx init %d\n", ret); in tn40_start()
1362 if (tn40_rxdb_available(priv->rxdb0) != 1) { in tn40_start()
1363 ret = -ENOMEM; in tn40_start()
1364 netdev_err(priv->ndev, "failed to allocate rx buffers\n"); in tn40_start()
1368 ret = request_irq(priv->pdev->irq, &tn40_isr_napi, IRQF_SHARED, in tn40_start()
1369 priv->ndev->name, priv->ndev); in tn40_start()
1371 netdev_err(priv->ndev, "failed to request irq %d\n", ret); in tn40_start()
1387 free_irq(priv->pdev->irq, priv->ndev); in tn40_stop()
1397 phylink_stop(priv->phylink); in tn40_close()
1398 phylink_disconnect_phy(priv->phylink); in tn40_close()
1400 napi_disable(&priv->napi); in tn40_close()
1401 netif_napi_del(&priv->napi); in tn40_close()
1411 ret = phylink_connect_phy(priv->phylink, priv->phydev); in tn40_open()
1419 phylink_disconnect_phy(priv->phylink); in tn40_open()
1423 napi_enable(&priv->napi); in tn40_open()
1424 phylink_start(priv->phylink); in tn40_open()
1425 netif_start_queue(priv->ndev); in tn40_open()
1435 netdev_dbg(priv->ndev, "vid =%d value =%d\n", (int)vid, enable); in __tn40_vlan_rx_vid()
1439 netdev_dbg(priv->ndev, "reg =%x, val =%x, bit =%d\n", reg, val, bit); in __tn40_vlan_rx_vid()
1444 netdev_dbg(priv->ndev, "new val %x\n", val); in __tn40_vlan_rx_vid()
1469 /* IMF - imperfect (hash) rx multicast filter */ in tn40_setmulti()
1470 /* PMF - perfect rx multicast filter */ in tn40_setmulti()
1473 if (ndev->flags & IFF_PROMISC) { in tn40_setmulti()
1475 } else if (ndev->flags & IFF_ALLMULTI) { in tn40_setmulti()
1507 hash ^= mclist->addr[i]; in tn40_setmulti()
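The IMF comment above (lines 1469-1470) and the XOR at line 1507 indicate that the imperfect filter reduces each multicast address to an 8-bit hash by XOR-ing its six bytes and then sets one bit in a 256-bit hash table. A standalone sketch of that hashing; how the table maps onto the device's hash registers is an assumption:

#include <stdint.h>

#define ETH_ALEN 6

static uint32_t mcast_hash[8];	/* 256-bit filter as eight 32-bit words */

static void imf_add_addr(const uint8_t addr[ETH_ALEN])
{
	uint8_t hash = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		hash ^= addr[i];

	/* bit index = hash: word = hash / 32, bit = hash % 32 */
	mcast_hash[hash >> 5] |= 1u << (hash & 0x1f);
}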
1527 eth_hw_addr_set(ndev, addr->sa_data); in tn40_set_mac()
1542 eth_hw_addr_set(priv->ndev, addr); in tn40_mac_init()
1552 start = u64_stats_fetch_begin(&priv->syncp); in tn40_get_stats()
1553 stats->tx_packets = priv->stats.tx_packets; in tn40_get_stats()
1554 stats->tx_bytes = priv->stats.tx_bytes; in tn40_get_stats()
1555 stats->tx_dropped = priv->stats.tx_dropped; in tn40_get_stats()
1557 stats->rx_packets = priv->stats.rx_packets; in tn40_get_stats()
1558 stats->rx_bytes = priv->stats.rx_bytes; in tn40_get_stats()
1559 stats->rx_dropped = priv->stats.rx_dropped; in tn40_get_stats()
1560 stats->rx_errors = priv->stats.rx_errors; in tn40_get_stats()
1561 } while (u64_stats_fetch_retry(&priv->syncp, start)); in tn40_get_stats()
1581 return phylink_ethtool_ksettings_get(priv->phylink, cmd); in tn40_ethtool_get_link_ksettings()
1596 start = u64_stats_fetch_begin(&priv->syncp); in tn40_get_queue_stats_rx()
1598 stats->packets = priv->stats.rx_packets; in tn40_get_queue_stats_rx()
1599 stats->bytes = priv->stats.rx_bytes; in tn40_get_queue_stats_rx()
1600 stats->alloc_fail = priv->alloc_fail; in tn40_get_queue_stats_rx()
1601 } while (u64_stats_fetch_retry(&priv->syncp, start)); in tn40_get_queue_stats_rx()
1611 start = u64_stats_fetch_begin(&priv->syncp); in tn40_get_queue_stats_tx()
1613 stats->packets = priv->stats.tx_packets; in tn40_get_queue_stats_tx()
1614 stats->bytes = priv->stats.tx_bytes; in tn40_get_queue_stats_tx()
1615 } while (u64_stats_fetch_retry(&priv->syncp, start)); in tn40_get_queue_stats_tx()
1622 rx->packets = 0; in tn40_get_base_stats()
1623 rx->bytes = 0; in tn40_get_base_stats()
1624 rx->alloc_fail = 0; in tn40_get_base_stats()
1626 tx->packets = 0; in tn40_get_base_stats()
1627 tx->bytes = 0; in tn40_get_base_stats()
1660 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct tn40_priv)); in tn40_netdev_alloc()
1663 ndev->netdev_ops = &tn40_netdev_ops; in tn40_netdev_alloc()
1664 ndev->ethtool_ops = &tn40_ethtool_ops; in tn40_netdev_alloc()
1665 ndev->stat_ops = &tn40_stat_ops; in tn40_netdev_alloc()
1666 ndev->tx_queue_len = TN40_NDEV_TXQ_LEN; in tn40_netdev_alloc()
1667 ndev->mem_start = pci_resource_start(pdev, 0); in tn40_netdev_alloc()
1668 ndev->mem_end = pci_resource_end(pdev, 0); in tn40_netdev_alloc()
1669 ndev->min_mtu = ETH_ZLEN; in tn40_netdev_alloc()
1670 ndev->max_mtu = TN40_MAX_MTU; in tn40_netdev_alloc()
1672 ndev->features = NETIF_F_IP_CSUM | in tn40_netdev_alloc()
1681 ndev->vlan_features = NETIF_F_IP_CSUM | in tn40_netdev_alloc()
1685 if (dma_get_mask(&pdev->dev) == DMA_BIT_MASK(64)) { in tn40_netdev_alloc()
1686 ndev->features |= NETIF_F_HIGHDMA; in tn40_netdev_alloc()
1687 ndev->vlan_features |= NETIF_F_HIGHDMA; in tn40_netdev_alloc()
1689 ndev->hw_features |= ndev->features; in tn40_netdev_alloc()
1691 SET_NETDEV_DEV(ndev, &pdev->dev); in tn40_netdev_alloc()
1707 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in tn40_probe()
1709 dev_err(&pdev->dev, "failed to set DMA mask.\n"); in tn40_probe()
1715 dev_err(&pdev->dev, "failed to request PCI regions.\n"); in tn40_probe()
1723 ret = -EIO; in tn40_probe()
1724 dev_err(&pdev->dev, "failed to map PCI bar.\n"); in tn40_probe()
1730 ret = -ENOMEM; in tn40_probe()
1731 dev_err(&pdev->dev, "failed to allocate netdev.\n"); in tn40_probe()
1737 netif_napi_add(ndev, &priv->napi, tn40_poll); in tn40_probe()
1739 priv->regs = regs; in tn40_probe()
1740 priv->pdev = pdev; in tn40_probe()
1741 priv->ndev = ndev; in tn40_probe()
1743 priv->txd_size = 3; in tn40_probe()
1744 priv->txf_size = 3; in tn40_probe()
1745 priv->rxd_size = 3; in tn40_probe()
1746 priv->rxf_size = 3; in tn40_probe()
1748 priv->rdintcm = TN40_INT_REG_VAL(0x20, 1, 4, 12); in tn40_probe()
1749 priv->tdintcm = TN40_INT_REG_VAL(0x20, 1, 0, 12); in tn40_probe()
1753 dev_err(&pdev->dev, "failed to reset HW.\n"); in tn40_probe()
1759 dev_err(&pdev->dev, "failed to allocate irq.\n"); in tn40_probe()
1765 dev_err(&pdev->dev, "failed to initialize mdio bus.\n"); in tn40_probe()
1769 priv->stats_flag = in tn40_probe()
1771 u64_stats_init(&priv->syncp); in tn40_probe()
1773 priv->isr_mask = TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_PSE | in tn40_probe()
1780 dev_err(&pdev->dev, "failed to set up PHY.\n"); in tn40_probe()
1786 dev_err(&pdev->dev, "failed to initialize tn40_priv.\n"); in tn40_probe()
1792 dev_err(&pdev->dev, "failed to register netdev.\n"); in tn40_probe()
1814 struct net_device *ndev = priv->ndev; in tn40_remove()
1819 pci_free_irq_vectors(priv->pdev); in tn40_remove()
1821 iounmap(priv->regs); in tn40_remove()