Lines Matching full:port

61 static void mvpp2_acpi_start(struct mvpp2_port *port);
182 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, in mvpp2_txdesc_dma_addr_get() argument
185 if (port->priv->hw_version == MVPP21) in mvpp2_txdesc_dma_addr_get()
192 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, in mvpp2_txdesc_dma_addr_set() argument
201 if (port->priv->hw_version == MVPP21) { in mvpp2_txdesc_dma_addr_set()
213 static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, in mvpp2_txdesc_size_get() argument
216 if (port->priv->hw_version == MVPP21) in mvpp2_txdesc_size_get()
222 static void mvpp2_txdesc_size_set(struct mvpp2_port *port, in mvpp2_txdesc_size_set() argument
226 if (port->priv->hw_version == MVPP21) in mvpp2_txdesc_size_set()
232 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, in mvpp2_txdesc_txq_set() argument
236 if (port->priv->hw_version == MVPP21) in mvpp2_txdesc_txq_set()
242 static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, in mvpp2_txdesc_cmd_set() argument
246 if (port->priv->hw_version == MVPP21) in mvpp2_txdesc_cmd_set()
252 static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, in mvpp2_txdesc_offset_get() argument
255 if (port->priv->hw_version == MVPP21) in mvpp2_txdesc_offset_get()
261 static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, in mvpp2_rxdesc_dma_addr_get() argument
264 if (port->priv->hw_version == MVPP21) in mvpp2_rxdesc_dma_addr_get()
271 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, in mvpp2_rxdesc_cookie_get() argument
274 if (port->priv->hw_version == MVPP21) in mvpp2_rxdesc_cookie_get()
281 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, in mvpp2_rxdesc_size_get() argument
284 if (port->priv->hw_version == MVPP21) in mvpp2_rxdesc_size_get()
290 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, in mvpp2_rxdesc_status_get() argument
293 if (port->priv->hw_version == MVPP21) in mvpp2_rxdesc_status_get()
306 static void mvpp2_txq_inc_put(struct mvpp2_port *port, in mvpp2_txq_inc_put() argument
319 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); in mvpp2_txq_inc_put()
320 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + in mvpp2_txq_inc_put()
321 mvpp2_txdesc_offset_get(port, tx_desc); in mvpp2_txq_inc_put()
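
The accessors listed above all follow one pattern: every descriptor field is read or written through a helper that branches on port->priv->hw_version, because the PPv2.1 and PPv2.2 descriptor layouts differ. Below is a minimal standalone sketch of that pattern; the struct layouts, field names and the 40-bit address mask are simplified assumptions for illustration, not the driver's exact definitions.

    #include <stdint.h>
    #include <stdio.h>

    enum hw_version { MVPP21, MVPP22 };

    /* Simplified stand-ins for the two hardware descriptor layouts (assumed). */
    struct pp21_tx_desc { uint32_t buf_dma_addr; };
    struct pp22_tx_desc { uint64_t buf_dma_addr_ptp; };

    struct tx_desc {
        union {
            struct pp21_tx_desc pp21;
            struct pp22_tx_desc pp22;
        };
    };

    /* Same shape as mvpp2_txdesc_dma_addr_get(): pick the field by HW version. */
    static uint64_t txdesc_dma_addr_get(enum hw_version ver, const struct tx_desc *d)
    {
        if (ver == MVPP21)
            return d->pp21.buf_dma_addr;
        /* PPv2.2 packs extra (PTP) bits above the address; mask width assumed. */
        return d->pp22.buf_dma_addr_ptp & 0xffffffffffULL;
    }

    int main(void)
    {
        struct tx_desc d;

        d.pp22.buf_dma_addr_ptp = 0x12345678abcdULL;
        printf("dma addr = 0x%llx\n",
               (unsigned long long)txdesc_dma_addr_get(MVPP22, &d));
        return 0;
    }
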
347 /* Get number of physical egress port */
348 static inline int mvpp2_egress_port(struct mvpp2_port *port) in mvpp2_egress_port() argument
350 return MVPP2_MAX_TCONT + port->id; in mvpp2_egress_port()
354 static inline int mvpp2_txq_phys(int port, int txq) in mvpp2_txq_phys() argument
356 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; in mvpp2_txq_phys()
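
mvpp2_egress_port() and mvpp2_txq_phys() above define the physical numbering scheme: egress ports sit after the TCONTs, and physical TX queues are laid out per egress port. A standalone example of that arithmetic follows; the constant values (MVPP2_MAX_TCONT = 16, MVPP2_MAX_TXQ = 8) are taken from the mvpp2 header and should be treated as assumptions here.

    #include <stdio.h>

    #define MVPP2_MAX_TCONT 16
    #define MVPP2_MAX_TXQ   8

    static int egress_port(int port_id)
    {
        return MVPP2_MAX_TCONT + port_id;                          /* mvpp2_egress_port() */
    }

    static int txq_phys(int port_id, int txq)
    {
        return (MVPP2_MAX_TCONT + port_id) * MVPP2_MAX_TXQ + txq;  /* mvpp2_txq_phys() */
    }

    int main(void)
    {
        /* port 1, logical txq 2 -> egress port 17, physical queue 138 */
        printf("egress port %d, phys txq %d\n", egress_port(1), txq_phys(1, 2));
        return 0;
    }
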
640 struct mvpp2_port *port; in mvpp2_bm_init() local
651 port = priv->port_list[i]; in mvpp2_bm_init()
652 if (port->xdp_prog) { in mvpp2_bm_init()
720 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, in mvpp2_rxq_long_pool_set() argument
727 prxq = port->rxqs[lrxq]->id; in mvpp2_rxq_long_pool_set()
729 if (port->priv->hw_version == MVPP21) in mvpp2_rxq_long_pool_set()
734 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); in mvpp2_rxq_long_pool_set()
737 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); in mvpp2_rxq_long_pool_set()
741 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, in mvpp2_rxq_short_pool_set() argument
748 prxq = port->rxqs[lrxq]->id; in mvpp2_rxq_short_pool_set()
750 if (port->priv->hw_version == MVPP21) in mvpp2_rxq_short_pool_set()
755 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); in mvpp2_rxq_short_pool_set()
758 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); in mvpp2_rxq_short_pool_set()
761 static void *mvpp2_buf_alloc(struct mvpp2_port *port, in mvpp2_buf_alloc() argument
781 dma_addr = dma_map_single(port->dev->dev.parent, data, in mvpp2_buf_alloc()
784 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { in mvpp2_buf_alloc()
796 static void mvpp2_rxq_enable_fc(struct mvpp2_port *port) in mvpp2_rxq_enable_fc() argument
799 int fq = port->first_rxq; in mvpp2_rxq_enable_fc()
802 spin_lock_irqsave(&port->priv->mss_spinlock, flags); in mvpp2_rxq_enable_fc()
807 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); in mvpp2_rxq_enable_fc()
810 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); in mvpp2_rxq_enable_fc()
813 for (q = 0; q < port->nrxqs; q++) { in mvpp2_rxq_enable_fc()
817 mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val); in mvpp2_rxq_enable_fc()
819 val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq)); in mvpp2_rxq_enable_fc()
820 /* Set RXQ port ID */ in mvpp2_rxq_enable_fc()
822 val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq)); in mvpp2_rxq_enable_fc()
834 host_id = port->nqvecs; in mvpp2_rxq_enable_fc()
844 mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val); in mvpp2_rxq_enable_fc()
848 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); in mvpp2_rxq_enable_fc()
851 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); in mvpp2_rxq_enable_fc()
853 spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); in mvpp2_rxq_enable_fc()
857 static void mvpp2_rxq_disable_fc(struct mvpp2_port *port) in mvpp2_rxq_disable_fc() argument
861 int fq = port->first_rxq; in mvpp2_rxq_disable_fc()
863 spin_lock_irqsave(&port->priv->mss_spinlock, flags); in mvpp2_rxq_disable_fc()
868 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); in mvpp2_rxq_disable_fc()
871 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); in mvpp2_rxq_disable_fc()
874 for (q = 0; q < port->nrxqs; q++) { in mvpp2_rxq_disable_fc()
878 mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val); in mvpp2_rxq_disable_fc()
880 val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq)); in mvpp2_rxq_disable_fc()
887 mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val); in mvpp2_rxq_disable_fc()
891 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); in mvpp2_rxq_disable_fc()
894 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); in mvpp2_rxq_disable_fc()
896 spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); in mvpp2_rxq_disable_fc()
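
mvpp2_rxq_enable_fc() and mvpp2_rxq_disable_fc() share one shape: under port->priv->mss_spinlock they read-modify-write the per-RXQ threshold and assignment registers in the CM3 (MSS) window, then raise a change-request bit in MSS_FC_COM_REG so the firmware picks up the new configuration. The standalone sketch below reproduces only that read-modify-write flow; the offsets, fields and bit names are placeholders, not the driver's register definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Fake CM3/MSS register window; the real driver accesses mapped hardware. */
    static uint32_t mss_regs[64];

    static uint32_t cm3_read(unsigned int off)              { return mss_regs[off]; }
    static void     cm3_write(unsigned int off, uint32_t v) { mss_regs[off] = v; }

    /* Placeholder offsets and fields, for illustration only. */
    #define FC_COM_REG          0
    #define FC_COM_CHANGE_REQ   (1u << 31)
    #define RXQ_ASS_REG(q)      (16 + (q))
    #define RXQ_ASS_PORTID(p)   ((p) & 0xf)

    static void rxq_enable_fc(int port_id, int nrxqs)
    {
        uint32_t val;
        int q;

        /* The driver does all of this while holding port->priv->mss_spinlock. */
        for (q = 0; q < nrxqs; q++) {
            val = cm3_read(RXQ_ASS_REG(q));
            val |= RXQ_ASS_PORTID(port_id);          /* bind the queue to this port */
            cm3_write(RXQ_ASS_REG(q), val);
        }

        /* Tell the CM3 firmware that the flow-control configuration changed. */
        val = cm3_read(FC_COM_REG);
        cm3_write(FC_COM_REG, val | FC_COM_CHANGE_REQ);
    }

    int main(void)
    {
        rxq_enable_fc(2, 4);
        printf("FC_COM = 0x%08x, RXQ0_ASS = 0x%08x\n",
               cm3_read(FC_COM_REG), cm3_read(RXQ_ASS_REG(0)));
        return 0;
    }
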
900 static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port, in mvpp2_bm_pool_update_fc() argument
907 spin_lock_irqsave(&port->priv->mss_spinlock, flags); in mvpp2_bm_pool_update_fc()
912 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); in mvpp2_bm_pool_update_fc()
915 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); in mvpp2_bm_pool_update_fc()
919 /* Set BM pool start and stop thresholds per port */ in mvpp2_bm_pool_update_fc()
920 val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id)); in mvpp2_bm_pool_update_fc()
921 val |= MSS_BUF_POOL_PORT_OFFS(port->id); in mvpp2_bm_pool_update_fc()
926 mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val); in mvpp2_bm_pool_update_fc()
928 /* Remove BM pool from the port */ in mvpp2_bm_pool_update_fc()
929 val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id)); in mvpp2_bm_pool_update_fc()
930 val &= ~MSS_BUF_POOL_PORT_OFFS(port->id); in mvpp2_bm_pool_update_fc()
933 * flow control if pool empty (not used by any port) in mvpp2_bm_pool_update_fc()
940 mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val); in mvpp2_bm_pool_update_fc()
944 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); in mvpp2_bm_pool_update_fc()
947 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); in mvpp2_bm_pool_update_fc()
949 spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); in mvpp2_bm_pool_update_fc()
955 struct mvpp2_port *port; in mvpp2_bm_pool_update_priv_fc() local
959 port = priv->port_list[i]; in mvpp2_bm_pool_update_priv_fc()
960 if (port->priv->percpu_pools) { in mvpp2_bm_pool_update_priv_fc()
961 for (j = 0; j < port->nrxqs; j++) in mvpp2_bm_pool_update_priv_fc()
962 mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j], in mvpp2_bm_pool_update_priv_fc()
963 port->tx_fc & en); in mvpp2_bm_pool_update_priv_fc()
965 mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en); in mvpp2_bm_pool_update_priv_fc()
966 mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en); in mvpp2_bm_pool_update_priv_fc()
976 * flow control enabled, but still disabled per port. in mvpp2_enable_global_fc()
1000 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, in mvpp2_bm_pool_put() argument
1004 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); in mvpp2_bm_pool_put()
1007 if (test_bit(thread, &port->priv->lock_map)) in mvpp2_bm_pool_put()
1008 spin_lock_irqsave(&port->bm_lock[thread], flags); in mvpp2_bm_pool_put()
1010 if (port->priv->hw_version >= MVPP22) { in mvpp2_bm_pool_put()
1022 mvpp2_thread_write_relaxed(port->priv, thread, in mvpp2_bm_pool_put()
1031 mvpp2_thread_write_relaxed(port->priv, thread, in mvpp2_bm_pool_put()
1033 mvpp2_thread_write_relaxed(port->priv, thread, in mvpp2_bm_pool_put()
1036 if (test_bit(thread, &port->priv->lock_map)) in mvpp2_bm_pool_put()
1037 spin_unlock_irqrestore(&port->bm_lock[thread], flags); in mvpp2_bm_pool_put()
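
mvpp2_bm_pool_put() maps the current CPU to a hardware thread and takes the per-thread bm_lock only when that thread is flagged in priv->lock_map (that is, when several CPUs have to share one hardware thread); the buffer addresses are then returned to the pool with two relaxed per-thread register writes. A condensed standalone sketch of that conditional-locking shape, with the register writes stubbed out and their names used purely as placeholders, follows.

    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 4

    static unsigned long lock_map;                /* bit set => thread needs locking */
    static pthread_mutex_t bm_lock[NTHREADS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* Stand-in for the relaxed per-thread register writes (names are placeholders). */
    static void thread_write(unsigned int thread, const char *reg, unsigned long v)
    {
        printf("thread %u: %s <- 0x%lx\n", thread, reg, v);
    }

    static void bm_pool_put(unsigned int thread, int pool,
                            unsigned long dma_addr, unsigned long phys_addr)
    {
        int locked = !!(lock_map & (1ul << thread));

        if (locked)                               /* lock only when CPUs share a thread */
            pthread_mutex_lock(&bm_lock[thread]);

        thread_write(thread, "BM_VIRT_RLS", phys_addr);
        thread_write(thread, "BM_PHY_RLS", dma_addr);

        if (locked)
            pthread_mutex_unlock(&bm_lock[thread]);
        (void)pool;
    }

    int main(void)
    {
        lock_map = 0x1;                           /* pretend thread 0 is shared */
        bm_pool_put(0, 3, 0x1000, 0x2000);
        return 0;
    }
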
1043 static int mvpp2_bm_bufs_add(struct mvpp2_port *port, in mvpp2_bm_bufs_add() argument
1052 if (port->priv->percpu_pools && in mvpp2_bm_bufs_add()
1054 netdev_err(port->dev, in mvpp2_bm_bufs_add()
1064 netdev_err(port->dev, in mvpp2_bm_bufs_add()
1070 if (port->priv->percpu_pools) in mvpp2_bm_bufs_add()
1071 pp = port->priv->page_pool[bm_pool->id]; in mvpp2_bm_bufs_add()
1073 buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr, in mvpp2_bm_bufs_add()
1078 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, in mvpp2_bm_bufs_add()
1085 netdev_dbg(port->dev, in mvpp2_bm_bufs_add()
1089 netdev_dbg(port->dev, in mvpp2_bm_bufs_add()
1099 mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) in mvpp2_bm_pool_use() argument
1101 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; in mvpp2_bm_pool_use()
1104 if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) || in mvpp2_bm_pool_use()
1105 (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) { in mvpp2_bm_pool_use()
1106 netdev_err(port->dev, "Invalid pool %d\n", pool); in mvpp2_bm_pool_use()
1121 if (port->priv->percpu_pools) { in mvpp2_bm_pool_use()
1122 if (pool < port->nrxqs) in mvpp2_bm_pool_use()
1130 mvpp2_bm_bufs_free(port->dev->dev.parent, in mvpp2_bm_pool_use()
1131 port->priv, new_pool, pkts_num); in mvpp2_bm_pool_use()
1140 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); in mvpp2_bm_pool_use()
1148 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, in mvpp2_bm_pool_use()
1155 mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type, in mvpp2_bm_pool_use_percpu() argument
1158 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; in mvpp2_bm_pool_use_percpu()
1161 if (pool > port->nrxqs * 2) { in mvpp2_bm_pool_use_percpu()
1162 netdev_err(port->dev, "Invalid pool %d\n", pool); in mvpp2_bm_pool_use_percpu()
1179 mvpp2_bm_bufs_free(port->dev->dev.parent, in mvpp2_bm_pool_use_percpu()
1180 port->priv, new_pool, pkts_num); in mvpp2_bm_pool_use_percpu()
1188 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); in mvpp2_bm_pool_use_percpu()
1196 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, in mvpp2_bm_pool_use_percpu()
1203 static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port) in mvpp2_swf_bm_pool_init_shared() argument
1208 /* If port pkt_size is higher than 1518B: in mvpp2_swf_bm_pool_init_shared()
1212 if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { in mvpp2_swf_bm_pool_init_shared()
1220 if (!port->pool_long) { in mvpp2_swf_bm_pool_init_shared()
1221 port->pool_long = in mvpp2_swf_bm_pool_init_shared()
1222 mvpp2_bm_pool_use(port, long_log_pool, in mvpp2_swf_bm_pool_init_shared()
1224 if (!port->pool_long) in mvpp2_swf_bm_pool_init_shared()
1227 port->pool_long->port_map |= BIT(port->id); in mvpp2_swf_bm_pool_init_shared()
1229 for (rxq = 0; rxq < port->nrxqs; rxq++) in mvpp2_swf_bm_pool_init_shared()
1230 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); in mvpp2_swf_bm_pool_init_shared()
1233 if (!port->pool_short) { in mvpp2_swf_bm_pool_init_shared()
1234 port->pool_short = in mvpp2_swf_bm_pool_init_shared()
1235 mvpp2_bm_pool_use(port, short_log_pool, in mvpp2_swf_bm_pool_init_shared()
1237 if (!port->pool_short) in mvpp2_swf_bm_pool_init_shared()
1240 port->pool_short->port_map |= BIT(port->id); in mvpp2_swf_bm_pool_init_shared()
1242 for (rxq = 0; rxq < port->nrxqs; rxq++) in mvpp2_swf_bm_pool_init_shared()
1243 mvpp2_rxq_short_pool_set(port, rxq, in mvpp2_swf_bm_pool_init_shared()
1244 port->pool_short->id); in mvpp2_swf_bm_pool_init_shared()
1251 static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port) in mvpp2_swf_bm_pool_init_percpu() argument
1256 for (i = 0; i < port->nrxqs; i++) { in mvpp2_swf_bm_pool_init_percpu()
1257 bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i, in mvpp2_swf_bm_pool_init_percpu()
1262 bm_pool->port_map |= BIT(port->id); in mvpp2_swf_bm_pool_init_percpu()
1263 mvpp2_rxq_short_pool_set(port, i, bm_pool->id); in mvpp2_swf_bm_pool_init_percpu()
1266 for (i = 0; i < port->nrxqs; i++) { in mvpp2_swf_bm_pool_init_percpu()
1267 bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs, in mvpp2_swf_bm_pool_init_percpu()
1272 bm_pool->port_map |= BIT(port->id); in mvpp2_swf_bm_pool_init_percpu()
1273 mvpp2_rxq_long_pool_set(port, i, bm_pool->id); in mvpp2_swf_bm_pool_init_percpu()
1276 port->pool_long = NULL; in mvpp2_swf_bm_pool_init_percpu()
1277 port->pool_short = NULL; in mvpp2_swf_bm_pool_init_percpu()
1282 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) in mvpp2_swf_bm_pool_init() argument
1284 if (port->priv->percpu_pools) in mvpp2_swf_bm_pool_init()
1285 return mvpp2_swf_bm_pool_init_percpu(port); in mvpp2_swf_bm_pool_init()
1287 return mvpp2_swf_bm_pool_init_shared(port); in mvpp2_swf_bm_pool_init()
1290 static void mvpp2_set_hw_csum(struct mvpp2_port *port, in mvpp2_set_hw_csum() argument
1295 /* Update L4 checksum when jumbo enable/disable on port. in mvpp2_set_hw_csum()
1296 * Only port 0 supports hardware checksum offload due to in mvpp2_set_hw_csum()
1301 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { in mvpp2_set_hw_csum()
1302 port->dev->features &= ~csums; in mvpp2_set_hw_csum()
1303 port->dev->hw_features &= ~csums; in mvpp2_set_hw_csum()
1305 port->dev->features |= csums; in mvpp2_set_hw_csum()
1306 port->dev->hw_features |= csums; in mvpp2_set_hw_csum()
1312 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_bm_update_mtu() local
1316 if (port->priv->percpu_pools) in mvpp2_bm_update_mtu()
1319 /* If port MTU is higher than 1518B: in mvpp2_bm_update_mtu()
1328 if (new_long_pool != port->pool_long->id) { in mvpp2_bm_update_mtu()
1329 if (port->tx_fc) { in mvpp2_bm_update_mtu()
1331 mvpp2_bm_pool_update_fc(port, in mvpp2_bm_update_mtu()
1332 port->pool_short, in mvpp2_bm_update_mtu()
1335 mvpp2_bm_pool_update_fc(port, port->pool_long, in mvpp2_bm_update_mtu()
1339 /* Remove port from old short & long pool */ in mvpp2_bm_update_mtu()
1340 port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id, in mvpp2_bm_update_mtu()
1341 port->pool_long->pkt_size); in mvpp2_bm_update_mtu()
1342 port->pool_long->port_map &= ~BIT(port->id); in mvpp2_bm_update_mtu()
1343 port->pool_long = NULL; in mvpp2_bm_update_mtu()
1345 port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id, in mvpp2_bm_update_mtu()
1346 port->pool_short->pkt_size); in mvpp2_bm_update_mtu()
1347 port->pool_short->port_map &= ~BIT(port->id); in mvpp2_bm_update_mtu()
1348 port->pool_short = NULL; in mvpp2_bm_update_mtu()
1350 port->pkt_size = pkt_size; in mvpp2_bm_update_mtu()
1352 /* Add port to new short & long pool */ in mvpp2_bm_update_mtu()
1353 mvpp2_swf_bm_pool_init(port); in mvpp2_bm_update_mtu()
1355 mvpp2_set_hw_csum(port, new_long_pool); in mvpp2_bm_update_mtu()
1357 if (port->tx_fc) { in mvpp2_bm_update_mtu()
1359 mvpp2_bm_pool_update_fc(port, port->pool_long, in mvpp2_bm_update_mtu()
1362 mvpp2_bm_pool_update_fc(port, port->pool_short, in mvpp2_bm_update_mtu()
1366 /* Update L4 checksum when jumbo enable/disable on port */ in mvpp2_bm_update_mtu()
1367 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { in mvpp2_bm_update_mtu()
1385 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) in mvpp2_interrupts_enable() argument
1389 for (i = 0; i < port->nqvecs; i++) in mvpp2_interrupts_enable()
1390 sw_thread_mask |= port->qvecs[i].sw_thread_mask; in mvpp2_interrupts_enable()
1392 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), in mvpp2_interrupts_enable()
1396 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) in mvpp2_interrupts_disable() argument
1400 for (i = 0; i < port->nqvecs; i++) in mvpp2_interrupts_disable()
1401 sw_thread_mask |= port->qvecs[i].sw_thread_mask; in mvpp2_interrupts_disable()
1403 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), in mvpp2_interrupts_disable()
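
mvpp2_interrupts_enable() and mvpp2_interrupts_disable() both OR together the sw_thread_mask of every queue vector and then issue a single write to MVPP2_ISR_ENABLE_REG for the port. The sketch below assumes the usual encoding for that register (low half enables threads, high half disables them); treat the exact bit placement as an assumption.

    #include <stdint.h>
    #include <stdio.h>

    struct qvec { uint32_t sw_thread_mask; };

    /* Assumed encoding: low 16 bits enable threads, high 16 bits disable them. */
    #define ISR_ENABLE(mask)   ((mask) & 0xffff)
    #define ISR_DISABLE(mask)  (((mask) << 16) & 0xffff0000)

    static uint32_t gather_mask(const struct qvec *v, int n)
    {
        uint32_t m = 0;

        for (int i = 0; i < n; i++)
            m |= v[i].sw_thread_mask;      /* same loop as mvpp2_interrupts_enable() */
        return m;
    }

    int main(void)
    {
        struct qvec qv[] = { { 0x1 }, { 0x2 }, { 0x4 } };
        uint32_t m = gather_mask(qv, 3);

        printf("enable write  = 0x%08x\n", ISR_ENABLE(m));   /* 0x00000007 */
        printf("disable write = 0x%08x\n", ISR_DISABLE(m));  /* 0x00070000 */
        return 0;
    }
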
1409 struct mvpp2_port *port = qvec->port; in mvpp2_qvec_interrupt_enable() local
1411 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), in mvpp2_qvec_interrupt_enable()
1417 struct mvpp2_port *port = qvec->port; in mvpp2_qvec_interrupt_disable() local
1419 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), in mvpp2_qvec_interrupt_disable()
1429 struct mvpp2_port *port = arg; in mvpp2_interrupts_mask() local
1434 if (cpu > port->priv->nthreads) in mvpp2_interrupts_mask()
1437 thread = mvpp2_cpu_to_thread(port->priv, cpu); in mvpp2_interrupts_mask()
1439 mvpp2_thread_write(port->priv, thread, in mvpp2_interrupts_mask()
1440 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); in mvpp2_interrupts_mask()
1441 mvpp2_thread_write(port->priv, thread, in mvpp2_interrupts_mask()
1442 MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0); in mvpp2_interrupts_mask()
1451 struct mvpp2_port *port = arg; in mvpp2_interrupts_unmask() local
1456 if (cpu >= port->priv->nthreads) in mvpp2_interrupts_unmask()
1459 thread = mvpp2_cpu_to_thread(port->priv, cpu); in mvpp2_interrupts_unmask()
1462 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); in mvpp2_interrupts_unmask()
1463 if (port->has_tx_irqs) in mvpp2_interrupts_unmask()
1466 mvpp2_thread_write(port->priv, thread, in mvpp2_interrupts_unmask()
1467 MVPP2_ISR_RX_TX_MASK_REG(port->id), val); in mvpp2_interrupts_unmask()
1468 mvpp2_thread_write(port->priv, thread, in mvpp2_interrupts_unmask()
1469 MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), in mvpp2_interrupts_unmask()
1474 mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) in mvpp2_shared_interrupt_mask_unmask() argument
1479 if (port->priv->hw_version == MVPP21) in mvpp2_shared_interrupt_mask_unmask()
1487 for (i = 0; i < port->nqvecs; i++) { in mvpp2_shared_interrupt_mask_unmask()
1488 struct mvpp2_queue_vector *v = port->qvecs + i; in mvpp2_shared_interrupt_mask_unmask()
1493 mvpp2_thread_write(port->priv, v->sw_thread_id, in mvpp2_shared_interrupt_mask_unmask()
1494 MVPP2_ISR_RX_TX_MASK_REG(port->id), val); in mvpp2_shared_interrupt_mask_unmask()
1495 mvpp2_thread_write(port->priv, v->sw_thread_id, in mvpp2_shared_interrupt_mask_unmask()
1496 MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), in mvpp2_shared_interrupt_mask_unmask()
1501 /* Only GOP port 0 has an XLG MAC */
1502 static bool mvpp2_port_supports_xlg(struct mvpp2_port *port) in mvpp2_port_supports_xlg() argument
1504 return port->gop_id == 0; in mvpp2_port_supports_xlg()
1507 static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) in mvpp2_port_supports_rgmii() argument
1509 return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0); in mvpp2_port_supports_rgmii()
1512 /* Port configuration routines */
1531 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) in mvpp22_gop_init_rgmii() argument
1533 struct mvpp2 *priv = port->priv; in mvpp22_gop_init_rgmii()
1541 if (port->gop_id == 2) { in mvpp22_gop_init_rgmii()
1543 } else if (port->gop_id == 3) { in mvpp22_gop_init_rgmii()
1551 if (port->phy_interface == PHY_INTERFACE_MODE_MII) in mvpp22_gop_init_rgmii()
1559 static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) in mvpp22_gop_init_sgmii() argument
1561 struct mvpp2 *priv = port->priv; in mvpp22_gop_init_sgmii()
1569 if (port->gop_id > 1) { in mvpp22_gop_init_sgmii()
1571 if (port->gop_id == 2) in mvpp22_gop_init_sgmii()
1573 else if (port->gop_id == 3) in mvpp22_gop_init_sgmii()
1579 static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) in mvpp22_gop_init_10gkr() argument
1581 struct mvpp2 *priv = port->priv; in mvpp22_gop_init_10gkr()
1582 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); in mvpp22_gop_init_10gkr()
1583 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); in mvpp22_gop_init_10gkr()
1602 static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en) in mvpp22_gop_fca_enable_periodic() argument
1604 struct mvpp2 *priv = port->priv; in mvpp22_gop_fca_enable_periodic()
1605 void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id); in mvpp22_gop_fca_enable_periodic()
1615 static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer) in mvpp22_gop_fca_set_timer() argument
1617 struct mvpp2 *priv = port->priv; in mvpp22_gop_fca_set_timer()
1618 void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id); in mvpp22_gop_fca_set_timer()
1629 * partner won't send traffic if port is in XOFF mode.
1631 static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port) in mvpp22_gop_fca_set_periodic_timer() argument
1635 timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER)) in mvpp22_gop_fca_set_periodic_timer()
1638 mvpp22_gop_fca_enable_periodic(port, false); in mvpp22_gop_fca_set_periodic_timer()
1640 mvpp22_gop_fca_set_timer(port, timer); in mvpp22_gop_fca_set_periodic_timer()
1642 mvpp22_gop_fca_enable_periodic(port, true); in mvpp22_gop_fca_set_periodic_timer()
1645 static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface) in mvpp22_gop_init() argument
1647 struct mvpp2 *priv = port->priv; in mvpp22_gop_init()
1659 if (!mvpp2_port_supports_rgmii(port)) in mvpp22_gop_init()
1661 mvpp22_gop_init_rgmii(port); in mvpp22_gop_init()
1666 mvpp22_gop_init_sgmii(port); in mvpp22_gop_init()
1670 if (!mvpp2_port_supports_xlg(port)) in mvpp22_gop_init()
1672 mvpp22_gop_init_10gkr(port); in mvpp22_gop_init()
1679 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | in mvpp22_gop_init()
1680 GENCONF_PORT_CTRL1_EN(port->gop_id); in mvpp22_gop_init()
1691 mvpp22_gop_fca_set_periodic_timer(port); in mvpp22_gop_init()
1697 netdev_err(port->dev, "Invalid port configuration\n"); in mvpp22_gop_init()
1701 static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) in mvpp22_gop_unmask_irq() argument
1705 if (phy_interface_mode_is_rgmii(port->phy_interface) || in mvpp22_gop_unmask_irq()
1706 phy_interface_mode_is_8023z(port->phy_interface) || in mvpp22_gop_unmask_irq()
1707 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { in mvpp22_gop_unmask_irq()
1708 /* Enable the GMAC link status irq for this port */ in mvpp22_gop_unmask_irq()
1709 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); in mvpp22_gop_unmask_irq()
1711 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); in mvpp22_gop_unmask_irq()
1714 if (mvpp2_port_supports_xlg(port)) { in mvpp22_gop_unmask_irq()
1715 /* Enable the XLG/GIG irqs for this port */ in mvpp22_gop_unmask_irq()
1716 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); in mvpp22_gop_unmask_irq()
1717 if (mvpp2_is_xlg(port->phy_interface)) in mvpp22_gop_unmask_irq()
1721 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); in mvpp22_gop_unmask_irq()
1725 static void mvpp22_gop_mask_irq(struct mvpp2_port *port) in mvpp22_gop_mask_irq() argument
1729 if (mvpp2_port_supports_xlg(port)) { in mvpp22_gop_mask_irq()
1730 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); in mvpp22_gop_mask_irq()
1733 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); in mvpp22_gop_mask_irq()
1736 if (phy_interface_mode_is_rgmii(port->phy_interface) || in mvpp22_gop_mask_irq()
1737 phy_interface_mode_is_8023z(port->phy_interface) || in mvpp22_gop_mask_irq()
1738 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { in mvpp22_gop_mask_irq()
1739 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); in mvpp22_gop_mask_irq()
1741 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); in mvpp22_gop_mask_irq()
1745 static void mvpp22_gop_setup_irq(struct mvpp2_port *port) in mvpp22_gop_setup_irq() argument
1749 mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK, in mvpp22_gop_setup_irq()
1753 if (port->phylink || in mvpp22_gop_setup_irq()
1754 phy_interface_mode_is_rgmii(port->phy_interface) || in mvpp22_gop_setup_irq()
1755 phy_interface_mode_is_8023z(port->phy_interface) || in mvpp22_gop_setup_irq()
1756 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { in mvpp22_gop_setup_irq()
1757 val = readl(port->base + MVPP22_GMAC_INT_MASK); in mvpp22_gop_setup_irq()
1759 writel(val, port->base + MVPP22_GMAC_INT_MASK); in mvpp22_gop_setup_irq()
1762 if (mvpp2_port_supports_xlg(port)) { in mvpp22_gop_setup_irq()
1763 val = readl(port->base + MVPP22_XLG_INT_MASK); in mvpp22_gop_setup_irq()
1765 writel(val, port->base + MVPP22_XLG_INT_MASK); in mvpp22_gop_setup_irq()
1767 mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK, in mvpp22_gop_setup_irq()
1772 mvpp22_gop_unmask_irq(port); in mvpp22_gop_setup_irq()
1785 static int mvpp22_comphy_init(struct mvpp2_port *port, in mvpp22_comphy_init() argument
1790 if (!port->comphy) in mvpp22_comphy_init()
1793 ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface); in mvpp22_comphy_init()
1797 return phy_power_on(port->comphy); in mvpp22_comphy_init()
1800 static void mvpp2_port_enable(struct mvpp2_port *port) in mvpp2_port_enable() argument
1804 if (mvpp2_port_supports_xlg(port) && in mvpp2_port_enable()
1805 mvpp2_is_xlg(port->phy_interface)) { in mvpp2_port_enable()
1806 val = readl(port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_port_enable()
1809 writel(val, port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_port_enable()
1811 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); in mvpp2_port_enable()
1814 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); in mvpp2_port_enable()
1818 static void mvpp2_port_disable(struct mvpp2_port *port) in mvpp2_port_disable() argument
1822 if (mvpp2_port_supports_xlg(port) && in mvpp2_port_disable()
1823 mvpp2_is_xlg(port->phy_interface)) { in mvpp2_port_disable()
1824 val = readl(port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_port_disable()
1826 writel(val, port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_port_disable()
1829 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); in mvpp2_port_disable()
1831 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); in mvpp2_port_disable()
1835 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) in mvpp2_port_periodic_xon_disable() argument
1839 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & in mvpp2_port_periodic_xon_disable()
1841 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); in mvpp2_port_periodic_xon_disable()
1844 /* Configure loopback port */
1845 static void mvpp2_port_loopback_set(struct mvpp2_port *port, in mvpp2_port_loopback_set() argument
1850 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); in mvpp2_port_loopback_set()
1863 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); in mvpp2_port_loopback_set()
1882 static u64 mvpp2_read_count(struct mvpp2_port *port, in mvpp2_read_count() argument
1887 val = readl(port->stats_base + counter->offset); in mvpp2_read_count()
1889 val += (u64)readl(port->stats_base + counter->offset + 4) << 32; in mvpp2_read_count()
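
mvpp2_read_count() assembles a 64-bit MIB counter from two 32-bit reads: the low word at the counter's offset and the high word four bytes above it (taken only when the counter is a 64-bit one, a condition assumed here). A tiny standalone version of that composition:

    #include <stdint.h>
    #include <stdio.h>

    /* Pretend MMIO window holding one 64-bit counter as two 32-bit words. */
    static uint32_t regs[2] = { 0xdeadbeef, 0x00000002 };

    static uint32_t read32(unsigned int off) { return regs[off / 4]; }

    static uint64_t read_count(unsigned int off, int is_64bit)
    {
        uint64_t val = read32(off);

        if (is_64bit)                       /* same shape as mvpp2_read_count() */
            val += (uint64_t)read32(off + 4) << 32;
        return val;
    }

    int main(void)
    {
        printf("counter = 0x%llx\n", (unsigned long long)read_count(0, 1));
        return 0;                           /* prints 0x2deadbeef */
    }
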
1987 struct mvpp2_port *port = netdev_priv(netdev); in mvpp2_ethtool_get_strings() local
2005 for (q = 0; q < port->ntxqs; q++) { in mvpp2_ethtool_get_strings()
2013 for (q = 0; q < port->nrxqs; q++) { in mvpp2_ethtool_get_strings()
2030 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) in mvpp2_get_xdp_stats() argument
2046 cpu_stats = per_cpu_ptr(port->stats, cpu); in mvpp2_get_xdp_stats()
2068 static void mvpp2_read_stats(struct mvpp2_port *port) in mvpp2_read_stats() argument
2075 pstats = port->ethtool_stats; in mvpp2_read_stats()
2078 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]); in mvpp2_read_stats()
2081 *pstats++ += mvpp2_read(port->priv, in mvpp2_read_stats()
2083 4 * port->id); in mvpp2_read_stats()
2085 for (q = 0; q < port->ntxqs; q++) in mvpp2_read_stats()
2087 *pstats++ += mvpp2_read_index(port->priv, in mvpp2_read_stats()
2088 MVPP22_CTRS_TX_CTR(port->id, q), in mvpp2_read_stats()
2092 * driver's. We need to add the port->first_rxq offset. in mvpp2_read_stats()
2094 for (q = 0; q < port->nrxqs; q++) in mvpp2_read_stats()
2096 *pstats++ += mvpp2_read_index(port->priv, in mvpp2_read_stats()
2097 port->first_rxq + q, in mvpp2_read_stats()
2101 mvpp2_get_xdp_stats(port, &xdp_stats); in mvpp2_read_stats()
2135 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, in mvpp2_gather_hw_statistics() local
2138 mutex_lock(&port->gather_stats_lock); in mvpp2_gather_hw_statistics()
2140 mvpp2_read_stats(port); in mvpp2_gather_hw_statistics()
2145 cancel_delayed_work(&port->stats_work); in mvpp2_gather_hw_statistics()
2146 queue_delayed_work(port->priv->stats_queue, &port->stats_work, in mvpp2_gather_hw_statistics()
2149 mutex_unlock(&port->gather_stats_lock); in mvpp2_gather_hw_statistics()
2155 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_stats() local
2157 /* Update statistics for the given port, then take the lock to avoid in mvpp2_ethtool_get_stats()
2160 mvpp2_gather_hw_statistics(&port->stats_work.work); in mvpp2_ethtool_get_stats()
2162 mutex_lock(&port->gather_stats_lock); in mvpp2_ethtool_get_stats()
2163 memcpy(data, port->ethtool_stats, in mvpp2_ethtool_get_stats()
2164 sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); in mvpp2_ethtool_get_stats()
2165 mutex_unlock(&port->gather_stats_lock); in mvpp2_ethtool_get_stats()
2170 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_sset_count() local
2173 return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); in mvpp2_ethtool_get_sset_count()
2178 static void mvpp2_mac_reset_assert(struct mvpp2_port *port) in mvpp2_mac_reset_assert() argument
2182 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | in mvpp2_mac_reset_assert()
2184 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); in mvpp2_mac_reset_assert()
2186 if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) { in mvpp2_mac_reset_assert()
2187 val = readl(port->base + MVPP22_XLG_CTRL0_REG) & in mvpp2_mac_reset_assert()
2189 writel(val, port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_mac_reset_assert()
2193 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) in mvpp22_pcs_reset_assert() argument
2195 struct mvpp2 *priv = port->priv; in mvpp22_pcs_reset_assert()
2199 if (port->priv->hw_version == MVPP21 || port->gop_id != 0) in mvpp22_pcs_reset_assert()
2202 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); in mvpp22_pcs_reset_assert()
2203 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); in mvpp22_pcs_reset_assert()
2214 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port, in mvpp22_pcs_reset_deassert() argument
2217 struct mvpp2 *priv = port->priv; in mvpp22_pcs_reset_deassert()
2221 if (port->priv->hw_version == MVPP21 || port->gop_id != 0) in mvpp22_pcs_reset_deassert()
2224 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); in mvpp22_pcs_reset_deassert()
2225 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); in mvpp22_pcs_reset_deassert()
2246 /* Change maximum receive size of the port */
2247 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) in mvpp2_gmac_max_rx_size_set() argument
2251 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); in mvpp2_gmac_max_rx_size_set()
2253 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << in mvpp2_gmac_max_rx_size_set()
2255 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); in mvpp2_gmac_max_rx_size_set()
2258 /* Change maximum receive size of the port */
2259 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) in mvpp2_xlg_max_rx_size_set() argument
2263 val = readl(port->base + MVPP22_XLG_CTRL1_REG); in mvpp2_xlg_max_rx_size_set()
2265 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << in mvpp2_xlg_max_rx_size_set()
2267 writel(val, port->base + MVPP22_XLG_CTRL1_REG); in mvpp2_xlg_max_rx_size_set()
2270 /* Set defaults to the MVPP2 port */
2271 static void mvpp2_defaults_set(struct mvpp2_port *port) in mvpp2_defaults_set() argument
2275 if (port->priv->hw_version == MVPP21) { in mvpp2_defaults_set()
2277 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); in mvpp2_defaults_set()
2281 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); in mvpp2_defaults_set()
2285 tx_port_num = mvpp2_egress_port(port); in mvpp2_defaults_set()
2286 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, in mvpp2_defaults_set()
2288 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); in mvpp2_defaults_set()
2291 mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); in mvpp2_defaults_set()
2295 mvpp2_write(port->priv, in mvpp2_defaults_set()
2301 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, in mvpp2_defaults_set()
2302 port->priv->tclk / USEC_PER_SEC); in mvpp2_defaults_set()
2303 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); in mvpp2_defaults_set()
2307 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); in mvpp2_defaults_set()
2309 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); in mvpp2_defaults_set()
2312 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), in mvpp2_defaults_set()
2317 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { in mvpp2_defaults_set()
2318 queue = port->rxqs[lrxq]->id; in mvpp2_defaults_set()
2319 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); in mvpp2_defaults_set()
2322 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); in mvpp2_defaults_set()
2326 mvpp2_interrupts_disable(port); in mvpp2_defaults_set()
2330 static void mvpp2_ingress_enable(struct mvpp2_port *port) in mvpp2_ingress_enable() argument
2335 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { in mvpp2_ingress_enable()
2336 queue = port->rxqs[lrxq]->id; in mvpp2_ingress_enable()
2337 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); in mvpp2_ingress_enable()
2339 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); in mvpp2_ingress_enable()
2343 static void mvpp2_ingress_disable(struct mvpp2_port *port) in mvpp2_ingress_disable() argument
2348 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { in mvpp2_ingress_disable()
2349 queue = port->rxqs[lrxq]->id; in mvpp2_ingress_disable()
2350 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); in mvpp2_ingress_disable()
2352 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); in mvpp2_ingress_disable()
2359 static void mvpp2_egress_enable(struct mvpp2_port *port) in mvpp2_egress_enable() argument
2363 int tx_port_num = mvpp2_egress_port(port); in mvpp2_egress_enable()
2367 for (queue = 0; queue < port->ntxqs; queue++) { in mvpp2_egress_enable()
2368 struct mvpp2_tx_queue *txq = port->txqs[queue]; in mvpp2_egress_enable()
2374 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); in mvpp2_egress_enable()
2375 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); in mvpp2_egress_enable()
2381 static void mvpp2_egress_disable(struct mvpp2_port *port) in mvpp2_egress_disable() argument
2385 int tx_port_num = mvpp2_egress_port(port); in mvpp2_egress_disable()
2388 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); in mvpp2_egress_disable()
2389 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & in mvpp2_egress_disable()
2392 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, in mvpp2_egress_disable()
2399 netdev_warn(port->dev, in mvpp2_egress_disable()
2407 /* Check port TX Command register that all in mvpp2_egress_disable()
2410 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); in mvpp2_egress_disable()
2418 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) in mvpp2_rxq_received() argument
2420 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); in mvpp2_rxq_received()
2429 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, in mvpp2_rxq_status_update() argument
2437 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); in mvpp2_rxq_status_update()
2452 static void mvpp2_rxq_offset_set(struct mvpp2_port *port, in mvpp2_rxq_offset_set() argument
2460 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); in mvpp2_rxq_offset_set()
2467 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); in mvpp2_rxq_offset_set()
2487 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) in mvpp2_aggr_txq_pend_desc_add() argument
2490 mvpp2_thread_write(port->priv, in mvpp2_aggr_txq_pend_desc_add()
2491 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), in mvpp2_aggr_txq_pend_desc_add()
2501 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, in mvpp2_aggr_desc_num_check() argument
2507 mvpp2_cpu_to_thread(port->priv, smp_processor_id()); in mvpp2_aggr_desc_num_check()
2508 u32 val = mvpp2_read_relaxed(port->priv, in mvpp2_aggr_desc_num_check()
2525 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, in mvpp2_txq_alloc_reserved_desc() argument
2528 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); in mvpp2_txq_alloc_reserved_desc()
2529 struct mvpp2 *priv = port->priv; in mvpp2_txq_alloc_reserved_desc()
2543 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, in mvpp2_txq_reserved_desc_num_proc() argument
2560 for (thread = 0; thread < port->priv->nthreads; thread++) { in mvpp2_txq_reserved_desc_num_proc()
2575 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); in mvpp2_txq_reserved_desc_num_proc()
2635 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, in mvpp2_txq_sent_desc_proc() argument
2641 val = mvpp2_thread_read_relaxed(port->priv, in mvpp2_txq_sent_desc_proc()
2642 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), in mvpp2_txq_sent_desc_proc()
2654 struct mvpp2_port *port = arg; in mvpp2_txq_sent_counter_clear() local
2658 if (smp_processor_id() >= port->priv->nthreads) in mvpp2_txq_sent_counter_clear()
2661 for (queue = 0; queue < port->ntxqs; queue++) { in mvpp2_txq_sent_counter_clear()
2662 int id = port->txqs[queue]->id; in mvpp2_txq_sent_counter_clear()
2664 mvpp2_thread_read(port->priv, in mvpp2_txq_sent_counter_clear()
2665 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), in mvpp2_txq_sent_counter_clear()
2671 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) in mvpp2_txp_max_tx_size_set() argument
2676 mtu = port->pkt_size * 8; in mvpp2_txp_max_tx_size_set()
2684 tx_port_num = mvpp2_egress_port(port); in mvpp2_txp_max_tx_size_set()
2685 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); in mvpp2_txp_max_tx_size_set()
2688 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); in mvpp2_txp_max_tx_size_set()
2691 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); in mvpp2_txp_max_tx_size_set()
2694 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); in mvpp2_txp_max_tx_size_set()
2700 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); in mvpp2_txp_max_tx_size_set()
2703 for (txq = 0; txq < port->ntxqs; txq++) { in mvpp2_txp_max_tx_size_set()
2704 val = mvpp2_read(port->priv, in mvpp2_txp_max_tx_size_set()
2712 mvpp2_write(port->priv, in mvpp2_txp_max_tx_size_set()
2720 static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port, in mvpp2_set_rxq_free_tresh() argument
2725 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); in mvpp2_set_rxq_free_tresh()
2727 val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG); in mvpp2_set_rxq_free_tresh()
2730 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); in mvpp2_set_rxq_free_tresh()
2736 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, in mvpp2_rx_pkts_coal_set() argument
2739 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); in mvpp2_rx_pkts_coal_set()
2744 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); in mvpp2_rx_pkts_coal_set()
2745 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, in mvpp2_rx_pkts_coal_set()
2752 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, in mvpp2_tx_pkts_coal_set() argument
2764 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); in mvpp2_tx_pkts_coal_set()
2765 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); in mvpp2_tx_pkts_coal_set()
2788 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, in mvpp2_rx_time_coal_set() argument
2791 unsigned long freq = port->priv->tclk; in mvpp2_rx_time_coal_set()
2802 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); in mvpp2_rx_time_coal_set()
2805 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) in mvpp2_tx_time_coal_set() argument
2807 unsigned long freq = port->priv->tclk; in mvpp2_tx_time_coal_set()
2808 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); in mvpp2_tx_time_coal_set()
2811 port->tx_time_coal = in mvpp2_tx_time_coal_set()
2815 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); in mvpp2_tx_time_coal_set()
2818 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); in mvpp2_tx_time_coal_set()
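
The time-coalescing setters convert microseconds into core-clock (tclk) cycles before programming the threshold register, clamping when the result does not fit. Reading mvpp2_usec_to_cycles() from these call sites as cycles = usec * tclk / 10^6 gives the standalone sketch below; the formula, the clamp and the 250 MHz example clock are assumptions.

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000UL

    /* cycles = usec * tclk / 1e6, clamped to what the threshold field can hold. */
    static uint32_t usec_to_cycles(uint32_t usec, unsigned long tclk, uint32_t max)
    {
        uint64_t cycles = (uint64_t)usec * tclk / USEC_PER_SEC;

        return cycles > max ? max : (uint32_t)cycles;
    }

    int main(void)
    {
        /* e.g. 100 us at an assumed 250 MHz tclk -> 25000 cycles */
        printf("%u cycles\n", usec_to_cycles(100, 250000000UL, 0xffff));
        return 0;
    }
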
2822 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, in mvpp2_txq_bufs_free() argument
2839 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, in mvpp2_txq_bufs_free()
2854 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, in mvpp2_get_rx_queue() argument
2859 return port->rxqs[queue]; in mvpp2_get_rx_queue()
2862 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, in mvpp2_get_tx_queue() argument
2867 return port->txqs[queue]; in mvpp2_get_tx_queue()
2871 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, in mvpp2_txq_done() argument
2874 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); in mvpp2_txq_done()
2877 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) in mvpp2_txq_done()
2878 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); in mvpp2_txq_done()
2880 tx_done = mvpp2_txq_sent_desc_proc(port, txq); in mvpp2_txq_done()
2883 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); in mvpp2_txq_done()
2892 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, in mvpp2_tx_done() argument
2900 txq = mvpp2_get_tx_queue(port, cause); in mvpp2_tx_done()
2907 mvpp2_txq_done(port, txq, txq_pcpu); in mvpp2_tx_done()
2955 static int mvpp2_rxq_init(struct mvpp2_port *port, in mvpp2_rxq_init() argument
2958 struct mvpp2 *priv = port->priv; in mvpp2_rxq_init()
2963 rxq->size = port->rx_ring_size; in mvpp2_rxq_init()
2966 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, in mvpp2_rxq_init()
2975 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); in mvpp2_rxq_init()
2978 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); in mvpp2_rxq_init()
2979 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); in mvpp2_rxq_init()
2980 if (port->priv->hw_version == MVPP21) in mvpp2_rxq_init()
2984 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); in mvpp2_rxq_init()
2985 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); in mvpp2_rxq_init()
2986 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); in mvpp2_rxq_init()
2990 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); in mvpp2_rxq_init()
2993 mvpp2_rx_pkts_coal_set(port, rxq); in mvpp2_rxq_init()
2994 mvpp2_rx_time_coal_set(port, rxq); in mvpp2_rxq_init()
2997 mvpp2_set_rxq_free_tresh(port, rxq); in mvpp2_rxq_init()
3000 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); in mvpp2_rxq_init()
3003 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0); in mvpp2_rxq_init()
3007 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0); in mvpp2_rxq_init()
3021 port->nrxqs]); in mvpp2_rxq_init()
3035 dma_free_coherent(port->dev->dev.parent, in mvpp2_rxq_init()
3042 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, in mvpp2_rxq_drop_pkts() argument
3047 rx_received = mvpp2_rxq_received(port, rxq->id); in mvpp2_rxq_drop_pkts()
3053 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); in mvpp2_rxq_drop_pkts()
3059 mvpp2_bm_pool_put(port, pool, in mvpp2_rxq_drop_pkts()
3060 mvpp2_rxdesc_dma_addr_get(port, rx_desc), in mvpp2_rxq_drop_pkts()
3061 mvpp2_rxdesc_cookie_get(port, rx_desc)); in mvpp2_rxq_drop_pkts()
3063 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); in mvpp2_rxq_drop_pkts()
3067 static void mvpp2_rxq_deinit(struct mvpp2_port *port, in mvpp2_rxq_deinit() argument
3078 mvpp2_rxq_drop_pkts(port, rxq); in mvpp2_rxq_deinit()
3081 dma_free_coherent(port->dev->dev.parent, in mvpp2_rxq_deinit()
3094 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); in mvpp2_rxq_deinit()
3095 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); in mvpp2_rxq_deinit()
3096 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); in mvpp2_rxq_deinit()
3097 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); in mvpp2_rxq_deinit()
3098 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); in mvpp2_rxq_deinit()
3103 static int mvpp2_txq_init(struct mvpp2_port *port, in mvpp2_txq_init() argument
3111 txq->size = port->tx_ring_size; in mvpp2_txq_init()
3114 txq->descs = dma_alloc_coherent(port->dev->dev.parent, in mvpp2_txq_init()
3123 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); in mvpp2_txq_init()
3124 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); in mvpp2_txq_init()
3125 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, in mvpp2_txq_init()
3127 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, in mvpp2_txq_init()
3129 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); in mvpp2_txq_init()
3130 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, in mvpp2_txq_init()
3132 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); in mvpp2_txq_init()
3134 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); in mvpp2_txq_init()
3138 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT in mvpp2_txq_init()
3142 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + in mvpp2_txq_init()
3145 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, in mvpp2_txq_init()
3151 tx_port_num = mvpp2_egress_port(port); in mvpp2_txq_init()
3152 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); in mvpp2_txq_init()
3154 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); in mvpp2_txq_init()
3158 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); in mvpp2_txq_init()
3161 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), in mvpp2_txq_init()
3164 for (thread = 0; thread < port->priv->nthreads; thread++) { in mvpp2_txq_init()
3183 dma_alloc_coherent(port->dev->dev.parent, in mvpp2_txq_init()
3195 static void mvpp2_txq_deinit(struct mvpp2_port *port, in mvpp2_txq_deinit() argument
3201 for (thread = 0; thread < port->priv->nthreads; thread++) { in mvpp2_txq_deinit()
3206 dma_free_coherent(port->dev->dev.parent, in mvpp2_txq_deinit()
3215 dma_free_coherent(port->dev->dev.parent, in mvpp2_txq_deinit()
3225 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); in mvpp2_txq_deinit()
3228 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); in mvpp2_txq_deinit()
3229 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); in mvpp2_txq_deinit()
3230 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); in mvpp2_txq_deinit()
3231 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); in mvpp2_txq_deinit()
3236 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) in mvpp2_txq_clean() argument
3240 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); in mvpp2_txq_clean()
3243 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); in mvpp2_txq_clean()
3244 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); in mvpp2_txq_clean()
3246 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); in mvpp2_txq_clean()
3254 netdev_warn(port->dev, in mvpp2_txq_clean()
3255 "port %d: cleaning queue %d timed out\n", in mvpp2_txq_clean()
3256 port->id, txq->log_id); in mvpp2_txq_clean()
3262 pending = mvpp2_thread_read(port->priv, thread, in mvpp2_txq_clean()
3268 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); in mvpp2_txq_clean()
3271 for (thread = 0; thread < port->priv->nthreads; thread++) { in mvpp2_txq_clean()
3275 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); in mvpp2_txq_clean()
3285 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) in mvpp2_cleanup_txqs() argument
3291 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); in mvpp2_cleanup_txqs()
3294 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); in mvpp2_cleanup_txqs()
3295 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); in mvpp2_cleanup_txqs()
3297 for (queue = 0; queue < port->ntxqs; queue++) { in mvpp2_cleanup_txqs()
3298 txq = port->txqs[queue]; in mvpp2_cleanup_txqs()
3299 mvpp2_txq_clean(port, txq); in mvpp2_cleanup_txqs()
3300 mvpp2_txq_deinit(port, txq); in mvpp2_cleanup_txqs()
3303 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); in mvpp2_cleanup_txqs()
3305 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); in mvpp2_cleanup_txqs()
3306 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); in mvpp2_cleanup_txqs()
3310 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) in mvpp2_cleanup_rxqs() argument
3314 for (queue = 0; queue < port->nrxqs; queue++) in mvpp2_cleanup_rxqs()
3315 mvpp2_rxq_deinit(port, port->rxqs[queue]); in mvpp2_cleanup_rxqs()
3317 if (port->tx_fc) in mvpp2_cleanup_rxqs()
3318 mvpp2_rxq_disable_fc(port); in mvpp2_cleanup_rxqs()
3321 /* Init all Rx queues for port */
3322 static int mvpp2_setup_rxqs(struct mvpp2_port *port) in mvpp2_setup_rxqs() argument
3326 for (queue = 0; queue < port->nrxqs; queue++) { in mvpp2_setup_rxqs()
3327 err = mvpp2_rxq_init(port, port->rxqs[queue]); in mvpp2_setup_rxqs()
3332 if (port->tx_fc) in mvpp2_setup_rxqs()
3333 mvpp2_rxq_enable_fc(port); in mvpp2_setup_rxqs()
3338 mvpp2_cleanup_rxqs(port); in mvpp2_setup_rxqs()
3342 /* Init all tx queues for port */
3343 static int mvpp2_setup_txqs(struct mvpp2_port *port) in mvpp2_setup_txqs() argument
3348 for (queue = 0; queue < port->ntxqs; queue++) { in mvpp2_setup_txqs()
3349 txq = port->txqs[queue]; in mvpp2_setup_txqs()
3350 err = mvpp2_txq_init(port, txq); in mvpp2_setup_txqs()
3356 netif_set_xps_queue(port->dev, cpumask_of(queue), queue); in mvpp2_setup_txqs()
3359 if (port->has_tx_irqs) { in mvpp2_setup_txqs()
3360 mvpp2_tx_time_coal_set(port); in mvpp2_setup_txqs()
3361 for (queue = 0; queue < port->ntxqs; queue++) { in mvpp2_setup_txqs()
3362 txq = port->txqs[queue]; in mvpp2_setup_txqs()
3363 mvpp2_tx_pkts_coal_set(port, txq); in mvpp2_setup_txqs()
3367 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); in mvpp2_setup_txqs()
3371 mvpp2_cleanup_txqs(port); in mvpp2_setup_txqs()
3375 /* The callback for per-port interrupt */
3387 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) in mvpp2_isr_handle_ptp_queue() argument
3396 ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); in mvpp2_isr_handle_ptp_queue()
3400 queue = &port->tx_hwtstamp_queue[nq]; in mvpp2_isr_handle_ptp_queue()
3417 mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps); in mvpp2_isr_handle_ptp_queue()
3424 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) in mvpp2_isr_handle_ptp() argument
3429 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); in mvpp2_isr_handle_ptp()
3432 mvpp2_isr_handle_ptp_queue(port, 0); in mvpp2_isr_handle_ptp()
3434 mvpp2_isr_handle_ptp_queue(port, 1); in mvpp2_isr_handle_ptp()
3437 static void mvpp2_isr_handle_link(struct mvpp2_port *port, in mvpp2_isr_handle_link() argument
3440 struct net_device *dev = port->dev; in mvpp2_isr_handle_link()
3442 if (port->phylink) { in mvpp2_isr_handle_link()
3451 mvpp2_interrupts_enable(port); in mvpp2_isr_handle_link()
3453 mvpp2_egress_enable(port); in mvpp2_isr_handle_link()
3454 mvpp2_ingress_enable(port); in mvpp2_isr_handle_link()
3460 mvpp2_ingress_disable(port); in mvpp2_isr_handle_link()
3461 mvpp2_egress_disable(port); in mvpp2_isr_handle_link()
3463 mvpp2_interrupts_disable(port); in mvpp2_isr_handle_link()
3467 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) in mvpp2_isr_handle_xlg() argument
3472 val = readl(port->base + MVPP22_XLG_INT_STAT); in mvpp2_isr_handle_xlg()
3474 val = readl(port->base + MVPP22_XLG_STATUS); in mvpp2_isr_handle_xlg()
3476 mvpp2_isr_handle_link(port, &port->pcs_xlg, link); in mvpp2_isr_handle_xlg()
3480 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) in mvpp2_isr_handle_gmac_internal() argument
3485 if (phy_interface_mode_is_rgmii(port->phy_interface) || in mvpp2_isr_handle_gmac_internal()
3486 phy_interface_mode_is_8023z(port->phy_interface) || in mvpp2_isr_handle_gmac_internal()
3487 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { in mvpp2_isr_handle_gmac_internal()
3488 val = readl(port->base + MVPP22_GMAC_INT_STAT); in mvpp2_isr_handle_gmac_internal()
3490 val = readl(port->base + MVPP2_GMAC_STATUS0); in mvpp2_isr_handle_gmac_internal()
3492 mvpp2_isr_handle_link(port, &port->pcs_gmac, link); in mvpp2_isr_handle_gmac_internal()
3497 /* Per-port interrupt for link status changes */
3500 struct mvpp2_port *port = (struct mvpp2_port *)dev_id; in mvpp2_port_isr() local
3503 mvpp22_gop_mask_irq(port); in mvpp2_port_isr()
3505 if (mvpp2_port_supports_xlg(port) && in mvpp2_port_isr()
3506 mvpp2_is_xlg(port->phy_interface)) { in mvpp2_port_isr()
3508 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT); in mvpp2_port_isr()
3510 mvpp2_isr_handle_xlg(port); in mvpp2_port_isr()
3512 mvpp2_isr_handle_ptp(port); in mvpp2_port_isr()
3517 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT); in mvpp2_port_isr()
3519 mvpp2_isr_handle_gmac_internal(port); in mvpp2_port_isr()
3521 mvpp2_isr_handle_ptp(port); in mvpp2_port_isr()
3524 mvpp22_gop_unmask_irq(port); in mvpp2_port_isr()
3531 struct mvpp2_port *port; in mvpp2_hr_timer_cb() local
3542 port = netdev_priv(dev); in mvpp2_hr_timer_cb()
3545 cause = (1 << port->ntxqs) - 1; in mvpp2_hr_timer_cb()
3546 tx_todo = mvpp2_tx_done(port, cause, in mvpp2_hr_timer_cb()
3547 mvpp2_cpu_to_thread(port->priv, smp_processor_id())); in mvpp2_hr_timer_cb()
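
In the hrtimer callback above, the cause value handed to mvpp2_tx_done() is just a bitmap with one bit per TX queue, so (1 << ntxqs) - 1 requests completion processing on every queue of the port. For example:

    #include <stdio.h>

    int main(void)
    {
        int ntxqs = 4;
        unsigned int cause = (1u << ntxqs) - 1;   /* 0b1111: poll all four TX queues */

        printf("cause = 0x%x\n", cause);          /* prints 0xf */
        return 0;
    }
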
3563 static void mvpp2_rx_error(struct mvpp2_port *port, in mvpp2_rx_error() argument
3566 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); in mvpp2_rx_error()
3567 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); in mvpp2_rx_error()
3582 netdev_err(port->dev, in mvpp2_rx_error()
3588 static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status) in mvpp2_rx_csum() argument
3602 static int mvpp2_rx_refill(struct mvpp2_port *port, in mvpp2_rx_refill() argument
3610 buf = mvpp2_buf_alloc(port, bm_pool, page_pool, in mvpp2_rx_refill()
3615 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); in mvpp2_rx_refill()
3621 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) in mvpp2_skb_tx_csum() argument
3652 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) in mvpp2_xdp_finish_tx() argument
3654 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); in mvpp2_xdp_finish_tx()
3660 txq = port->txqs[txq_id]; in mvpp2_xdp_finish_tx()
3662 nq = netdev_get_tx_queue(port->dev, txq_id); in mvpp2_xdp_finish_tx()
3663 aggr_txq = &port->priv->aggr_txqs[thread]; in mvpp2_xdp_finish_tx()
3671 mvpp2_aggr_txq_pend_desc_add(port, nxmit); in mvpp2_xdp_finish_tx()
3677 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) in mvpp2_xdp_finish_tx()
3678 mvpp2_txq_done(port, txq, txq_pcpu); in mvpp2_xdp_finish_tx()
3682 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, in mvpp2_xdp_submit_frame() argument
3685 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); in mvpp2_xdp_submit_frame()
3696 txq = port->txqs[txq_id]; in mvpp2_xdp_submit_frame()
3698 aggr_txq = &port->priv->aggr_txqs[thread]; in mvpp2_xdp_submit_frame()
3701 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || in mvpp2_xdp_submit_frame()
3702 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { in mvpp2_xdp_submit_frame()
3709 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); in mvpp2_xdp_submit_frame()
3710 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); in mvpp2_xdp_submit_frame()
3714 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, in mvpp2_xdp_submit_frame()
3717 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { in mvpp2_xdp_submit_frame()
3730 dma_sync_single_for_device(port->dev->dev.parent, dma_addr, in mvpp2_xdp_submit_frame()
3736 mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr); in mvpp2_xdp_submit_frame()
3738 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); in mvpp2_xdp_submit_frame()
3739 mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type); in mvpp2_xdp_submit_frame()
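
mvpp2_xdp_submit_frame() above maps the xdp_frame (or syncs an already-mapped page-pool buffer), then fills the TX descriptor with queue, size, DMA address and finally the command word, and records the buffer via mvpp2_txq_inc_put(). A stripped-down sketch of the map-then-fill step is below; struct my_tx_desc, its layout and xmit_xdp_frame() are hypothetical, and only the dma_map_single()/dma_mapping_error() calls mirror the lines above.

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <net/xdp.h>

	struct my_tx_desc {
		__le32 cmd;
		__le16 size;
		__le64 addr;
	};

	static int xmit_xdp_frame(struct device *dma_dev, struct xdp_frame *xdpf,
				  struct my_tx_desc *desc, u32 cmd)
	{
		dma_addr_t dma;

		/* Map the frame payload for device reads. */
		dma = dma_map_single(dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma))
			return -ENOMEM;

		desc->size = cpu_to_le16(xdpf->len);
		desc->addr = cpu_to_le64(dma);
		/* The command word (first/last-descriptor and offload flags)
		 * is written after the address, as in the lines above. */
		desc->cmd = cpu_to_le32(cmd);

		return 0;
	}
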
3746 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) in mvpp2_xdp_xmit_back() argument
3748 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); in mvpp2_xdp_xmit_back()
3760 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); in mvpp2_xdp_xmit_back()
3762 ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false); in mvpp2_xdp_xmit_back()
3770 mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len); in mvpp2_xdp_xmit_back()
3784 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_xdp_xmit() local
3790 if (unlikely(test_bit(0, &port->state))) in mvpp2_xdp_xmit()
3799 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); in mvpp2_xdp_xmit()
3802 ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true); in mvpp2_xdp_xmit()
3811 mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte); in mvpp2_xdp_xmit()
3813 stats = this_cpu_ptr(port->stats); in mvpp2_xdp_xmit()
3825 mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog, in mvpp2_run_xdp() argument
3846 err = xdp_do_redirect(port->dev, xdp, prog); in mvpp2_run_xdp()
3857 ret = mvpp2_xdp_xmit_back(port, xdp); in mvpp2_run_xdp()
3864 bpf_warn_invalid_xdp_action(port->dev, prog, act); in mvpp2_run_xdp()
3867 trace_xdp_exception(port->dev, prog, act); in mvpp2_run_xdp()
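
mvpp2_run_xdp() above is the standard XDP verdict dispatch: XDP_PASS falls through to skb construction, XDP_REDIRECT goes through xdp_do_redirect(), XDP_TX is bounced back out of the same port, and anything else is dropped after bpf_warn_invalid_xdp_action()/trace_xdp_exception(). The generic shape of that switch is sketched below; my_xmit_back() and the MY_XDP_* return codes are hypothetical placeholders for the driver's own helpers and verdict constants.

	#include <linux/filter.h>
	#include <linux/netdevice.h>
	#include <net/xdp.h>

	enum { MY_XDP_PASS, MY_XDP_CONSUMED, MY_XDP_DROPPED };

	static int my_xmit_back(struct xdp_buff *xdp)
	{
		/* stand-in for "transmit back out of this port" */
		return -ENOSPC;
	}

	static int run_xdp(struct net_device *dev, struct bpf_prog *prog,
			   struct xdp_buff *xdp)
	{
		u32 act = bpf_prog_run_xdp(prog, xdp);

		switch (act) {
		case XDP_PASS:
			return MY_XDP_PASS;	/* build an skb as usual */
		case XDP_REDIRECT:
			if (xdp_do_redirect(dev, xdp, prog))
				return MY_XDP_DROPPED;	/* recycle the buffer */
			return MY_XDP_CONSUMED;	/* flushed later by xdp_do_flush() */
		case XDP_TX:
			if (my_xmit_back(xdp))
				return MY_XDP_DROPPED;
			return MY_XDP_CONSUMED;
		default:
			bpf_warn_invalid_xdp_action(dev, prog, act);
			fallthrough;
		case XDP_ABORTED:
			/* a real driver also calls trace_xdp_exception() here */
			fallthrough;
		case XDP_DROP:
			return MY_XDP_DROPPED;
		}
	}
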
3880 static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc, in mvpp2_buff_hdr_pool_put() argument
3887 phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); in mvpp2_buff_hdr_pool_put()
3888 dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); in mvpp2_buff_hdr_pool_put()
3896 if (port->priv->hw_version >= MVPP22) { in mvpp2_buff_hdr_pool_put()
3901 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); in mvpp2_buff_hdr_pool_put()
3910 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, in mvpp2_rx() argument
3913 struct net_device *dev = port->dev; in mvpp2_rx()
3922 xdp_prog = READ_ONCE(port->xdp_prog); in mvpp2_rx()
3925 rx_received = mvpp2_rxq_received(port, rxq->id); in mvpp2_rx()
3942 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); in mvpp2_rx()
3948 rx_status = mvpp2_rxdesc_status_get(port, rx_desc); in mvpp2_rx()
3949 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); in mvpp2_rx()
3951 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); in mvpp2_rx()
3955 bm_pool = &port->priv->bm_pools[pool]; in mvpp2_rx()
3957 if (port->priv->percpu_pools) { in mvpp2_rx()
3958 pp = port->priv->page_pool[pool]; in mvpp2_rx()
4001 ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps); in mvpp2_rx()
4005 err = mvpp2_rx_refill(port, bm_pool, pp, pool); in mvpp2_rx()
4007 netdev_err(port->dev, "failed to refill BM pools\n"); in mvpp2_rx()
4022 netdev_warn(port->dev, "skb build failed\n"); in mvpp2_rx()
4029 if (mvpp22_rx_hwtstamping(port)) { in mvpp2_rx()
4031 mvpp22_tai_tstamp(port->priv->tai, timestamp, in mvpp2_rx()
4035 err = mvpp2_rx_refill(port, bm_pool, pp, pool); in mvpp2_rx()
4037 netdev_err(port->dev, "failed to refill BM pools\n"); in mvpp2_rx()
4054 skb->ip_summed = mvpp2_rx_csum(port, rx_status); in mvpp2_rx()
4062 mvpp2_rx_error(port, rx_desc); in mvpp2_rx()
4065 mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status); in mvpp2_rx()
4067 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); in mvpp2_rx()
4074 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); in mvpp2_rx()
4088 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); in mvpp2_rx()
4094 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, in tx_desc_unmap_put() argument
4097 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); in tx_desc_unmap_put()
4101 mvpp2_txdesc_dma_addr_get(port, desc); in tx_desc_unmap_put()
4103 mvpp2_txdesc_size_get(port, desc); in tx_desc_unmap_put()
4105 dma_unmap_single(port->dev->dev.parent, buf_dma_addr, in tx_desc_unmap_put()
4110 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port, in mvpp2_txdesc_clear_ptp() argument
4114 if (port->priv->hw_version >= MVPP22) in mvpp2_txdesc_clear_ptp()
4119 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port, in mvpp2_tx_hw_tstamp() argument
4128 if (port->priv->hw_version == MVPP21 || in mvpp2_tx_hw_tstamp()
4129 port->tx_hwtstamp_type == HWTSTAMP_TX_OFF) in mvpp2_tx_hw_tstamp()
4144 queue = &port->tx_hwtstamp_queue[0]; in mvpp2_tx_hw_tstamp()
4157 queue = &port->tx_hwtstamp_queue[1]; in mvpp2_tx_hw_tstamp()
4199 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, in mvpp2_tx_frag_process() argument
4203 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); in mvpp2_tx_frag_process()
4214 mvpp2_txdesc_clear_ptp(port, tx_desc); in mvpp2_tx_frag_process()
4215 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); in mvpp2_tx_frag_process()
4216 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); in mvpp2_tx_frag_process()
4218 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, in mvpp2_tx_frag_process()
4221 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { in mvpp2_tx_frag_process()
4226 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); in mvpp2_tx_frag_process()
4230 mvpp2_txdesc_cmd_set(port, tx_desc, in mvpp2_tx_frag_process()
4232 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); in mvpp2_tx_frag_process()
4235 mvpp2_txdesc_cmd_set(port, tx_desc, 0); in mvpp2_tx_frag_process()
4236 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); in mvpp2_tx_frag_process()
4247 tx_desc_unmap_put(port, txq, tx_desc); in mvpp2_tx_frag_process()
4260 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_tso_put_hdr() local
4264 mvpp2_txdesc_clear_ptp(port, tx_desc); in mvpp2_tso_put_hdr()
4265 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); in mvpp2_tso_put_hdr()
4266 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); in mvpp2_tso_put_hdr()
4270 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); in mvpp2_tso_put_hdr()
4272 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | in mvpp2_tso_put_hdr()
4275 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); in mvpp2_tso_put_hdr()
4285 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_tso_put_data() local
4289 mvpp2_txdesc_clear_ptp(port, tx_desc); in mvpp2_tso_put_data()
4290 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); in mvpp2_tso_put_data()
4291 mvpp2_txdesc_size_set(port, tx_desc, sz); in mvpp2_tso_put_data()
4300 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); in mvpp2_tso_put_data()
4303 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); in mvpp2_tso_put_data()
4305 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); in mvpp2_tso_put_data()
4309 mvpp2_txdesc_cmd_set(port, tx_desc, 0); in mvpp2_tso_put_data()
4312 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); in mvpp2_tso_put_data()
4321 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_tx_tso() local
4326 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || in mvpp2_tx_tso()
4327 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, in mvpp2_tx_tso()
4362 tx_desc_unmap_put(port, txq, tx_desc); in mvpp2_tx_tso()
4370 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_tx() local
4381 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); in mvpp2_tx()
4384 txq = port->txqs[txq_id]; in mvpp2_tx()
4386 aggr_txq = &port->priv->aggr_txqs[thread]; in mvpp2_tx()
4388 if (test_bit(thread, &port->priv->lock_map)) in mvpp2_tx()
4389 spin_lock_irqsave(&port->tx_lock[thread], flags); in mvpp2_tx()
4398 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || in mvpp2_tx()
4399 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { in mvpp2_tx()
4407 !mvpp2_tx_hw_tstamp(port, tx_desc, skb)) in mvpp2_tx()
4408 mvpp2_txdesc_clear_ptp(port, tx_desc); in mvpp2_tx()
4409 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); in mvpp2_tx()
4410 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); in mvpp2_tx()
4420 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); in mvpp2_tx()
4422 tx_cmd = mvpp2_skb_tx_csum(port, skb); in mvpp2_tx()
4427 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); in mvpp2_tx()
4428 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); in mvpp2_tx()
4432 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); in mvpp2_tx()
4433 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); in mvpp2_tx()
4436 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { in mvpp2_tx()
4437 tx_desc_unmap_put(port, txq, tx_desc); in mvpp2_tx()
4444 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); in mvpp2_tx()
4453 mvpp2_aggr_txq_pend_desc_add(port, frags); in mvpp2_tx()
4468 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) in mvpp2_tx()
4469 mvpp2_txq_done(port, txq, txq_pcpu); in mvpp2_tx()
4472 if (!port->has_tx_irqs && txq_pcpu->count <= frags && in mvpp2_tx()
4474 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); in mvpp2_tx()
4484 if (test_bit(thread, &port->priv->lock_map)) in mvpp2_tx()
4485 spin_unlock_irqrestore(&port->tx_lock[thread], flags); in mvpp2_tx()
4504 struct mvpp2_port *port = netdev_priv(napi->dev); in mvpp2_poll() local
4506 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); in mvpp2_poll()
4520 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, in mvpp2_poll()
4521 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); in mvpp2_poll()
4525 mvpp2_cause_error(port->dev, cause_misc); in mvpp2_poll()
4528 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); in mvpp2_poll()
4529 mvpp2_thread_write(port->priv, thread, in mvpp2_poll()
4530 MVPP2_ISR_RX_TX_CAUSE_REG(port->id), in mvpp2_poll()
4534 if (port->has_tx_irqs) { in mvpp2_poll()
4538 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); in mvpp2_poll()
4544 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); in mvpp2_poll()
4551 rxq = mvpp2_get_rx_queue(port, cause_rx); in mvpp2_poll()
4555 count = mvpp2_rx(port, napi, budget, rxq); in mvpp2_poll()
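
mvpp2_poll() above follows the standard NAPI contract: acknowledge the cause register, run TX-done when TX interrupts exist, process RX queues until the budget is spent, and only complete NAPI (and re-enable interrupts) when fewer packets than the budget were handled. That budget rule in isolation looks like the sketch below, with process_rx() and reenable_irqs() as hypothetical stand-ins.

	#include <linux/netdevice.h>

	static int process_rx(struct napi_struct *napi, int budget)
	{
		return 0;	/* stand-in for the RX queue walk */
	}

	static void reenable_irqs(struct napi_struct *napi) { }

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int done = process_rx(napi, budget);

		/* Only when the budget was NOT fully consumed may the handler
		 * complete NAPI and re-enable the RX interrupt; otherwise the
		 * core will poll again. */
		if (done < budget && napi_complete_done(napi, done))
			reenable_irqs(napi);

		return done;
	}
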
4577 static void mvpp22_mode_reconfigure(struct mvpp2_port *port, in mvpp22_mode_reconfigure() argument
4583 mvpp2_mac_reset_assert(port); in mvpp22_mode_reconfigure()
4586 mvpp22_pcs_reset_assert(port); in mvpp22_mode_reconfigure()
4589 mvpp22_comphy_init(port, interface); in mvpp22_mode_reconfigure()
4592 mvpp22_gop_init(port, interface); in mvpp22_mode_reconfigure()
4594 mvpp22_pcs_reset_deassert(port, interface); in mvpp22_mode_reconfigure()
4596 if (mvpp2_port_supports_xlg(port)) { in mvpp22_mode_reconfigure()
4597 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); in mvpp22_mode_reconfigure()
4605 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); in mvpp22_mode_reconfigure()
4608 if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface)) in mvpp22_mode_reconfigure()
4609 mvpp2_xlg_max_rx_size_set(port); in mvpp22_mode_reconfigure()
4611 mvpp2_gmac_max_rx_size_set(port); in mvpp22_mode_reconfigure()
4614 /* Set hw internals when starting port */
4615 static void mvpp2_start_dev(struct mvpp2_port *port) in mvpp2_start_dev() argument
4619 mvpp2_txp_max_tx_size_set(port); in mvpp2_start_dev()
4621 for (i = 0; i < port->nqvecs; i++) in mvpp2_start_dev()
4622 napi_enable(&port->qvecs[i].napi); in mvpp2_start_dev()
4625 mvpp2_interrupts_enable(port); in mvpp2_start_dev()
4627 if (port->priv->hw_version >= MVPP22) in mvpp2_start_dev()
4628 mvpp22_mode_reconfigure(port, port->phy_interface); in mvpp2_start_dev()
4630 if (port->phylink) { in mvpp2_start_dev()
4631 phylink_start(port->phylink); in mvpp2_start_dev()
4633 mvpp2_acpi_start(port); in mvpp2_start_dev()
4636 netif_tx_start_all_queues(port->dev); in mvpp2_start_dev()
4638 clear_bit(0, &port->state); in mvpp2_start_dev()
4641 /* Set hw internals when stopping port */
4642 static void mvpp2_stop_dev(struct mvpp2_port *port) in mvpp2_stop_dev() argument
4646 set_bit(0, &port->state); in mvpp2_stop_dev()
4649 mvpp2_interrupts_disable(port); in mvpp2_stop_dev()
4651 for (i = 0; i < port->nqvecs; i++) in mvpp2_stop_dev()
4652 napi_disable(&port->qvecs[i].napi); in mvpp2_stop_dev()
4654 if (port->phylink) in mvpp2_stop_dev()
4655 phylink_stop(port->phylink); in mvpp2_stop_dev()
4656 phy_power_off(port->comphy); in mvpp2_stop_dev()
4701 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) in mvpp21_get_mac_address() argument
4705 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); in mvpp21_get_mac_address()
4706 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); in mvpp21_get_mac_address()
4707 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); in mvpp21_get_mac_address()
4716 static int mvpp2_irqs_init(struct mvpp2_port *port) in mvpp2_irqs_init() argument
4720 for (i = 0; i < port->nqvecs; i++) { in mvpp2_irqs_init()
4721 struct mvpp2_queue_vector *qv = port->qvecs + i; in mvpp2_irqs_init()
4733 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); in mvpp2_irqs_init()
4741 if (mvpp2_cpu_to_thread(port->priv, cpu) == in mvpp2_irqs_init()
4752 for (i = 0; i < port->nqvecs; i++) { in mvpp2_irqs_init()
4753 struct mvpp2_queue_vector *qv = port->qvecs + i; in mvpp2_irqs_init()
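
mvpp2_irqs_init() above requests one interrupt per queue vector and, on failure, walks the vectors again to release what was claimed. A self-contained sketch of that request-with-unwind pattern, with struct my_qvec and my_isr() as hypothetical stand-ins, is:

	#include <linux/interrupt.h>

	struct my_qvec {
		int irq;
	};

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int my_irqs_init(struct my_qvec *qvecs, int nqvecs, const char *name)
	{
		int i, err;

		for (i = 0; i < nqvecs; i++) {
			err = request_irq(qvecs[i].irq, my_isr, 0, name, &qvecs[i]);
			if (err)
				goto err_unwind;
		}
		return 0;

	err_unwind:
		/* Release every IRQ acquired before the failure. */
		while (--i >= 0)
			free_irq(qvecs[i].irq, &qvecs[i]);
		return err;
	}
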
4764 static void mvpp2_irqs_deinit(struct mvpp2_port *port) in mvpp2_irqs_deinit() argument
4768 for (i = 0; i < port->nqvecs; i++) { in mvpp2_irqs_deinit()
4769 struct mvpp2_queue_vector *qv = port->qvecs + i; in mvpp2_irqs_deinit()
4779 static bool mvpp22_rss_is_supported(struct mvpp2_port *port) in mvpp22_rss_is_supported() argument
4782 !(port->flags & MVPP2_F_LOOPBACK); in mvpp22_rss_is_supported()
4787 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_open() local
4788 struct mvpp2 *priv = port->priv; in mvpp2_open()
4794 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); in mvpp2_open()
4799 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); in mvpp2_open()
4804 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); in mvpp2_open()
4809 err = mvpp2_prs_def_flow(port); in mvpp2_open()
4816 err = mvpp2_setup_rxqs(port); in mvpp2_open()
4818 netdev_err(port->dev, "cannot allocate Rx queues\n"); in mvpp2_open()
4822 err = mvpp2_setup_txqs(port); in mvpp2_open()
4824 netdev_err(port->dev, "cannot allocate Tx queues\n"); in mvpp2_open()
4828 err = mvpp2_irqs_init(port); in mvpp2_open()
4830 netdev_err(port->dev, "cannot init IRQs\n"); in mvpp2_open()
4834 if (port->phylink) { in mvpp2_open()
4835 err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0); in mvpp2_open()
4837 netdev_err(port->dev, "could not attach PHY (%d)\n", in mvpp2_open()
4845 if (priv->hw_version >= MVPP22 && port->port_irq) { in mvpp2_open()
4846 err = request_irq(port->port_irq, mvpp2_port_isr, 0, in mvpp2_open()
4847 dev->name, port); in mvpp2_open()
4849 netdev_err(port->dev, in mvpp2_open()
4850 "cannot request port link/ptp IRQ %d\n", in mvpp2_open()
4851 port->port_irq); in mvpp2_open()
4855 mvpp22_gop_setup_irq(port); in mvpp2_open()
4858 netif_carrier_off(port->dev); in mvpp2_open()
4862 port->port_irq = 0; in mvpp2_open()
4866 netdev_err(port->dev, in mvpp2_open()
4873 on_each_cpu(mvpp2_interrupts_unmask, port, 1); in mvpp2_open()
4874 mvpp2_shared_interrupt_mask_unmask(port, false); in mvpp2_open()
4876 mvpp2_start_dev(port); in mvpp2_open()
4879 queue_delayed_work(priv->stats_queue, &port->stats_work, in mvpp2_open()
4885 mvpp2_irqs_deinit(port); in mvpp2_open()
4887 mvpp2_cleanup_txqs(port); in mvpp2_open()
4889 mvpp2_cleanup_rxqs(port); in mvpp2_open()
4895 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_stop() local
4899 mvpp2_stop_dev(port); in mvpp2_stop()
4902 on_each_cpu(mvpp2_interrupts_mask, port, 1); in mvpp2_stop()
4903 mvpp2_shared_interrupt_mask_unmask(port, true); in mvpp2_stop()
4905 if (port->phylink) in mvpp2_stop()
4906 phylink_disconnect_phy(port->phylink); in mvpp2_stop()
4907 if (port->port_irq) in mvpp2_stop()
4908 free_irq(port->port_irq, port); in mvpp2_stop()
4910 mvpp2_irqs_deinit(port); in mvpp2_stop()
4911 if (!port->has_tx_irqs) { in mvpp2_stop()
4912 for (thread = 0; thread < port->priv->nthreads; thread++) { in mvpp2_stop()
4913 port_pcpu = per_cpu_ptr(port->pcpu, thread); in mvpp2_stop()
4919 mvpp2_cleanup_rxqs(port); in mvpp2_stop()
4920 mvpp2_cleanup_txqs(port); in mvpp2_stop()
4922 cancel_delayed_work_sync(&port->stats_work); in mvpp2_stop()
4924 mvpp2_mac_reset_assert(port); in mvpp2_stop()
4925 mvpp22_pcs_reset_assert(port); in mvpp2_stop()
4930 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, in mvpp2_prs_mac_da_accept_list() argument
4937 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); in mvpp2_prs_mac_da_accept_list()
4945 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) in mvpp2_set_rx_promisc() argument
4947 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) in mvpp2_set_rx_promisc()
4948 mvpp2_prs_vid_enable_filtering(port); in mvpp2_set_rx_promisc()
4950 mvpp2_prs_vid_disable_filtering(port); in mvpp2_set_rx_promisc()
4952 mvpp2_prs_mac_promisc_set(port->priv, port->id, in mvpp2_set_rx_promisc()
4955 mvpp2_prs_mac_promisc_set(port->priv, port->id, in mvpp2_set_rx_promisc()
4961 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_set_rx_mode() local
4964 mvpp2_prs_mac_del_all(port); in mvpp2_set_rx_mode()
4967 mvpp2_set_rx_promisc(port, true); in mvpp2_set_rx_mode()
4971 mvpp2_set_rx_promisc(port, false); in mvpp2_set_rx_mode()
4974 mvpp2_prs_mac_da_accept_list(port, &dev->uc)) in mvpp2_set_rx_mode()
4975 mvpp2_prs_mac_promisc_set(port->priv, port->id, in mvpp2_set_rx_mode()
4979 mvpp2_prs_mac_promisc_set(port->priv, port->id, in mvpp2_set_rx_mode()
4985 mvpp2_prs_mac_da_accept_list(port, &dev->mc)) in mvpp2_set_rx_mode()
4986 mvpp2_prs_mac_promisc_set(port->priv, port->id, in mvpp2_set_rx_mode()
5014 struct mvpp2_port *port = NULL; in mvpp2_bm_switch_buffers() local
5018 port = priv->port_list[i]; in mvpp2_bm_switch_buffers()
5019 status[i] = netif_running(port->dev); in mvpp2_bm_switch_buffers()
5021 mvpp2_stop(port->dev); in mvpp2_bm_switch_buffers()
5026 numbufs = port->nrxqs * 2; in mvpp2_bm_switch_buffers()
5032 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); in mvpp2_bm_switch_buffers()
5034 devm_kfree(port->dev->dev.parent, priv->bm_pools); in mvpp2_bm_switch_buffers()
5036 mvpp2_bm_init(port->dev->dev.parent, priv); in mvpp2_bm_switch_buffers()
5039 port = priv->port_list[i]; in mvpp2_bm_switch_buffers()
5040 if (percpu && port->ntxqs >= num_possible_cpus() * 2) in mvpp2_bm_switch_buffers()
5041 xdp_set_features_flag(port->dev, in mvpp2_bm_switch_buffers()
5046 xdp_clear_features_flag(port->dev); in mvpp2_bm_switch_buffers()
5048 mvpp2_swf_bm_pool_init(port); in mvpp2_bm_switch_buffers()
5050 mvpp2_open(port->dev); in mvpp2_bm_switch_buffers()
5061 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_change_mtu() local
5063 struct mvpp2 *priv = port->priv; in mvpp2_change_mtu()
5072 if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { in mvpp2_change_mtu()
5088 if (priv->port_list[i] != port && in mvpp2_change_mtu()
5095 /* No port is using jumbo frames */ in mvpp2_change_mtu()
5097 dev_info(port->dev->dev.parent, in mvpp2_change_mtu()
5104 mvpp2_stop_dev(port); in mvpp2_change_mtu()
5112 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); in mvpp2_change_mtu()
5116 mvpp2_start_dev(port); in mvpp2_change_mtu()
5117 mvpp2_egress_enable(port); in mvpp2_change_mtu()
5118 mvpp2_ingress_enable(port); in mvpp2_change_mtu()
5124 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) in mvpp2_check_pagepool_dma() argument
5127 struct mvpp2 *priv = port->priv; in mvpp2_check_pagepool_dma()
5137 port = priv->port_list[i]; in mvpp2_check_pagepool_dma()
5138 if (port->xdp_prog) { in mvpp2_check_pagepool_dma()
5154 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_get_stats64() local
5165 cpu_stats = per_cpu_ptr(port->stats, cpu); in mvpp2_get_stats64()
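
The per_cpu_ptr() walk above is the usual way ndo_get_stats64 folds lock-free per-CPU counters into one rtnl_link_stats64. A sketch of that aggregation using the u64_stats seqcount helpers is below; struct my_pcpu_stats is a hypothetical stand-in for the driver's per-CPU stats block.

	#include <linux/netdevice.h>
	#include <linux/u64_stats_sync.h>

	struct my_pcpu_stats {
		u64 rx_packets;
		u64 rx_bytes;
		struct u64_stats_sync syncp;
	};

	static void my_get_stats64(struct my_pcpu_stats __percpu *pcpu,
				   struct rtnl_link_stats64 *stats)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			const struct my_pcpu_stats *s = per_cpu_ptr(pcpu, cpu);
			u64 packets, bytes;
			unsigned int start;

			/* Retry if a writer on that CPU updated the counters
			 * while we were reading them. */
			do {
				start = u64_stats_fetch_begin(&s->syncp);
				packets = s->rx_packets;
				bytes = s->rx_bytes;
			} while (u64_stats_fetch_retry(&s->syncp, start));

			stats->rx_packets += packets;
			stats->rx_bytes += bytes;
		}
	}
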
5185 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) in mvpp2_set_ts_config() argument
5198 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); in mvpp2_set_ts_config()
5213 mvpp22_tai_start(port->priv->tai); in mvpp2_set_ts_config()
5221 port->rx_hwtstamp = true; in mvpp2_set_ts_config()
5223 port->rx_hwtstamp = false; in mvpp2_set_ts_config()
5235 mvpp22_tai_stop(port->priv->tai); in mvpp2_set_ts_config()
5237 port->tx_hwtstamp_type = config.tx_type; in mvpp2_set_ts_config()
5245 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) in mvpp2_get_ts_config() argument
5251 config.tx_type = port->tx_hwtstamp_type; in mvpp2_get_ts_config()
5252 config.rx_filter = port->rx_hwtstamp ? in mvpp2_get_ts_config()
5264 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_ts_info() local
5266 if (!port->hwtstamp) in mvpp2_ethtool_get_ts_info()
5269 info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai); in mvpp2_ethtool_get_ts_info()
5284 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ioctl() local
5288 if (port->hwtstamp) in mvpp2_ioctl()
5289 return mvpp2_set_ts_config(port, ifr); in mvpp2_ioctl()
5293 if (port->hwtstamp) in mvpp2_ioctl()
5294 return mvpp2_get_ts_config(port, ifr); in mvpp2_ioctl()
5298 if (!port->phylink) in mvpp2_ioctl()
5301 return phylink_mii_ioctl(port->phylink, ifr, cmd); in mvpp2_ioctl()
5306 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_vlan_rx_add_vid() local
5309 ret = mvpp2_prs_vid_entry_add(port, vid); in mvpp2_vlan_rx_add_vid()
5311 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", in mvpp2_vlan_rx_add_vid()
5318 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_vlan_rx_kill_vid() local
5320 mvpp2_prs_vid_entry_remove(port, vid); in mvpp2_vlan_rx_kill_vid()
5328 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_set_features() local
5332 mvpp2_prs_vid_enable_filtering(port); in mvpp2_set_features()
5335 * port in mvpp2_set_features()
5337 mvpp2_prs_vid_remove_all(port); in mvpp2_set_features()
5339 mvpp2_prs_vid_disable_filtering(port); in mvpp2_set_features()
5345 mvpp22_port_rss_enable(port); in mvpp2_set_features()
5347 mvpp22_port_rss_disable(port); in mvpp2_set_features()
5353 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) in mvpp2_xdp_setup() argument
5356 bool running = netif_running(port->dev); in mvpp2_xdp_setup()
5357 bool reset = !prog != !port->xdp_prog; in mvpp2_xdp_setup()
5359 if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { in mvpp2_xdp_setup()
5364 if (!port->priv->percpu_pools) { in mvpp2_xdp_setup()
5369 if (port->ntxqs < num_possible_cpus() * 2) { in mvpp2_xdp_setup()
5376 mvpp2_stop(port->dev); in mvpp2_xdp_setup()
5378 old_prog = xchg(&port->xdp_prog, prog); in mvpp2_xdp_setup()
5388 mvpp2_open(port->dev); in mvpp2_xdp_setup()
5391 mvpp2_check_pagepool_dma(port); in mvpp2_xdp_setup()
5398 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_xdp() local
5402 return mvpp2_xdp_setup(port, xdp); in mvpp2_xdp()
5412 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_nway_reset() local
5414 if (!port->phylink) in mvpp2_ethtool_nway_reset()
5417 return phylink_ethtool_nway_reset(port->phylink); in mvpp2_ethtool_nway_reset()
5427 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_set_coalesce() local
5430 for (queue = 0; queue < port->nrxqs; queue++) { in mvpp2_ethtool_set_coalesce()
5431 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; in mvpp2_ethtool_set_coalesce()
5435 mvpp2_rx_pkts_coal_set(port, rxq); in mvpp2_ethtool_set_coalesce()
5436 mvpp2_rx_time_coal_set(port, rxq); in mvpp2_ethtool_set_coalesce()
5439 if (port->has_tx_irqs) { in mvpp2_ethtool_set_coalesce()
5440 port->tx_time_coal = c->tx_coalesce_usecs; in mvpp2_ethtool_set_coalesce()
5441 mvpp2_tx_time_coal_set(port); in mvpp2_ethtool_set_coalesce()
5444 for (queue = 0; queue < port->ntxqs; queue++) { in mvpp2_ethtool_set_coalesce()
5445 struct mvpp2_tx_queue *txq = port->txqs[queue]; in mvpp2_ethtool_set_coalesce()
5449 if (port->has_tx_irqs) in mvpp2_ethtool_set_coalesce()
5450 mvpp2_tx_pkts_coal_set(port, txq); in mvpp2_ethtool_set_coalesce()
5463 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_coalesce() local
5465 c->rx_coalesce_usecs = port->rxqs[0]->time_coal; in mvpp2_ethtool_get_coalesce()
5466 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; in mvpp2_ethtool_get_coalesce()
5467 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; in mvpp2_ethtool_get_coalesce()
5468 c->tx_coalesce_usecs = port->tx_time_coal; in mvpp2_ethtool_get_coalesce()
5489 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_ringparam() local
5493 ring->rx_pending = port->rx_ring_size; in mvpp2_ethtool_get_ringparam()
5494 ring->tx_pending = port->tx_ring_size; in mvpp2_ethtool_get_ringparam()
5503 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_set_ringparam() local
5504 u16 prev_rx_ring_size = port->rx_ring_size; in mvpp2_ethtool_set_ringparam()
5505 u16 prev_tx_ring_size = port->tx_ring_size; in mvpp2_ethtool_set_ringparam()
5513 port->rx_ring_size = ring->rx_pending; in mvpp2_ethtool_set_ringparam()
5514 port->tx_ring_size = ring->tx_pending; in mvpp2_ethtool_set_ringparam()
5521 mvpp2_stop_dev(port); in mvpp2_ethtool_set_ringparam()
5522 mvpp2_cleanup_rxqs(port); in mvpp2_ethtool_set_ringparam()
5523 mvpp2_cleanup_txqs(port); in mvpp2_ethtool_set_ringparam()
5525 port->rx_ring_size = ring->rx_pending; in mvpp2_ethtool_set_ringparam()
5526 port->tx_ring_size = ring->tx_pending; in mvpp2_ethtool_set_ringparam()
5528 err = mvpp2_setup_rxqs(port); in mvpp2_ethtool_set_ringparam()
5531 port->rx_ring_size = prev_rx_ring_size; in mvpp2_ethtool_set_ringparam()
5533 err = mvpp2_setup_rxqs(port); in mvpp2_ethtool_set_ringparam()
5537 err = mvpp2_setup_txqs(port); in mvpp2_ethtool_set_ringparam()
5540 port->tx_ring_size = prev_tx_ring_size; in mvpp2_ethtool_set_ringparam()
5542 err = mvpp2_setup_txqs(port); in mvpp2_ethtool_set_ringparam()
5547 mvpp2_start_dev(port); in mvpp2_ethtool_set_ringparam()
5548 mvpp2_egress_enable(port); in mvpp2_ethtool_set_ringparam()
5549 mvpp2_ingress_enable(port); in mvpp2_ethtool_set_ringparam()
5554 mvpp2_cleanup_rxqs(port); in mvpp2_ethtool_set_ringparam()
5563 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_pause_param() local
5565 if (!port->phylink) in mvpp2_ethtool_get_pause_param()
5568 phylink_ethtool_get_pauseparam(port->phylink, pause); in mvpp2_ethtool_get_pause_param()
5574 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_set_pause_param() local
5576 if (!port->phylink) in mvpp2_ethtool_set_pause_param()
5579 return phylink_ethtool_set_pauseparam(port->phylink, pause); in mvpp2_ethtool_set_pause_param()
5585 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_link_ksettings() local
5587 if (!port->phylink) in mvpp2_ethtool_get_link_ksettings()
5590 return phylink_ethtool_ksettings_get(port->phylink, cmd); in mvpp2_ethtool_get_link_ksettings()
5596 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_set_link_ksettings() local
5598 if (!port->phylink) in mvpp2_ethtool_set_link_ksettings()
5601 return phylink_ethtool_ksettings_set(port->phylink, cmd); in mvpp2_ethtool_set_link_ksettings()
5607 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_rxnfc() local
5610 if (!mvpp22_rss_is_supported(port)) in mvpp2_ethtool_get_rxnfc()
5615 ret = mvpp2_ethtool_rxfh_get(port, info); in mvpp2_ethtool_get_rxnfc()
5618 info->data = port->nrxqs; in mvpp2_ethtool_get_rxnfc()
5621 info->rule_cnt = port->n_rfs_rules; in mvpp2_ethtool_get_rxnfc()
5624 ret = mvpp2_ethtool_cls_rule_get(port, info); in mvpp2_ethtool_get_rxnfc()
5633 if (port->rfs_rules[i]) in mvpp2_ethtool_get_rxnfc()
5647 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_set_rxnfc() local
5650 if (!mvpp22_rss_is_supported(port)) in mvpp2_ethtool_set_rxnfc()
5655 ret = mvpp2_ethtool_rxfh_set(port, info); in mvpp2_ethtool_set_rxnfc()
5658 ret = mvpp2_ethtool_cls_rule_ins(port, info); in mvpp2_ethtool_set_rxnfc()
5661 ret = mvpp2_ethtool_cls_rule_del(port, info); in mvpp2_ethtool_set_rxnfc()
5671 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_rxfh_indir_size() local
5673 return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0; in mvpp2_ethtool_get_rxfh_indir_size()
5679 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_ethtool_get_rxfh() local
5683 if (!mvpp22_rss_is_supported(port)) in mvpp2_ethtool_get_rxfh()
5691 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, in mvpp2_ethtool_get_rxfh()
5697 static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port, in mvpp2_ethtool_rxfh_okay() argument
5700 if (!mvpp22_rss_is_supported(port)) in mvpp2_ethtool_rxfh_okay()
5718 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_create_rxfh_context() local
5721 if (!mvpp2_ethtool_rxfh_okay(port, rxfh)) in mvpp2_create_rxfh_context()
5726 ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context); in mvpp2_create_rxfh_context()
5731 ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context, in mvpp2_create_rxfh_context()
5734 ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context, in mvpp2_create_rxfh_context()
5744 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_modify_rxfh_context() local
5747 if (!mvpp2_ethtool_rxfh_okay(port, rxfh)) in mvpp2_modify_rxfh_context()
5751 ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context, in mvpp2_modify_rxfh_context()
5761 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_remove_rxfh_context() local
5763 return mvpp22_port_rss_ctx_delete(port, rss_context); in mvpp2_remove_rxfh_context()
5821 * had a single IRQ defined per-port.
5823 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, in mvpp2_simple_queue_vectors_init() argument
5826 struct mvpp2_queue_vector *v = &port->qvecs[0]; in mvpp2_simple_queue_vectors_init()
5829 v->nrxqs = port->nrxqs; in mvpp2_simple_queue_vectors_init()
5833 v->port = port; in mvpp2_simple_queue_vectors_init()
5837 netif_napi_add(port->dev, &v->napi, mvpp2_poll); in mvpp2_simple_queue_vectors_init()
5839 port->nqvecs = 1; in mvpp2_simple_queue_vectors_init()
5844 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, in mvpp2_multi_queue_vectors_init() argument
5847 struct mvpp2 *priv = port->priv; in mvpp2_multi_queue_vectors_init()
5853 port->nqvecs = priv->nthreads + 1; in mvpp2_multi_queue_vectors_init()
5856 port->nqvecs = priv->nthreads; in mvpp2_multi_queue_vectors_init()
5860 for (i = 0; i < port->nqvecs; i++) { in mvpp2_multi_queue_vectors_init()
5863 v = port->qvecs + i; in mvpp2_multi_queue_vectors_init()
5865 v->port = port; in mvpp2_multi_queue_vectors_init()
5870 if (port->flags & MVPP2_F_DT_COMPAT) in mvpp2_multi_queue_vectors_init()
5879 i == (port->nqvecs - 1)) { in mvpp2_multi_queue_vectors_init()
5881 v->nrxqs = port->nrxqs; in mvpp2_multi_queue_vectors_init()
5884 if (port->flags & MVPP2_F_DT_COMPAT) in mvpp2_multi_queue_vectors_init()
5891 v->irq = fwnode_irq_get(port->fwnode, i); in mvpp2_multi_queue_vectors_init()
5897 netif_napi_add(port->dev, &v->napi, mvpp2_poll); in mvpp2_multi_queue_vectors_init()
5903 for (i = 0; i < port->nqvecs; i++) in mvpp2_multi_queue_vectors_init()
5904 irq_dispose_mapping(port->qvecs[i].irq); in mvpp2_multi_queue_vectors_init()
5908 static int mvpp2_queue_vectors_init(struct mvpp2_port *port, in mvpp2_queue_vectors_init() argument
5911 if (port->has_tx_irqs) in mvpp2_queue_vectors_init()
5912 return mvpp2_multi_queue_vectors_init(port, port_node); in mvpp2_queue_vectors_init()
5914 return mvpp2_simple_queue_vectors_init(port, port_node); in mvpp2_queue_vectors_init()
5917 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) in mvpp2_queue_vectors_deinit() argument
5921 for (i = 0; i < port->nqvecs; i++) in mvpp2_queue_vectors_deinit()
5922 irq_dispose_mapping(port->qvecs[i].irq); in mvpp2_queue_vectors_deinit()
5925 /* Configure Rx queue group interrupt for this port */
5926 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) in mvpp2_rx_irqs_setup() argument
5928 struct mvpp2 *priv = port->priv; in mvpp2_rx_irqs_setup()
5933 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), in mvpp2_rx_irqs_setup()
5934 port->nrxqs); in mvpp2_rx_irqs_setup()
5939 for (i = 0; i < port->nqvecs; i++) { in mvpp2_rx_irqs_setup()
5940 struct mvpp2_queue_vector *qv = port->qvecs + i; in mvpp2_rx_irqs_setup()
5946 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; in mvpp2_rx_irqs_setup()
5955 /* Initialize port HW */
5956 static int mvpp2_port_init(struct mvpp2_port *port) in mvpp2_port_init() argument
5958 struct device *dev = port->dev->dev.parent; in mvpp2_port_init()
5959 struct mvpp2 *priv = port->priv; in mvpp2_port_init()
5965 if (port->first_rxq + port->nrxqs > in mvpp2_port_init()
5969 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) in mvpp2_port_init()
5972 /* Disable port */ in mvpp2_port_init()
5973 mvpp2_egress_disable(port); in mvpp2_port_init()
5974 mvpp2_port_disable(port); in mvpp2_port_init()
5976 if (mvpp2_is_xlg(port->phy_interface)) { in mvpp2_port_init()
5977 val = readl(port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_port_init()
5980 writel(val, port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_port_init()
5982 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_port_init()
5985 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_port_init()
5988 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; in mvpp2_port_init()
5990 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), in mvpp2_port_init()
5992 if (!port->txqs) in mvpp2_port_init()
5995 /* Associate physical Tx queues to this port and initialize. in mvpp2_port_init()
5998 for (queue = 0; queue < port->ntxqs; queue++) { in mvpp2_port_init()
5999 int queue_phy_id = mvpp2_txq_phys(port->id, queue); in mvpp2_port_init()
6022 port->txqs[queue] = txq; in mvpp2_port_init()
6025 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), in mvpp2_port_init()
6027 if (!port->rxqs) { in mvpp2_port_init()
6032 /* Allocate and initialize Rx queue for this port */ in mvpp2_port_init()
6033 for (queue = 0; queue < port->nrxqs; queue++) { in mvpp2_port_init()
6036 /* Map physical Rx queue to port's logical Rx queue */ in mvpp2_port_init()
6043 rxq->id = port->first_rxq + queue; in mvpp2_port_init()
6044 rxq->port = port->id; in mvpp2_port_init()
6047 port->rxqs[queue] = rxq; in mvpp2_port_init()
6050 mvpp2_rx_irqs_setup(port); in mvpp2_port_init()
6053 for (queue = 0; queue < port->nrxqs; queue++) { in mvpp2_port_init()
6054 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; in mvpp2_port_init()
6056 rxq->size = port->rx_ring_size; in mvpp2_port_init()
6061 mvpp2_ingress_disable(port); in mvpp2_port_init()
6063 /* Port default configuration */ in mvpp2_port_init()
6064 mvpp2_defaults_set(port); in mvpp2_port_init()
6066 /* Port's classifier configuration */ in mvpp2_port_init()
6067 mvpp2_cls_oversize_rxq_set(port); in mvpp2_port_init()
6068 mvpp2_cls_port_config(port); in mvpp2_port_init()
6070 if (mvpp22_rss_is_supported(port)) in mvpp2_port_init()
6071 mvpp22_port_rss_init(port); in mvpp2_port_init()
6074 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); in mvpp2_port_init()
6077 err = mvpp2_swf_bm_pool_init(port); in mvpp2_port_init()
6081 /* Clear all port stats */ in mvpp2_port_init()
6082 mvpp2_read_stats(port); in mvpp2_port_init()
6083 memset(port->ethtool_stats, 0, in mvpp2_port_init()
6084 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); in mvpp2_port_init()
6089 for (queue = 0; queue < port->ntxqs; queue++) { in mvpp2_port_init()
6090 if (!port->txqs[queue]) in mvpp2_port_init()
6092 free_percpu(port->txqs[queue]->pcpu); in mvpp2_port_init()
6113 /* Checks if the port dt description has the required Tx interrupts:
6152 struct mvpp2_port *port = netdev_priv(dev); in mvpp2_port_copy_mac_addr() local
6164 mvpp21_get_mac_address(port, hw_mac_addr); in mvpp2_port_copy_mac_addr()
6206 struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); in mvpp2_xlg_pcs_get_state() local
6209 if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER) in mvpp2_xlg_pcs_get_state()
6216 val = readl(port->base + MVPP22_XLG_STATUS); in mvpp2_xlg_pcs_get_state()
6220 val = readl(port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_xlg_pcs_get_state()
6258 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); in mvpp2_gmac_pcs_get_state() local
6261 val = readl(port->base + MVPP2_GMAC_STATUS0); in mvpp2_gmac_pcs_get_state()
6267 switch (port->phy_interface) { in mvpp2_gmac_pcs_get_state()
6295 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); in mvpp2_gmac_pcs_config() local
6337 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_gmac_pcs_config()
6341 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_gmac_pcs_config()
6349 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); in mvpp2_gmac_pcs_an_restart() local
6350 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_gmac_pcs_an_restart()
6353 port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_gmac_pcs_an_restart()
6355 port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_gmac_pcs_an_restart()
6365 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, in mvpp2_xlg_config() argument
6370 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, in mvpp2_xlg_config()
6373 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, in mvpp2_xlg_config()
6381 val = readl(port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_xlg_config()
6385 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, in mvpp2_gmac_config() argument
6392 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); in mvpp2_gmac_config()
6393 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); in mvpp2_gmac_config()
6394 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); in mvpp2_gmac_config()
6399 /* Configure port type */ in mvpp2_gmac_config()
6437 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); in mvpp2_gmac_config()
6439 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); in mvpp2_gmac_config()
6441 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); in mvpp2_gmac_config()
6447 struct mvpp2_port *port = mvpp2_phylink_to_port(config); in mvpp2_select_pcs() local
6454 return &port->pcs_xlg; in mvpp2_select_pcs()
6456 return &port->pcs_gmac; in mvpp2_select_pcs()
6462 struct mvpp2_port *port = mvpp2_phylink_to_port(config); in mvpp2_mac_prepare() local
6465 if (mvpp2_is_xlg(interface) && port->gop_id != 0) { in mvpp2_mac_prepare()
6466 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); in mvpp2_mac_prepare()
6470 if (port->phy_interface != interface || in mvpp2_mac_prepare()
6478 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, in mvpp2_mac_prepare()
6483 if (mvpp2_port_supports_xlg(port)) in mvpp2_mac_prepare()
6484 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, in mvpp2_mac_prepare()
6490 /* Make sure the port is disabled when reconfiguring the mode */ in mvpp2_mac_prepare()
6491 mvpp2_port_disable(port); in mvpp2_mac_prepare()
6493 if (port->phy_interface != interface) { in mvpp2_mac_prepare()
6495 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, in mvpp2_mac_prepare()
6499 if (port->priv->hw_version >= MVPP22) { in mvpp2_mac_prepare()
6500 mvpp22_gop_mask_irq(port); in mvpp2_mac_prepare()
6502 phy_power_off(port->comphy); in mvpp2_mac_prepare()
6505 mvpp22_mode_reconfigure(port, interface); in mvpp2_mac_prepare()
6515 struct mvpp2_port *port = mvpp2_phylink_to_port(config); in mvpp2_mac_config() local
6519 mvpp2_xlg_config(port, mode, state); in mvpp2_mac_config()
6523 mvpp2_gmac_config(port, mode, state); in mvpp2_mac_config()
6525 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) in mvpp2_mac_config()
6526 mvpp2_port_loopback_set(port, state); in mvpp2_mac_config()
6532 struct mvpp2_port *port = mvpp2_phylink_to_port(config); in mvpp2_mac_finish() local
6534 if (port->priv->hw_version >= MVPP22 && in mvpp2_mac_finish()
6535 port->phy_interface != interface) { in mvpp2_mac_finish()
6536 port->phy_interface = interface; in mvpp2_mac_finish()
6539 mvpp22_gop_unmask_irq(port); in mvpp2_mac_finish()
6544 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, in mvpp2_mac_finish()
6547 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & in mvpp2_mac_finish()
6552 mvpp2_port_enable(port); in mvpp2_mac_finish()
6559 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, in mvpp2_mac_finish()
6563 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, in mvpp2_mac_finish()
6577 struct mvpp2_port *port = mvpp2_phylink_to_port(config); in mvpp2_mac_link_up() local
6589 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, in mvpp2_mac_link_up()
6607 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, in mvpp2_mac_link_up()
6625 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, in mvpp2_mac_link_up()
6630 if (port->priv->global_tx_fc) { in mvpp2_mac_link_up()
6631 port->tx_fc = tx_pause; in mvpp2_mac_link_up()
6633 mvpp2_rxq_enable_fc(port); in mvpp2_mac_link_up()
6635 mvpp2_rxq_disable_fc(port); in mvpp2_mac_link_up()
6636 if (port->priv->percpu_pools) { in mvpp2_mac_link_up()
6637 for (i = 0; i < port->nrxqs; i++) in mvpp2_mac_link_up()
6638 mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause); in mvpp2_mac_link_up()
6640 mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause); in mvpp2_mac_link_up()
6641 mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause); in mvpp2_mac_link_up()
6643 if (port->priv->hw_version == MVPP23) in mvpp2_mac_link_up()
6644 mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause); in mvpp2_mac_link_up()
6647 mvpp2_port_enable(port); in mvpp2_mac_link_up()
6649 mvpp2_egress_enable(port); in mvpp2_mac_link_up()
6650 mvpp2_ingress_enable(port); in mvpp2_mac_link_up()
6651 netif_tx_wake_all_queues(port->dev); in mvpp2_mac_link_up()
6657 struct mvpp2_port *port = mvpp2_phylink_to_port(config); in mvpp2_mac_link_down() local
6662 val = readl(port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_mac_link_down()
6665 writel(val, port->base + MVPP22_XLG_CTRL0_REG); in mvpp2_mac_link_down()
6667 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_mac_link_down()
6670 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); in mvpp2_mac_link_down()
6674 netif_tx_stop_all_queues(port->dev); in mvpp2_mac_link_down()
6675 mvpp2_egress_disable(port); in mvpp2_mac_link_down()
6676 mvpp2_ingress_disable(port); in mvpp2_mac_link_down()
6678 mvpp2_port_disable(port); in mvpp2_mac_link_down()
6691 static void mvpp2_acpi_start(struct mvpp2_port *port) in mvpp2_acpi_start() argument
6698 .interface = port->phy_interface, in mvpp2_acpi_start()
6702 pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface); in mvpp2_acpi_start()
6704 mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND, in mvpp2_acpi_start()
6705 port->phy_interface); in mvpp2_acpi_start()
6706 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); in mvpp2_acpi_start()
6708 port->phy_interface, state.advertising, in mvpp2_acpi_start()
6710 mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, in mvpp2_acpi_start()
6711 port->phy_interface); in mvpp2_acpi_start()
6712 mvpp2_mac_link_up(&port->phylink_config, NULL, in mvpp2_acpi_start()
6713 MLO_AN_INBAND, port->phy_interface, in mvpp2_acpi_start()
6717 /* In order to ensure backward compatibility for ACPI, check if the port
6736 struct mvpp2_port *port; in mvpp2_port_probe() local
6760 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); in mvpp2_port_probe()
6791 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { in mvpp2_port_probe()
6793 dev_err(&pdev->dev, "missing port-id value\n"); in mvpp2_port_probe()
6802 port = netdev_priv(dev); in mvpp2_port_probe()
6803 port->dev = dev; in mvpp2_port_probe()
6804 port->fwnode = port_fwnode; in mvpp2_port_probe()
6805 port->ntxqs = ntxqs; in mvpp2_port_probe()
6806 port->nrxqs = nrxqs; in mvpp2_port_probe()
6807 port->priv = priv; in mvpp2_port_probe()
6808 port->has_tx_irqs = has_tx_irqs; in mvpp2_port_probe()
6809 port->flags = flags; in mvpp2_port_probe()
6811 err = mvpp2_queue_vectors_init(port, port_node); in mvpp2_port_probe()
6816 port->port_irq = of_irq_get_byname(port_node, "link"); in mvpp2_port_probe()
6818 port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); in mvpp2_port_probe()
6819 if (port->port_irq == -EPROBE_DEFER) { in mvpp2_port_probe()
6823 if (port->port_irq <= 0) in mvpp2_port_probe()
6825 port->port_irq = 0; in mvpp2_port_probe()
6828 port->flags |= MVPP2_F_LOOPBACK; in mvpp2_port_probe()
6830 port->id = id; in mvpp2_port_probe()
6832 port->first_rxq = port->id * port->nrxqs; in mvpp2_port_probe()
6834 port->first_rxq = port->id * priv->max_port_rxqs; in mvpp2_port_probe()
6836 port->of_node = port_node; in mvpp2_port_probe()
6837 port->phy_interface = phy_mode; in mvpp2_port_probe()
6838 port->comphy = comphy; in mvpp2_port_probe()
6841 port->base = devm_platform_ioremap_resource(pdev, 2 + id); in mvpp2_port_probe()
6842 if (IS_ERR(port->base)) { in mvpp2_port_probe()
6843 err = PTR_ERR(port->base); in mvpp2_port_probe()
6847 port->stats_base = port->priv->lms_base + in mvpp2_port_probe()
6849 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; in mvpp2_port_probe()
6851 if (fwnode_property_read_u32(port_fwnode, "gop-port-id", in mvpp2_port_probe()
6852 &port->gop_id)) { in mvpp2_port_probe()
6854 dev_err(&pdev->dev, "missing gop-port-id value\n"); in mvpp2_port_probe()
6858 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); in mvpp2_port_probe()
6859 port->stats_base = port->priv->iface_base + in mvpp2_port_probe()
6861 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; in mvpp2_port_probe()
6867 port->hwtstamp = true; in mvpp2_port_probe()
6871 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); in mvpp2_port_probe()
6872 if (!port->stats) { in mvpp2_port_probe()
6877 port->ethtool_stats = devm_kcalloc(&pdev->dev, in mvpp2_port_probe()
6880 if (!port->ethtool_stats) { in mvpp2_port_probe()
6885 mutex_init(&port->gather_stats_lock); in mvpp2_port_probe()
6886 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); in mvpp2_port_probe()
6892 port->tx_ring_size = MVPP2_MAX_TXD_DFLT; in mvpp2_port_probe()
6893 port->rx_ring_size = MVPP2_MAX_RXD_DFLT; in mvpp2_port_probe()
6896 err = mvpp2_port_init(port); in mvpp2_port_probe()
6898 dev_err(&pdev->dev, "failed to init port %d\n", id); in mvpp2_port_probe()
6902 mvpp2_port_periodic_xon_disable(port); in mvpp2_port_probe()
6904 mvpp2_mac_reset_assert(port); in mvpp2_port_probe()
6905 mvpp22_pcs_reset_assert(port); in mvpp2_port_probe()
6907 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); in mvpp2_port_probe()
6908 if (!port->pcpu) { in mvpp2_port_probe()
6913 if (!port->has_tx_irqs) { in mvpp2_port_probe()
6915 port_pcpu = per_cpu_ptr(port->pcpu, thread); in mvpp2_port_probe()
6931 if (mvpp22_rss_is_supported(port)) { in mvpp2_port_probe()
6936 if (!port->priv->percpu_pools) in mvpp2_port_probe()
6937 mvpp2_set_hw_csum(port, port->pool_long->id); in mvpp2_port_probe()
6938 else if (port->ntxqs >= num_possible_cpus() * 2) in mvpp2_port_probe()
6953 dev->dev_port = port->id; in mvpp2_port_probe()
6955 port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; in mvpp2_port_probe()
6956 port->pcs_gmac.neg_mode = true; in mvpp2_port_probe()
6957 port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; in mvpp2_port_probe()
6958 port->pcs_xlg.neg_mode = true; in mvpp2_port_probe()
6961 port->phylink_config.dev = &dev->dev; in mvpp2_port_probe()
6962 port->phylink_config.type = PHYLINK_NETDEV; in mvpp2_port_probe()
6963 port->phylink_config.mac_capabilities = in mvpp2_port_probe()
6966 if (port->priv->global_tx_fc) in mvpp2_port_probe()
6967 port->phylink_config.mac_capabilities |= in mvpp2_port_probe()
6970 if (mvpp2_port_supports_xlg(port)) { in mvpp2_port_probe()
6976 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
6978 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
6980 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
6983 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
6986 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
6989 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
6993 port->phylink_config.mac_capabilities |= in mvpp2_port_probe()
6996 port->phylink_config.mac_capabilities |= in mvpp2_port_probe()
6999 port->phylink_config.mac_capabilities |= in mvpp2_port_probe()
7003 if (mvpp2_port_supports_rgmii(port)) { in mvpp2_port_probe()
7004 phy_interface_set_rgmii(port->phylink_config.supported_interfaces); in mvpp2_port_probe()
7006 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
7014 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
7016 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
7018 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
7022 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
7028 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
7030 port->phylink_config.supported_interfaces); in mvpp2_port_probe()
7033 phylink = phylink_create(&port->phylink_config, port_fwnode, in mvpp2_port_probe()
7039 port->phylink = phylink; in mvpp2_port_probe()
7041 dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id); in mvpp2_port_probe()
7042 port->phylink = NULL; in mvpp2_port_probe()
7045 /* Cycle the comphy to power it down, saving 270mW per port - in mvpp2_port_probe()
7049 if (port->comphy) { in mvpp2_port_probe()
7050 err = mvpp22_comphy_init(port, port->phy_interface); in mvpp2_port_probe()
7052 phy_power_off(port->comphy); in mvpp2_port_probe()
7062 priv->port_list[priv->port_count++] = port; in mvpp2_port_probe()
7067 if (port->phylink) in mvpp2_port_probe()
7068 phylink_destroy(port->phylink); in mvpp2_port_probe()
7070 free_percpu(port->pcpu); in mvpp2_port_probe()
7072 for (i = 0; i < port->ntxqs; i++) in mvpp2_port_probe()
7073 free_percpu(port->txqs[i]->pcpu); in mvpp2_port_probe()
7075 free_percpu(port->stats); in mvpp2_port_probe()
7077 if (port->port_irq) in mvpp2_port_probe()
7078 irq_dispose_mapping(port->port_irq); in mvpp2_port_probe()
7080 mvpp2_queue_vectors_deinit(port); in mvpp2_port_probe()
7087 static void mvpp2_port_remove(struct mvpp2_port *port) in mvpp2_port_remove() argument
7091 unregister_netdev(port->dev); in mvpp2_port_remove()
7092 if (port->phylink) in mvpp2_port_remove()
7093 phylink_destroy(port->phylink); in mvpp2_port_remove()
7094 free_percpu(port->pcpu); in mvpp2_port_remove()
7095 free_percpu(port->stats); in mvpp2_port_remove()
7096 for (i = 0; i < port->ntxqs; i++) in mvpp2_port_remove()
7097 free_percpu(port->txqs[i]->pcpu); in mvpp2_port_remove()
7098 mvpp2_queue_vectors_deinit(port); in mvpp2_port_remove()
7099 if (port->port_irq) in mvpp2_port_remove()
7100 irq_dispose_mapping(port->port_irq); in mvpp2_port_remove()
7101 free_netdev(port->dev); in mvpp2_port_remove()
7140 int port; in mvpp2_rx_fifo_init() local
7142 for (port = 0; port < MVPP2_MAX_PORTS; port++) { in mvpp2_rx_fifo_init()
7143 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), in mvpp2_rx_fifo_init()
7145 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), in mvpp2_rx_fifo_init()
7154 static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size) in mvpp22_rx_fifo_set_hw() argument
7158 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size); in mvpp22_rx_fifo_set_hw()
7159 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size); in mvpp22_rx_fifo_set_hw()
7163 * 4kB fixed space must be assigned for the loopback port.
7165 * Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G
7173 int port, size; in mvpp22_rx_fifo_init() local
7181 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) in mvpp22_rx_fifo_init()
7182 mvpp22_rx_fifo_set_hw(priv, port, 0); in mvpp22_rx_fifo_init()
7188 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { in mvpp22_rx_fifo_init()
7191 else if (port == 0) in mvpp22_rx_fifo_init()
7194 else if (port == 1) in mvpp22_rx_fifo_init()
7203 mvpp22_rx_fifo_set_hw(priv, port, size); in mvpp22_rx_fifo_init()
7214 int port, val; in mvpp23_rx_fifo_fc_set_tresh() local
7216 /* Port 0: maximum speed -10Gb/s port in mvpp23_rx_fifo_fc_set_tresh()
7218 * Port 1: maximum speed -5Gb/s port in mvpp23_rx_fifo_fc_set_tresh()
7220 * Port 2: maximum speed -1Gb/s port in mvpp23_rx_fifo_fc_set_tresh()
7224 /* Without loopback port */ in mvpp23_rx_fifo_fc_set_tresh()
7225 for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) { in mvpp23_rx_fifo_fc_set_tresh()
7226 if (port == 0) { in mvpp23_rx_fifo_fc_set_tresh()
7230 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); in mvpp23_rx_fifo_fc_set_tresh()
7231 } else if (port == 1) { in mvpp23_rx_fifo_fc_set_tresh()
7235 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); in mvpp23_rx_fifo_fc_set_tresh()
7240 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); in mvpp23_rx_fifo_fc_set_tresh()
7246 void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en) in mvpp23_rx_fifo_fc_en() argument
7250 val = mvpp2_read(priv, MVPP2_RX_FC_REG(port)); in mvpp23_rx_fifo_fc_en()
7257 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); in mvpp23_rx_fifo_fc_en()
7260 static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size) in mvpp22_tx_fifo_set_hw() argument
7264 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); in mvpp22_tx_fifo_set_hw()
7265 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold); in mvpp22_tx_fifo_set_hw()
7269 * 1kB fixed space must be assigned for the loopback port.
7272 * per single port).
7279 int port, size; in mvpp22_tx_fifo_init() local
7287 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) in mvpp22_tx_fifo_init()
7288 mvpp22_tx_fifo_set_hw(priv, port, 0); in mvpp22_tx_fifo_init()
7294 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { in mvpp22_tx_fifo_init()
7298 else if (port == 0) in mvpp22_tx_fifo_init()
7306 mvpp22_tx_fifo_set_hw(priv, port, size); in mvpp22_tx_fifo_init()
7636 if (!fwnode_property_read_u32(port_fwnode, "port-id", &i)) in mvpp2_probe()