// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

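/* Fill in the ring's shadow state words: error-accept and coherency
 * flags, the ring DMA base address (programmed as addr >> 8, split into
 * low/high fields) and the encoded ring size.
 */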
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

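/* Write the ring number to CSR_RING_CONFIG, then push all shadow state
 * words out through the CSR_RING_WR_BASE window.
 */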
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

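/* Bring up a descriptor ring: program its state and ID, derive the
 * number of slots, and, for CPU-owned rings that are not buffer pools,
 * mark every descriptor slot empty and set the ring's bit in
 * CSR_RING_NE_INT_MODE.
 */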
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

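/* Indirect MAC register write: latch the register address and data,
 * issue XGENE_ENET_WR_CMD and briefly poll the command-done register
 * before releasing the command. When an external MDIO driver manages an
 * RGMII PHY, the write is forwarded to xgene_mdio_wr_mac() instead.
 */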
void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;
	struct net_device *ndev = pdata->ndev;
	u8 wait = 10;
	u32 done;

	if (pdata->mdio_driver && ndev->phydev &&
	    phy_interface_mode_is_rgmii(pdata->phy_mode)) {
		struct mii_bus *bus = ndev->phydev->mdio.bus;

		return xgene_mdio_wr_mac(bus->priv, wr_addr, wr_data);
	}

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->mac_lock);
	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n",
			   wr_addr, wr_data);

	iowrite32(0, cmd);
	spin_unlock(&pdata->mac_lock);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;
	struct net_device *ndev = pdata->ndev;
	u32 done, rd_data;
	u8 wait = 10;

	if (pdata->mdio_driver && ndev->phydev &&
	    phy_interface_mode_is_rgmii(pdata->phy_mode)) {
		struct mii_bus *bus = ndev->phydev->mdio.bus;

		return xgene_mdio_rd_mac(bus->priv, rd_addr);
	}

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->mac_lock);
	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		netdev_err(ndev, "mac read failed, addr: %04x\n", rd_addr);

	rd_data = ioread32(rd);
	iowrite32(0, cmd);
	spin_unlock(&pdata->mac_lock);

	return rd_data;
}

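/* Indirect statistics counter read, using the same address/command/
 * command-done handshake as the MAC registers but against the MCX
 * statistics block.
 */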
u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;
	u32 done, rd_data;
	u8 wait = 10;

	addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET;
	rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET;
	cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->stats_lock);
	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		netdev_err(pdata->ndev, "mac stats read failed, addr: %04x\n",
			   rd_addr);

	rd_data = ioread32(rd);
	iowrite32(0, cmd);
	spin_unlock(&pdata->stats_lock);

	return rd_data;
}

static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	const u8 *dev_addr = pdata->ndev->dev_addr;
	u32 addr0, addr1;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

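/* Release the Ethernet block RAMs from shutdown and poll the memory
 * ready register until all banks report ready (0xffffffff) or the wait
 * count expires.
 */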
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}

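/* Program the MAC, interface-control, ICM and RGMII registers for the
 * current PHY speed (10/100/1000), including the per-speed
 * wait-async-read values and, for gigabit, the RGMII Tx/Rx clock
 * delays.
 */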
static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
	u32 icm0, icm2, mc2;
	u32 intf_ctl, rgmii, value;

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR);
	intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
		CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
	xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}

static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}

static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
				       bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);

	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);

	xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
}

static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);

	if (enable)
		data |= TX_FLOW_EN;
	else
		data &= ~TX_FLOW_EN;

	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);

	pdata->mac_ops->enable_tx_pause(pdata, enable);
}

static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);

	if (enable)
		data |= RX_FLOW_EN;
	else
		data &= ~RX_FLOW_EN;

	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
}

static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value;

	if (!pdata->mdio_driver)
		xgene_gmac_reset(pdata);

	xgene_gmac_set_speed(pdata);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Configure HW pause frame generation */
	xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
	value = (DEF_QUANTA << 16) | (value & 0xFFFF);
	xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);

	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);

	xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
	xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
				    u32 *rx, u32 *tx)
{
	u32 count;

	xgene_enet_rd_mcx_csr(pdata, ICM_ECM_DROP_COUNT_REG0_ADDR, &count);
	*rx = ICM_DROP_COUNT(count);
	*tx = ECM_DROP_COUNT(count);
	/* Errata: 10GE_4 - Fix ICM_ECM_DROP_COUNT not clear-on-read */
	xgene_enet_rd_mcx_csr(pdata, ECM_CONFIG0_REG_0_ADDR, &count);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

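/* Enable classifier (CLE) bypass and program the destination ring and
 * buffer pool selections used for ingress traffic.
 */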
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cb;
	u32 fpsel, nxtfpsel;

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (pdata->mdio_driver) {
		xgene_enet_config_ring_if_assoc(pdata);
		return 0;
	}

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		acpi_status status;

		status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					      "_RST", NULL, NULL);
		if (ACPI_FAILURE(status)) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

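/* Resolve Tx/Rx pause from the local advertisement and the link
 * partner's pause/asym-pause bits (full duplex with pause autoneg only)
 * and apply any changes to the MAC.
 */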
static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u16 lcladv, rmtadv = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl = 0;

	if (!phydev->duplex || !pdata->pause_autoneg)
		return 0;

	if (pdata->tx_pause)
		flowctl |= FLOW_CTRL_TX;

	if (pdata->rx_pause)
		flowctl |= FLOW_CTRL_RX;

	lcladv = mii_advertise_flowctrl(flowctl);

	if (phydev->pause)
		rmtadv = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		rmtadv |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	tx_pause = !!(flowctl & FLOW_CTRL_TX);
	rx_pause = !!(flowctl & FLOW_CTRL_RX);

	if (tx_pause != pdata->tx_pause) {
		pdata->tx_pause = tx_pause;
		pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
	}

	if (rx_pause != pdata->rx_pause) {
		pdata->rx_pause = rx_pause;
		pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
	}

	return 0;
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			mac_ops->set_speed(pdata);
			mac_ops->rx_enable(pdata);
			mac_ops->tx_enable(pdata);
			phy_print_status(phydev);
		}

		xgene_enet_flowctrl_cfg(ndev);
	} else {
		mac_ops->rx_disable(pdata);
		mac_ops->tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
	struct fwnode_reference_args args;
	struct fwnode_handle *fw_node;
	int status;

	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
						  &args);
	if (ACPI_FAILURE(status) || !is_acpi_device_node(args.fwnode)) {
		dev_dbg(dev, "No matching phy in ACPI table\n");
		return NULL;
	}

	return to_acpi_device_node(args.fwnode);
}
#endif

int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;
	int i;

	if (dev->of_node) {
		for (i = 0; i < 2; i++) {
			np = of_parse_phandle(dev->of_node, "phy-handle", i);
			phy_dev = of_phy_connect(ndev, np,
						 &xgene_enet_adjust_link,
						 0, pdata->phy_mode);
			of_node_put(np);
			if (phy_dev)
				break;
		}

		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	} else {
#ifdef CONFIG_ACPI
		struct acpi_device *adev = acpi_phy_find_device(dev);

		if (adev)
			phy_dev = adev->driver_data;
		else
			phy_dev = NULL;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
#else
		return -ENODEV;
#endif
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_support_asym_pause(phy_dev);

	return 0;
}

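/* Register the MDIO bus: on device-tree systems, from the
 * "apm,xgene-mdio" child node; otherwise (ACPI), register an empty bus
 * and attach the PHY found via the "phy-channel"/"phy-addr" property.
 */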
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	u32 phy_addr;
	int ret;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
	if (ret)
		return -EINVAL;

	phy = xgene_enet_phy_register(mdio, phy_addr);
	if (!phy)
		return -EIO;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_mdio_rgmii_read;
	mdio_bus->write = xgene_mdio_rgmii_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
	mdio_bus->parent = &pdata->pdev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.get_drop_cnt = xgene_gmac_get_drop_cnt,
	.set_speed = xgene_gmac_set_speed,
	.set_mac_addr = xgene_gmac_set_mac_addr,
	.set_framesize = xgene_enet_set_frame_size,
	.enable_tx_pause = xgene_gmac_enable_tx_pause,
	.flowctl_tx     = xgene_gmac_flowctl_tx,
	.flowctl_rx     = xgene_gmac_flowctl_rx,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};