1  // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  /*
3   * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support
4   *
5   * Authors: Quentin Schulz & Antoine Tenart
6   * License: Dual MIT/GPL
7   * Copyright (c) 2020 Microsemi Corporation
8   */
9  
10  #include <linux/gpio/consumer.h>
11  #include <linux/ip.h>
12  #include <linux/net_tstamp.h>
13  #include <linux/mii.h>
14  #include <linux/phy.h>
15  #include <linux/ptp_classify.h>
16  #include <linux/ptp_clock_kernel.h>
17  #include <linux/udp.h>
18  #include <linux/unaligned.h>
19  
20  #include "mscc.h"
21  #include "mscc_ptp.h"
22  
23  /* Two PHYs share the same 1588 processor and it's to be entirely configured
24   * through the base PHY of this processor.
25   */
26  /* phydev->bus->mdio_lock should be locked when using this function */
static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val)
28  {
29  	struct vsc8531_private *priv = phydev->priv;
30  
31  	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
32  	return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum,
33  			       val);
34  }
35  
36  /* phydev->bus->mdio_lock should be locked when using this function */
static int phy_ts_base_read(struct phy_device *phydev, u32 regnum)
38  {
39  	struct vsc8531_private *priv = phydev->priv;
40  
41  	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
42  	return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum);
43  }
44  
45  enum ts_blk_hw {
46  	INGRESS_ENGINE_0,
47  	EGRESS_ENGINE_0,
48  	INGRESS_ENGINE_1,
49  	EGRESS_ENGINE_1,
50  	INGRESS_ENGINE_2,
51  	EGRESS_ENGINE_2,
52  	PROCESSOR_0,
53  	PROCESSOR_1,
54  };
55  
56  enum ts_blk {
57  	INGRESS,
58  	EGRESS,
59  	PROCESSOR,
60  };
61  
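/* Map the logical block (INGRESS/EGRESS/PROCESSOR) of a given PHY onto the
 * hardware blocks of the shared 1588 processor: the base PHY uses engine 0
 * and processor 0, the other PHY of the pair uses engine 1 and processor 1.
 * The engine 2 blocks are left unused here.
 */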
static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk,
63  			       u16 addr)
64  {
65  	struct vsc8531_private *priv = phydev->priv;
66  	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
67  	u32 val, cnt = 0;
68  	enum ts_blk_hw blk_hw;
69  
70  	switch (blk) {
71  	case INGRESS:
72  		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
73  		break;
74  	case EGRESS:
75  		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
76  		break;
77  	case PROCESSOR:
78  	default:
79  		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
80  		break;
81  	}
82  
83  	phy_lock_mdio_bus(phydev);
84  
85  	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
86  
87  	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
88  			  BIU_ADDR_READ | BIU_BLK_ID(blk_hw) |
89  			  BIU_CSR_ADDR(addr));
90  
91  	do {
92  		val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
93  	} while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);
94  
95  	val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB);
96  	val <<= 16;
97  	val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB);
98  
99  	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
100  
101  	phy_unlock_mdio_bus(phydev);
102  
103  	return val;
104  }
105  
static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
107  				 u16 addr, u32 val)
108  {
109  	struct vsc8531_private *priv = phydev->priv;
110  	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
111  	u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16;
112  	bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL ||
113  		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK ||
114  		     addr == MSCC_PHY_1588_VSC85XX_INT_MASK ||
115  		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS ||
116  		     addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) &&
117  		    blk == PROCESSOR;
118  	enum ts_blk_hw blk_hw;
119  
120  	switch (blk) {
121  	case INGRESS:
122  		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
123  		break;
124  	case EGRESS:
125  		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
126  		break;
127  	case PROCESSOR:
128  	default:
129  		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
130  		break;
131  	}
132  
133  	phy_lock_mdio_bus(phydev);
134  
135  	bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);
136  
137  	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
138  
139  	if (!cond || upper)
140  		phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);
141  
142  	phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);
143  
144  	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
145  			  BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) |
146  			  BIU_CSR_ADDR(addr));
147  
148  	do {
149  		reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
150  	} while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);
151  
152  	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
153  
154  	if (cond && upper)
155  		phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass);
156  
157  	phy_unlock_mdio_bus(phydev);
158  }
159  
160  /* Pick bytes from PTP header */
161  #define PTP_HEADER_TRNSP_MSG		26
162  #define PTP_HEADER_DOMAIN_NUM		25
163  #define PTP_HEADER_BYTE_8_31(x)		(31 - (x))
164  #define MAC_ADDRESS_BYTE(x)		((x) + (35 - ETH_ALEN + 1))
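/* Byte selectors used to build the 16-byte egress timestamp FIFO signature:
 * they pick PTP header fields and the destination MAC address out of the
 * outgoing frame (the numbering is assumed to follow the analyzer's frame
 * signature builder encoding).
 */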
165  
static int vsc85xx_ts_fsb_init(struct phy_device *phydev)
167  {
168  	u8 sig_sel[16] = {};
169  	signed char i, pos = 0;
170  
171  	/* Seq ID is 2B long and starts at 30th byte */
172  	for (i = 1; i >= 0; i--)
173  		sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i);
174  
175  	/* DomainNum */
176  	sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM;
177  
178  	/* MsgType */
179  	sig_sel[pos++] = PTP_HEADER_TRNSP_MSG;
180  
181  	/* MAC address is 6B long */
182  	for (i = ETH_ALEN - 1; i >= 0; i--)
183  		sig_sel[pos++] = MAC_ADDRESS_BYTE(i);
184  
185  	/* Fill the last bytes of the signature to reach a 16B signature */
186  	for (; pos < ARRAY_SIZE(sig_sel); pos++)
187  		sig_sel[pos] = PTP_HEADER_TRNSP_MSG;
188  
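	/* Pack the first 15 selectors into FSB registers 0-2, five 6-bit
	 * fields per register; the 16th selector goes alone into register 3.
	 */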
189  	for (i = 0; i <= 2; i++) {
190  		u32 val = 0;
191  
192  		for (pos = i * 5 + 4; pos >= i * 5; pos--)
193  			val = (val << 6) | sig_sel[pos];
194  
195  		vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i),
196  				     val);
197  	}
198  
199  	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3),
200  			     sig_sel[15]);
201  
202  	return 0;
203  }
204  
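/* Local latencies indexed by speed in vsc85xx_ts_set_latencies():
 * [0] = 1000Mbps, [1] = 100Mbps, [2] = 10Mbps (copper). The trailing fiber
 * entries are not indexed by the code below. Values are assumed to be in
 * nanoseconds, as programmed into the *_LOCAL_LATENCY registers.
 */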
205  static const u32 vsc85xx_egr_latency[] = {
206  	/* Copper Egress */
207  	1272, /* 1000Mbps */
208  	12516, /* 100Mbps */
209  	125444, /* 10Mbps */
210  	/* Fiber Egress */
211  	1277, /* 1000Mbps */
212  	12537, /* 100Mbps */
213  };
214  
215  static const u32 vsc85xx_egr_latency_macsec[] = {
216  	/* Copper Egress ON */
217  	3496, /* 1000Mbps */
218  	34760, /* 100Mbps */
219  	347844, /* 10Mbps */
220  	/* Fiber Egress ON */
221  	3502, /* 1000Mbps */
222  	34780, /* 100Mbps */
223  };
224  
225  static const u32 vsc85xx_ingr_latency[] = {
226  	/* Copper Ingress */
227  	208, /* 1000Mbps */
228  	304, /* 100Mbps */
229  	2023, /* 10Mbps */
230  	/* Fiber Ingress */
231  	98, /* 1000Mbps */
232  	197, /* 100Mbps */
233  };
234  
235  static const u32 vsc85xx_ingr_latency_macsec[] = {
236  	/* Copper Ingress */
237  	2408, /* 1000Mbps */
238  	22300, /* 100Mbps */
239  	222009, /* 10Mbps */
240  	/* Fiber Ingress */
241  	2299, /* 1000Mbps */
242  	22192, /* 100Mbps */
243  };
244  
static void vsc85xx_ts_set_latencies(struct phy_device *phydev)
246  {
247  	u32 val, ingr_latency, egr_latency;
248  	u8 idx;
249  
250  	/* No need to set latencies of packets if the PHY is not connected */
251  	if (!phydev->link)
252  		return;
253  
254  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY,
255  			     STALL_EGR_LATENCY(phydev->speed));
256  
257  	switch (phydev->speed) {
258  	case SPEED_100:
259  		idx = 1;
260  		break;
261  	case SPEED_1000:
262  		idx = 0;
263  		break;
264  	default:
265  		idx = 2;
266  		break;
267  	}
268  
269  	ingr_latency = IS_ENABLED(CONFIG_MACSEC) ?
270  		vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx];
271  	egr_latency = IS_ENABLED(CONFIG_MACSEC) ?
272  		vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx];
273  
274  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY,
275  			     PTP_INGR_LOCAL_LATENCY(ingr_latency));
276  
277  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
278  				  MSCC_PHY_PTP_INGR_TSP_CTRL);
279  	val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS;
280  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
281  			     val);
282  
283  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY,
284  			     PTP_EGR_LOCAL_LATENCY(egr_latency));
285  
286  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
287  	val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS;
288  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
289  }
290  
static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk)
292  {
293  	u8 i;
294  
295  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0);
296  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
297  			     IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2));
298  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0);
299  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM,
300  			     IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2));
301  	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0);
302  	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0);
303  	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0);
304  
305  	for (i = 0; i < COMP_MAX_FLOWS; i++) {
306  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i),
307  				     IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1);
308  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i),
309  				     IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1);
310  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i),
311  				     ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1);
312  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i),
313  				     ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1);
314  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i),
315  				     MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1);
316  
317  		if (i >= PTP_COMP_MAX_FLOWS)
318  			continue;
319  
320  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0);
321  		vsc85xx_ts_write_csr(phydev, blk,
322  				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0);
323  		vsc85xx_ts_write_csr(phydev, blk,
324  				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0);
325  		vsc85xx_ts_write_csr(phydev, blk,
326  				     MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0);
327  		vsc85xx_ts_write_csr(phydev, blk,
328  				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0);
329  		vsc85xx_ts_write_csr(phydev, blk,
330  				     MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0);
331  		vsc85xx_ts_write_csr(phydev, blk,
332  				     MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0);
333  		vsc85xx_ts_write_csr(phydev, blk,
334  				     MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0);
335  		vsc85xx_ts_write_csr(phydev, blk,
336  				     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0);
337  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i),
338  				     0);
339  	}
340  
341  	return 0;
342  }
343  
static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev)
345  {
346  	u32 val;
347  
348  	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT);
349  	val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK;
350  	val |= ANA_ETH1_NTX_PROT_SIG_OFF(0);
351  	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val);
352  
353  	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG);
354  	val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK;
355  	val |= ANA_FSB_ADDR_FROM_ETH1;
356  	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val);
357  
358  	return 0;
359  }
360  
static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb,
362  						struct iphdr *iphdr,
363  						struct udphdr *udphdr)
364  {
365  	if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP)
366  		return NULL;
367  
368  	return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN);
369  }
370  
static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb)
372  {
373  	struct ethhdr *ethhdr = eth_hdr(skb);
374  	struct udphdr *udphdr;
375  	struct iphdr *iphdr;
376  
377  	if (ethhdr->h_proto == htons(ETH_P_1588))
378  		return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) +
379  						 skb_mac_header_len(skb));
380  
381  	if (ethhdr->h_proto != htons(ETH_P_IP))
382  		return NULL;
383  
384  	iphdr = ip_hdr(skb);
385  	udphdr = udp_hdr(skb);
386  
387  	return get_ptp_header_l4(skb, iphdr, udphdr);
388  }
389  
static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb,
391  						enum hwtstamp_rx_filters rx_filter)
392  {
393  	struct udphdr *udphdr;
394  	struct iphdr *iphdr;
395  
396  	if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
397  		return (struct vsc85xx_ptphdr *)skb->data;
398  
399  	iphdr = (struct iphdr *)skb->data;
400  	udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4);
401  
402  	return get_ptp_header_l4(skb, iphdr, udphdr);
403  }
404  
static int get_sig(struct sk_buff *skb, u8 *sig)
406  {
407  	struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb);
408  	struct ethhdr *ethhdr = eth_hdr(skb);
409  	unsigned int i;
410  
411  	if (!ptphdr)
412  		return -EOPNOTSUPP;
413  
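	/* Build the same 16-byte signature as programmed into the frame
	 * signature builder by vsc85xx_ts_fsb_init(): sequence id, domain
	 * number, message type, destination MAC address, then message-type
	 * padding.
	 */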
414  	sig[0] = (__force u16)ptphdr->seq_id >> 8;
415  	sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0);
416  	sig[2] = ptphdr->domain;
417  	sig[3] = ptphdr->tsmt & GENMASK(3, 0);
418  
419  	memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN);
420  
421  	/* Fill the last bytes of the signature to reach a 16B signature */
422  	for (i = 10; i < 16; i++)
423  		sig[i] = ptphdr->tsmt & GENMASK(3, 0);
424  
425  	return 0;
426  }
427  
static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
429  {
430  	struct skb_shared_hwtstamps shhwtstamps;
431  	struct vsc85xx_ts_fifo fifo;
432  	struct sk_buff *skb;
433  	u8 skb_sig[16], *p;
434  	int i, len;
435  	u32 reg;
436  
437  	memset(&fifo, 0, sizeof(fifo));
438  	p = (u8 *)&fifo;
439  
440  	reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
441  				  MSCC_PHY_PTP_EGR_TS_FIFO(0));
442  	if (reg & PTP_EGR_TS_FIFO_EMPTY)
443  		return;
444  
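	/* Only the low 16 bits of FIFO register 0 are used here; registers
	 * 1-6 provide 32 bits each. Together they fill the struct
	 * vsc85xx_ts_fifo entry (10 bytes of timestamp plus the 16-byte
	 * signature).
	 */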
445  	*p++ = reg & 0xff;
446  	*p++ = (reg >> 8) & 0xff;
447  
448  	/* Read the current FIFO item. Reading FIFO6 pops the next one. */
449  	for (i = 1; i < 7; i++) {
450  		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
451  					  MSCC_PHY_PTP_EGR_TS_FIFO(i));
452  		*p++ = reg & 0xff;
453  		*p++ = (reg >> 8) & 0xff;
454  		*p++ = (reg >> 16) & 0xff;
455  		*p++ = (reg >> 24) & 0xff;
456  	}
457  
458  	len = skb_queue_len(&ptp->tx_queue);
459  	if (len < 1)
460  		return;
461  
462  	while (len--) {
463  		skb = __skb_dequeue(&ptp->tx_queue);
464  		if (!skb)
465  			return;
466  
467  		/* Can't get the signature of the packet, won't ever
468  		 * be able to have one so let's dequeue the packet.
469  		 */
470  		if (get_sig(skb, skb_sig) < 0) {
471  			kfree_skb(skb);
472  			continue;
473  		}
474  
475  		/* Check if we found the signature we were looking for. */
476  		if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) {
477  			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
478  			shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns);
479  			skb_complete_tx_timestamp(skb, &shhwtstamps);
480  
481  			return;
482  		}
483  
484  		/* Valid signature but does not match the one of the
485  		 * packet in the FIFO right now, reschedule it for later
486  		 * packets.
487  		 */
488  		__skb_queue_tail(&ptp->tx_queue, skb);
489  	}
490  }
491  
static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp)
493  {
494  	u32 reg;
495  
496  	do {
497  		vsc85xx_dequeue_skb(ptp);
498  
499  		/* If other timestamps are available in the FIFO, process them. */
500  		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
501  					  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
502  	} while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1);
503  }
504  
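/* Set up one PTP comparator flow per timestamped message type (Sync and
 * Delay_Req): each flow matches on the message type only, with the domain
 * range check disabled, and is tied to channel 0 for the base PHY or
 * channel 1 for the other PHY of the pair.
 */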
static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
506  {
507  	struct vsc8531_private *vsc8531 = phydev->priv;
508  	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
509  	static const u8 msgs[] = {
510  		PTP_MSGTYPE_SYNC,
511  		PTP_MSGTYPE_DELAY_REQ
512  	};
513  	u32 val;
514  	u8 i;
515  
516  	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
517  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
518  				     base ? PTP_FLOW_VALID_CH0 :
519  				     PTP_FLOW_VALID_CH1);
520  
521  		val = vsc85xx_ts_read_csr(phydev, blk,
522  					  MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i));
523  		val &= ~PTP_FLOW_DOMAIN_RANGE_ENA;
524  		vsc85xx_ts_write_csr(phydev, blk,
525  				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val);
526  
527  		vsc85xx_ts_write_csr(phydev, blk,
528  				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i),
529  				     msgs[i] << 24);
530  
531  		vsc85xx_ts_write_csr(phydev, blk,
532  				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i),
533  				     PTP_FLOW_MSG_TYPE_MASK);
534  	}
535  
536  	return 0;
537  }
538  
static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
540  {
541  	struct vsc8531_private *vsc8531 = phydev->priv;
542  	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
543  	u32 val;
544  
545  	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0);
546  	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID,
547  			     ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD));
548  
549  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0),
550  			     base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1);
551  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
552  			     ANA_ETH1_FLOW_MATCH_VLAN_TAG2);
553  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
554  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0);
555  	vsc85xx_ts_write_csr(phydev, blk,
556  			     MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0);
557  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0);
558  	vsc85xx_ts_write_csr(phydev, blk,
559  			     MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0);
560  
561  	val = vsc85xx_ts_read_csr(phydev, blk,
562  				  MSCC_ANA_ETH1_FLOW_MATCH_MODE(0));
563  	val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK;
564  	val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY;
565  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
566  			     val);
567  
568  	return 0;
569  }
570  
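/* Set up IP comparator 1 to match PTP event messages over UDP/IPv4: any
 * source or destination IP address is accepted, only the UDP destination
 * port has to match PTP_EV_PORT (319).
 */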
static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
572  {
573  	struct vsc8531_private *vsc8531 = phydev->priv;
574  	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
575  	u32 val;
576  
577  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER,
578  			     PTP_EV_PORT);
579  	/* Match on dest port only, ignore src */
580  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER,
581  			     0xffff);
582  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER,
583  			     0);
584  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0);
585  
586  	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
587  	val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK;
588  	val |= base ? IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1;
589  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);
590  
591  	/* Match all IPs */
592  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0);
593  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0);
594  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0),
595  			     0);
596  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0),
597  			     0);
598  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0),
599  			     0);
600  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0),
601  			     0);
602  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0);
603  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0);
604  
605  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0);
606  
607  	return 0;
608  }
609  
static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm)
611  {
612  	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
613  	struct phy_device *phydev = ptp->phydev;
614  	struct vsc8531_private *priv = phydev->priv;
615  	u64 adj = 0;
616  	u32 val;
617  
618  	if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL)
619  		return 0;
620  
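	/* scaled_ppm is the requested frequency offset in parts per million
	 * with a 16-bit fractional part. The LTC auto-adjustment adds or
	 * subtracts 1 ns each time the programmed rollover period (in ns)
	 * elapses, i.e. a fractional rate of 1/adj, hence
	 * adj = 65536 * 1000000 / |scaled_ppm|. Offsets below ~1 ppb
	 * (scaled_ppm < 66) were already discarded above.
	 */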
621  	adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm));
622  	if (adj > 1000000000L)
623  		adj = 1000000000L;
624  
625  	val = PTP_AUTO_ADJ_NS_ROLLOVER(adj);
626  	val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS;
627  
628  	mutex_lock(&priv->phc_lock);
629  
	/* Write the adjustment period (in nanoseconds) and direction to the
	 * auto-adjust register.
	 */
631  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ,
632  			     val);
633  
634  	/* The auto adjust update val is set to 0 after write operation. */
635  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
636  	val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE;
637  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
638  
639  	mutex_unlock(&priv->phc_lock);
640  
641  	return 0;
642  }
643  
static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
645  {
646  	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
647  	struct phy_device *phydev = ptp->phydev;
648  	struct vsc85xx_shared_private *shared =
649  		(struct vsc85xx_shared_private *)phydev->shared->priv;
650  	struct vsc8531_private *priv = phydev->priv;
651  	u32 val;
652  
653  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
654  	val |= PTP_LTC_CTRL_SAVE_ENA;
655  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
656  
657  	/* Local Time Counter (LTC) is put in SAVE* regs on rising edge of
658  	 * LOAD_SAVE pin.
659  	 */
660  	mutex_lock(&shared->gpio_lock);
661  	gpiod_set_value(priv->load_save, 1);
662  
663  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
664  				  MSCC_PHY_PTP_LTC_SAVED_SEC_MSB);
665  
666  	ts->tv_sec = ((time64_t)val) << 32;
667  
668  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
669  				  MSCC_PHY_PTP_LTC_SAVED_SEC_LSB);
670  	ts->tv_sec += val;
671  
672  	ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR,
673  					  MSCC_PHY_PTP_LTC_SAVED_NS);
674  
675  	gpiod_set_value(priv->load_save, 0);
676  	mutex_unlock(&shared->gpio_lock);
677  
678  	return 0;
679  }
680  
static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
682  {
683  	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
684  	struct phy_device *phydev = ptp->phydev;
685  	struct vsc8531_private *priv = phydev->priv;
686  
687  	mutex_lock(&priv->phc_lock);
688  	__vsc85xx_gettime(info, ts);
689  	mutex_unlock(&priv->phc_lock);
690  
691  	return 0;
692  }
693  
static int __vsc85xx_settime(struct ptp_clock_info *info,
695  			     const struct timespec64 *ts)
696  {
697  	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
698  	struct phy_device *phydev = ptp->phydev;
699  	struct vsc85xx_shared_private *shared =
700  		(struct vsc85xx_shared_private *)phydev->shared->priv;
701  	struct vsc8531_private *priv = phydev->priv;
702  	u32 val;
703  
704  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
705  			     PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
706  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
707  			     PTP_LTC_LOAD_SEC_LSB(ts->tv_sec));
708  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS,
709  			     PTP_LTC_LOAD_NS(ts->tv_nsec));
710  
711  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
712  	val |= PTP_LTC_CTRL_LOAD_ENA;
713  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
714  
715  	/* Local Time Counter (LTC) is set from LOAD* regs on rising edge of
716  	 * LOAD_SAVE pin.
717  	 */
718  	mutex_lock(&shared->gpio_lock);
719  	gpiod_set_value(priv->load_save, 1);
720  
721  	val &= ~PTP_LTC_CTRL_LOAD_ENA;
722  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
723  
724  	gpiod_set_value(priv->load_save, 0);
725  	mutex_unlock(&shared->gpio_lock);
726  
727  	return 0;
728  }
729  
static int vsc85xx_settime(struct ptp_clock_info *info,
731  			   const struct timespec64 *ts)
732  {
733  	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
734  	struct phy_device *phydev = ptp->phydev;
735  	struct vsc8531_private *priv = phydev->priv;
736  
737  	mutex_lock(&priv->phc_lock);
738  	__vsc85xx_settime(info, ts);
739  	mutex_unlock(&priv->phc_lock);
740  
741  	return 0;
742  }
743  
static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta)
745  {
746  	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
747  	struct phy_device *phydev = ptp->phydev;
748  	struct vsc8531_private *priv = phydev->priv;
749  	u32 val;
750  
751  	/* Can't recover that big of an offset. Let's set the time directly. */
752  	if (abs(delta) >= NSEC_PER_SEC) {
753  		struct timespec64 ts;
754  		u64 now;
755  
756  		mutex_lock(&priv->phc_lock);
757  
758  		__vsc85xx_gettime(info, &ts);
759  		now = ktime_to_ns(timespec64_to_ktime(ts));
760  		ts = ns_to_timespec64(now + delta);
761  		__vsc85xx_settime(info, &ts);
762  
763  		mutex_unlock(&priv->phc_lock);
764  
765  		return 0;
766  	}
767  
768  	mutex_lock(&priv->phc_lock);
769  
770  	val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ;
771  	if (delta > 0)
772  		val |= PTP_LTC_OFFSET_ADD;
773  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val);
774  
775  	mutex_unlock(&priv->phc_lock);
776  
777  	return 0;
778  }
779  
static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk,
781  				  u32 next_comp, u32 etype)
782  {
783  	u32 val;
784  
785  	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT);
786  	val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK;
787  	val |= next_comp;
788  	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val);
789  
790  	val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) |
791  		ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA;
792  	vsc85xx_ts_write_csr(phydev, blk,
793  			     MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val);
794  
795  	return 0;
796  }
797  
static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk,
799  				 u32 next_comp, u32 header)
800  {
801  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP,
802  			     ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) |
803  			     next_comp);
804  
805  	return 0;
806  }
807  
static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk, u8 flow, enum ptp_cmd cmd)
809  {
810  	u32 val;
811  
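	/* Three behaviours are programmed depending on the command:
	 * PTP_WRITE_NS writes the RX timestamp nanoseconds into the PTP
	 * header reserved field (offset 16, 4 bytes), PTP_WRITE_1588
	 * rewrites the origin timestamp in the frame (offset 34, 10 bytes)
	 * for one-step, and PTP_SAVE_IN_TS_FIFO only pushes the timestamp
	 * into the egress TS FIFO without modifying the frame.
	 */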
812  	/* Check non-zero reserved field */
813  	val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK;
814  	vsc85xx_ts_write_csr(phydev, blk,
815  			     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val);
816  
817  	val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) |
818  	      PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) |
819  	      PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ?
820  					  PTP_NOP : cmd);
821  	if (cmd == PTP_SAVE_IN_TS_FIFO)
822  		val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME;
823  	else if (cmd == PTP_WRITE_NS)
824  		val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE |
825  		       PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6);
826  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow),
827  			     val);
828  
829  	if (cmd == PTP_WRITE_1588)
830  		/* Rewrite timestamp directly in frame */
831  		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) |
832  		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10);
833  	else if (cmd == PTP_SAVE_IN_TS_FIFO)
834  		/* no rewrite */
835  		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) |
836  		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0);
837  	else
838  		/* Write in reserved field */
839  		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) |
840  		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4);
841  	vsc85xx_ts_write_csr(phydev, blk,
842  			     MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val);
843  
844  	return 0;
845  }
846  
static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
848  			    bool one_step, bool enable)
849  {
850  	static const u8 msgs[] = {
851  		PTP_MSGTYPE_SYNC,
852  		PTP_MSGTYPE_DELAY_REQ
853  	};
854  	u32 val;
855  	u8 i;
856  
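	/* On ingress the timestamp is written into the frame reserved field;
	 * on egress, Sync frames are rewritten in line when one-step is
	 * used, every other case saves the timestamp in the TS FIFO so it
	 * can be matched with a queued skb later on.
	 */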
857  	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
858  		if (blk == INGRESS)
859  			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
860  						   PTP_WRITE_NS);
861  		else if (msgs[i] == PTP_MSGTYPE_SYNC && one_step)
			/* No need to capture the Sync transmit timestamp
			 * when one-step is used: the timestamp is written
			 * into the frame directly.
			 */
863  			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
864  						   PTP_WRITE_1588);
865  		else
866  			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
867  						   PTP_SAVE_IN_TS_FIFO);
868  
869  		val = vsc85xx_ts_read_csr(phydev, blk,
870  					  MSCC_ANA_PTP_FLOW_ENA(i));
871  		val &= ~PTP_FLOW_ENA;
872  		if (enable)
873  			val |= PTP_FLOW_ENA;
874  		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
875  				     val);
876  	}
877  
878  	return 0;
879  }
880  
static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
882  			     bool enable)
883  {
884  	struct vsc8531_private *vsc8531 = phydev->priv;
885  	u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST;
886  
887  	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
888  		/* PTP over Ethernet multicast address for SYNC and DELAY msg */
889  		u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00};
890  
891  		val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR |
892  		       get_unaligned_be16(&ptp_multicast[4]);
893  		vsc85xx_ts_write_csr(phydev, blk,
894  				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
895  		vsc85xx_ts_write_csr(phydev, blk,
896  				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0),
897  				     get_unaligned_be32(ptp_multicast));
898  	} else {
899  		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
900  		vsc85xx_ts_write_csr(phydev, blk,
901  				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
902  		vsc85xx_ts_write_csr(phydev, blk,
903  				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
904  	}
905  
906  	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0));
907  	val &= ~ETH1_FLOW_ENA;
908  	if (enable)
909  		val |= ETH1_FLOW_ENA;
910  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val);
911  
912  	return 0;
913  }
914  
static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
916  			    bool enable)
917  {
918  	u32 val;
919  
920  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE,
921  			     ANA_IP1_NXT_PROT_IPV4 |
922  			     ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4);
923  
924  	/* Matching UDP protocol number */
925  	val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) |
926  	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) |
927  	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9);
928  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1,
929  			     val);
930  
931  	/* End of IP protocol, start of next protocol (UDP) */
932  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2,
933  			     ANA_IP1_NXT_PROT_OFFSET2(20));
934  
935  	val = vsc85xx_ts_read_csr(phydev, blk,
936  				  MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM);
937  	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK |
938  		 IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK);
939  	val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2);
940  
941  	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE |
942  		 IP1_NXT_PROT_UDP_CHKSUM_CLEAR);
943  	/* UDP checksum offset in IPv4 packet
944  	 * according to: https://tools.ietf.org/html/rfc768
945  	 */
946  	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
947  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
948  			     val);
949  
950  	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
951  	val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA);
952  	val |= IP1_FLOW_MATCH_DEST_SRC_ADDR;
953  	if (enable)
954  		val |= IP1_FLOW_ENA;
955  	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);
956  
957  	return 0;
958  }
959  
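/* Reconfigure the analyzer engine of this PHY (engine 0 for the base PHY,
 * engine 1 for the other one) according to the requested rx_filter and
 * tx_type: the Ethernet comparator is enabled whenever timestamping is
 * enabled in a direction, the IP comparator only for the L4 (UDP/IPv4)
 * filter, and each engine direction is re-enabled only if timestamping is
 * active for it.
 */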
static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step)
961  {
962  	struct vsc8531_private *vsc8531 = phydev->priv;
963  	bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr;
964  	u8 eng_id = base ? 0 : 1;
965  	u32 val;
966  
967  	ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
968  
969  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
970  				  MSCC_PHY_PTP_ANALYZER_MODE);
971  	/* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
972  	val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) |
973  		 PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)));
974  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
975  			     val);
976  
977  	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
978  		vsc85xx_eth1_next_comp(phydev, INGRESS,
979  				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
980  		vsc85xx_eth1_next_comp(phydev, EGRESS,
981  				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
982  	} else {
983  		vsc85xx_eth1_next_comp(phydev, INGRESS,
984  				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
985  				       ETH_P_IP);
986  		vsc85xx_eth1_next_comp(phydev, EGRESS,
987  				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
988  				       ETH_P_IP);
989  		/* Header length of IPv[4/6] + UDP */
990  		vsc85xx_ip1_next_comp(phydev, INGRESS,
991  				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
992  		vsc85xx_ip1_next_comp(phydev, EGRESS,
993  				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
994  	}
995  
996  	vsc85xx_eth1_conf(phydev, INGRESS,
997  			  vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
998  	vsc85xx_ip1_conf(phydev, INGRESS,
999  			 ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
1000  	vsc85xx_ptp_conf(phydev, INGRESS, one_step,
1001  			 vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
1002  
1003  	vsc85xx_eth1_conf(phydev, EGRESS,
1004  			  vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1005  	vsc85xx_ip1_conf(phydev, EGRESS,
1006  			 ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1007  	vsc85xx_ptp_conf(phydev, EGRESS, one_step,
1008  			 vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1009  
1010  	val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
1011  	if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF)
1012  		val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
1013  
1014  	val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
1015  	if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE)
1016  		val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
1017  
1018  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
1019  			     val);
1020  
1021  	return 0;
1022  }
1023  
void vsc85xx_link_change_notify(struct phy_device *phydev)
1025  {
1026  	struct vsc8531_private *priv = phydev->priv;
1027  
1028  	mutex_lock(&priv->ts_lock);
1029  	vsc85xx_ts_set_latencies(phydev);
1030  	mutex_unlock(&priv->ts_lock);
1031  }
1032  
static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
1034  {
1035  	u32 val;
1036  
1037  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1038  				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
1039  	val |= PTP_EGR_TS_FIFO_RESET;
1040  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1041  			     val);
1042  
1043  	val &= ~PTP_EGR_TS_FIFO_RESET;
1044  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1045  			     val);
1046  }
1047  
static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
1049  			    struct kernel_hwtstamp_config *cfg,
1050  			    struct netlink_ext_ack *extack)
1051  {
1052  	struct vsc8531_private *vsc8531 =
1053  		container_of(mii_ts, struct vsc8531_private, mii_ts);
1054  	struct phy_device *phydev = vsc8531->ptp->phydev;
1055  	bool one_step = false;
1056  	u32 val;
1057  
1058  	switch (cfg->tx_type) {
1059  	case HWTSTAMP_TX_ONESTEP_SYNC:
1060  		one_step = true;
1061  		break;
1062  	case HWTSTAMP_TX_ON:
1063  		break;
1064  	case HWTSTAMP_TX_OFF:
1065  		break;
1066  	default:
1067  		return -ERANGE;
1068  	}
1069  
1070  	vsc8531->ptp->tx_type = cfg->tx_type;
1071  
1072  	switch (cfg->rx_filter) {
1073  	case HWTSTAMP_FILTER_NONE:
1074  		break;
1075  	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1076  		/* ETH->IP->UDP->PTP */
1077  		break;
1078  	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1079  		/* ETH->PTP */
1080  		break;
1081  	default:
1082  		return -ERANGE;
1083  	}
1084  
1085  	vsc8531->ptp->rx_filter = cfg->rx_filter;
1086  
1087  	mutex_lock(&vsc8531->ts_lock);
1088  
1089  	__skb_queue_purge(&vsc8531->ptp->tx_queue);
1090  	__skb_queue_head_init(&vsc8531->ptp->tx_queue);
1091  
1092  	/* Disable predictor while configuring the 1588 block */
1093  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1094  				  MSCC_PHY_PTP_INGR_PREDICTOR);
1095  	val &= ~PTP_INGR_PREDICTOR_EN;
1096  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1097  			     val);
1098  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1099  				  MSCC_PHY_PTP_EGR_PREDICTOR);
1100  	val &= ~PTP_EGR_PREDICTOR_EN;
1101  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1102  			     val);
1103  
1104  	/* Bypass egress or ingress blocks if timestamping isn't used */
1105  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
1106  	val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS);
1107  	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
1108  		val |= PTP_IFACE_CTRL_EGR_BYPASS;
1109  	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE)
1110  		val |= PTP_IFACE_CTRL_INGR_BYPASS;
1111  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1112  
1113  	/* Resetting FIFO so that it's empty after reconfiguration */
1114  	vsc85xx_ts_reset_fifo(phydev);
1115  
1116  	vsc85xx_ts_engine_init(phydev, one_step);
1117  
1118  	/* Re-enable predictors now */
1119  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1120  				  MSCC_PHY_PTP_INGR_PREDICTOR);
1121  	val |= PTP_INGR_PREDICTOR_EN;
1122  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1123  			     val);
1124  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1125  				  MSCC_PHY_PTP_EGR_PREDICTOR);
1126  	val |= PTP_EGR_PREDICTOR_EN;
1127  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1128  			     val);
1129  
1130  	vsc8531->ptp->configured = 1;
1131  	mutex_unlock(&vsc8531->ts_lock);
1132  
1133  	return 0;
1134  }
1135  
static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
1137  			   struct kernel_ethtool_ts_info *info)
1138  {
1139  	struct vsc8531_private *vsc8531 =
1140  		container_of(mii_ts, struct vsc8531_private, mii_ts);
1141  
1142  	info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock);
1143  	info->so_timestamping =
1144  		SOF_TIMESTAMPING_TX_HARDWARE |
1145  		SOF_TIMESTAMPING_RX_HARDWARE |
1146  		SOF_TIMESTAMPING_RAW_HARDWARE;
1147  	info->tx_types =
1148  		(1 << HWTSTAMP_TX_OFF) |
1149  		(1 << HWTSTAMP_TX_ON) |
1150  		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
1151  	info->rx_filters =
1152  		(1 << HWTSTAMP_FILTER_NONE) |
1153  		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1154  		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
1155  
1156  	return 0;
1157  }
1158  
static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
1160  			     struct sk_buff *skb, int type)
1161  {
1162  	struct vsc8531_private *vsc8531 =
1163  		container_of(mii_ts, struct vsc8531_private, mii_ts);
1164  
1165  	if (!vsc8531->ptp->configured)
1166  		return;
1167  
1168  	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
1169  		kfree_skb(skb);
1170  		return;
1171  	}
1172  
1173  	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1174  
1175  	mutex_lock(&vsc8531->ts_lock);
1176  	__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
1177  	mutex_unlock(&vsc8531->ts_lock);
1178  }
1179  
static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
1181  			     struct sk_buff *skb, int type)
1182  {
1183  	struct vsc8531_private *vsc8531 =
1184  		container_of(mii_ts, struct vsc8531_private, mii_ts);
1185  	struct skb_shared_hwtstamps *shhwtstamps = NULL;
1186  	struct vsc85xx_ptphdr *ptphdr;
1187  	struct timespec64 ts;
1188  	unsigned long ns;
1189  
1190  	if (!vsc8531->ptp->configured)
1191  		return false;
1192  
1193  	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE ||
1194  	    type == PTP_CLASS_NONE)
1195  		return false;
1196  
1197  	vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
1198  
1199  	ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
1200  	if (!ptphdr)
1201  		return false;
1202  
1203  	shhwtstamps = skb_hwtstamps(skb);
1204  	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
1205  
1206  	ns = ntohl(ptphdr->rsrvd2);
1207  
	/* The nanoseconds of the RX timestamp are carried in the PTP header
	 * reserved field. If the nanoseconds read from the PHC are already
	 * lower, the seconds counter wrapped after the frame was timestamped,
	 * so the frame belongs to the previous second.
	 */
1209  	if (ts.tv_nsec < ns)
1210  		ts.tv_sec--;
1211  
1212  	shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
1213  	netif_rx(skb);
1214  
1215  	return true;
1216  }
1217  
1218  static const struct ptp_clock_info vsc85xx_clk_caps = {
1219  	.owner		= THIS_MODULE,
1220  	.name		= "VSC85xx timer",
1221  	.max_adj	= S32_MAX,
1222  	.n_alarm	= 0,
1223  	.n_pins		= 0,
1224  	.n_ext_ts	= 0,
1225  	.n_per_out	= 0,
1226  	.pps		= 0,
1227  	.adjtime        = &vsc85xx_adjtime,
1228  	.adjfine	= &vsc85xx_adjfine,
1229  	.gettime64	= &vsc85xx_gettime,
1230  	.settime64	= &vsc85xx_settime,
1231  };
1232  
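/* Return the private data of the PHY owning the shared 1588 processor (the
 * base PHY of the package), which can be the current PHY itself.
 */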
static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
1234  {
1235  	struct vsc8531_private *vsc8531 = phydev->priv;
1236  
1237  	if (vsc8531->ts_base_addr != phydev->mdio.addr) {
1238  		struct mdio_device *dev;
1239  
1240  		dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr];
1241  		phydev = container_of(dev, struct phy_device, mdio);
1242  
1243  		return phydev->priv;
1244  	}
1245  
1246  	return vsc8531;
1247  }
1248  
static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev)
1250  {
1251  	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);
1252  
1253  	return vsc8531->input_clk_init;
1254  }
1255  
static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
1257  {
1258  	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);
1259  
1260  	vsc8531->input_clk_init = true;
1261  }
1262  
static int __vsc8584_init_ptp(struct phy_device *phydev)
1264  {
1265  	struct vsc8531_private *vsc8531 = phydev->priv;
1266  	static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
1267  	static const u8  ltc_seq_a[] = { 8, 6, 5, 4, 2 };
1268  	u32 val;
1269  
1270  	if (!vsc8584_is_1588_input_clk_configured(phydev)) {
1271  		phy_lock_mdio_bus(phydev);
1272  
1273  		/* 1588_DIFF_INPUT_CLK configuration: Use an external clock for
1274  		 * the LTC, as per 3.13.29 in the VSC8584 datasheet.
1275  		 */
1276  		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
1277  				  MSCC_PHY_PAGE_1588);
1278  		phy_ts_base_write(phydev, 29, 0x7ae0);
1279  		phy_ts_base_write(phydev, 30, 0xb71c);
1280  		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
1281  				  MSCC_PHY_PAGE_STANDARD);
1282  
1283  		phy_unlock_mdio_bus(phydev);
1284  
1285  		vsc8584_set_input_clk_configured(phydev);
1286  	}
1287  
1288  	/* Disable predictor before configuring the 1588 block */
1289  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1290  				  MSCC_PHY_PTP_INGR_PREDICTOR);
1291  	val &= ~PTP_INGR_PREDICTOR_EN;
1292  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1293  			     val);
1294  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1295  				  MSCC_PHY_PTP_EGR_PREDICTOR);
1296  	val &= ~PTP_EGR_PREDICTOR_EN;
1297  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1298  			     val);
1299  
1300  	/* By default, the internal clock of fixed rate 250MHz is used */
1301  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
1302  	val &= ~PTP_LTC_CTRL_CLK_SEL_MASK;
1303  	val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250;
1304  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
1305  
1306  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE);
1307  	val &= ~PTP_LTC_SEQUENCE_A_MASK;
1308  	val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]);
1309  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val);
1310  
1311  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ);
1312  	val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB);
1313  	if (ltc_seq_e[PHC_CLK_250MHZ])
1314  		val |= PTP_LTC_SEQ_ADD_SUB;
1315  	val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]);
1316  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val);
1317  
1318  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ,
1319  			     PPS_WIDTH_ADJ);
1320  
1321  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO,
1322  			     IS_ENABLED(CONFIG_MACSEC) ?
1323  			     PTP_INGR_DELAY_FIFO_DEPTH_MACSEC :
1324  			     PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT);
1325  
1326  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO,
1327  			     IS_ENABLED(CONFIG_MACSEC) ?
1328  			     PTP_EGR_DELAY_FIFO_DEPTH_MACSEC :
1329  			     PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT);
1330  
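	/* Calibrate the 1PPS, load/save and start-of-frame paths: clear the
	 * bypass bits, acknowledge the calibration status bits (assumed to
	 * be write-one-to-clear) and pulse the calibration trigger twice.
	 */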
1331  	/* Enable n-phase sampler for Viper Rev-B */
1332  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1333  				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1334  	val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS |
1335  		 PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS |
1336  		 PTP_ACCUR_LOAD_SAVE_BYPASS);
1337  	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
1338  	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
1339  	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
1340  	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
1341  	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
1342  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1343  			     val);
1344  
1345  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1346  				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1347  	val |= PTP_ACCUR_CALIB_TRIGG;
1348  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1349  			     val);
1350  
1351  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1352  				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1353  	val &= ~PTP_ACCUR_CALIB_TRIGG;
1354  	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
1355  	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
1356  	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
1357  	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
1358  	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
1359  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1360  			     val);
1361  
1362  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1363  				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1364  	val |= PTP_ACCUR_CALIB_TRIGG;
1365  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1366  			     val);
1367  
1368  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1369  				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1370  	val &= ~PTP_ACCUR_CALIB_TRIGG;
1371  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1372  			     val);
1373  
1374  	/* Do not access FIFO via SI */
1375  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1376  				  MSCC_PHY_PTP_TSTAMP_FIFO_SI);
1377  	val &= ~PTP_TSTAMP_FIFO_SI_EN;
1378  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI,
1379  			     val);
1380  
1381  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1382  				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
1383  	val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE;
1384  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
1385  			     val);
1386  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1387  				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
1388  	val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE;
1389  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
1390  			     val);
1391  
1392  	/* Put the flag that indicates the frame has been modified to bit 7 */
1393  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1394  				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
1395  	val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL;
1396  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
1397  			     val);
1398  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1399  				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
1400  	val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7);
1401  	val &= ~PTP_EGR_REWRITER_FLAG_VAL;
1402  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
1403  			     val);
1404  
	/* 30-bit mode for the RX timestamp: only the nanoseconds are kept in
	 * the reserved field.
	 */
1408  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1409  				  MSCC_PHY_PTP_INGR_TSP_CTRL);
1410  	val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS;
1411  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
1412  			     val);
1413  
1414  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
1415  	val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS;
1416  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
1417  
1418  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1419  				  MSCC_PHY_PTP_SERIAL_TOD_IFACE);
1420  	val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR;
1421  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE,
1422  			     val);
1423  
1424  	vsc85xx_ts_fsb_init(phydev);
1425  
1426  	/* Set the Egress timestamp FIFO configuration and status register */
1427  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1428  				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
1429  	val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK);
1430  	/* 16 bytes for the signature, 10 for the timestamp in the TS FIFO */
1431  	val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7);
1432  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1433  			     val);
1434  
1435  	vsc85xx_ts_reset_fifo(phydev);
1436  
1437  	val = PTP_IFACE_CTRL_CLK_ENA;
1438  	if (!IS_ENABLED(CONFIG_MACSEC))
1439  		val |= PTP_IFACE_CTRL_GMII_PROT;
1440  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1441  
1442  	vsc85xx_ts_set_latencies(phydev);
1443  
1444  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE);
1445  
1446  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
1447  	val |= PTP_IFACE_CTRL_EGR_BYPASS;
1448  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1449  
1450  	vsc85xx_ts_disable_flows(phydev, EGRESS);
1451  	vsc85xx_ts_disable_flows(phydev, INGRESS);
1452  
1453  	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1454  				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable all ingress and egress analyzer engines while they are
	 * being reconfigured.
	 */
1456  	val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK |
1457  		 PTP_ANALYZER_MODE_INGR_ENA_MASK |
1458  		 PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK |
1459  		 PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK);
1460  	/* Strict matching in flow (packets should match flows from the same
1461  	 * index in all enabled comparators (except PTP)).
1462  	 */
1463  	val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) |
1464  	       PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7);
1465  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
1466  			     val);
1467  
1468  	/* Initialized for ingress and egress flows:
1469  	 * - The Ethernet comparator.
1470  	 * - The IP comparator.
1471  	 * - The PTP comparator.
1472  	 */
1473  	vsc85xx_eth_cmp1_init(phydev, INGRESS);
1474  	vsc85xx_ip_cmp1_init(phydev, INGRESS);
1475  	vsc85xx_ptp_cmp_init(phydev, INGRESS);
1476  	vsc85xx_eth_cmp1_init(phydev, EGRESS);
1477  	vsc85xx_ip_cmp1_init(phydev, EGRESS);
1478  	vsc85xx_ptp_cmp_init(phydev, EGRESS);
1479  
1480  	vsc85xx_ts_eth_cmp1_sig(phydev);
1481  
1482  	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
1483  	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
1484  	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
1485  	vsc8531->mii_ts.ts_info  = vsc85xx_ts_info;
1486  	phydev->mii_ts = &vsc8531->mii_ts;
1487  
1488  	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
1489  
1490  	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
1491  						     &phydev->mdio.dev);
1492  	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
1493  }
1494  
void vsc8584_config_ts_intr(struct phy_device *phydev)
1496  {
1497  	struct vsc8531_private *priv = phydev->priv;
1498  
1499  	mutex_lock(&priv->ts_lock);
1500  	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK,
1501  			     VSC85XX_1588_INT_MASK_MASK);
1502  	mutex_unlock(&priv->ts_lock);
1503  }
1504  
int vsc8584_ptp_init(struct phy_device *phydev)
1506  {
1507  	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
1508  	case PHY_ID_VSC8572:
1509  	case PHY_ID_VSC8574:
1510  	case PHY_ID_VSC8575:
1511  	case PHY_ID_VSC8582:
1512  	case PHY_ID_VSC8584:
1513  		return __vsc8584_init_ptp(phydev);
1514  	}
1515  
1516  	return 0;
1517  }
1518  
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
1520  {
1521  	struct vsc8531_private *priv = phydev->priv;
1522  	int rc;
1523  
1524  	mutex_lock(&priv->ts_lock);
1525  	rc = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1526  				 MSCC_PHY_1588_VSC85XX_INT_STATUS);
1527  	/* Ack the PTP interrupt */
1528  	vsc85xx_ts_write_csr(phydev, PROCESSOR,
1529  			     MSCC_PHY_1588_VSC85XX_INT_STATUS, rc);
1530  
1531  	if (!(rc & VSC85XX_1588_INT_MASK_MASK)) {
1532  		mutex_unlock(&priv->ts_lock);
1533  		return IRQ_NONE;
1534  	}
1535  
1536  	if (rc & VSC85XX_1588_INT_FIFO_ADD) {
1537  		vsc85xx_get_tx_ts(priv->ptp);
1538  	} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
1539  		__skb_queue_purge(&priv->ptp->tx_queue);
1540  		vsc85xx_ts_reset_fifo(phydev);
1541  	}
1542  
1543  	mutex_unlock(&priv->ts_lock);
1544  	return IRQ_HANDLED;
1545  }
1546  
int vsc8584_ptp_probe(struct phy_device *phydev)
1548  {
1549  	struct vsc8531_private *vsc8531 = phydev->priv;
1550  
1551  	vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp),
1552  				    GFP_KERNEL);
1553  	if (!vsc8531->ptp)
1554  		return -ENOMEM;
1555  
1556  	mutex_init(&vsc8531->phc_lock);
1557  	mutex_init(&vsc8531->ts_lock);
1558  
1559  	/* Retrieve the shared load/save GPIO. Request it as non exclusive as
1560  	 * the same GPIO can be requested by all the PHYs of the same package.
1561  	 * This GPIO must be used with the gpio_lock taken (the lock is shared
1562  	 * between all PHYs).
1563  	 */
1564  	vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save",
1565  						     GPIOD_FLAGS_BIT_NONEXCLUSIVE |
1566  						     GPIOD_OUT_LOW);
1567  	if (IS_ERR(vsc8531->load_save)) {
1568  		phydev_err(phydev, "Can't get load-save GPIO (%ld)\n",
1569  			   PTR_ERR(vsc8531->load_save));
1570  		return PTR_ERR(vsc8531->load_save);
1571  	}
1572  
1573  	/* Timestamp selected by default to keep legacy API */
1574  	phydev->default_timestamp = true;
1575  
1576  	vsc8531->ptp->phydev = phydev;
1577  
1578  	return 0;
1579  }
1580  
int vsc8584_ptp_probe_once(struct phy_device *phydev)
1582  {
1583  	struct vsc85xx_shared_private *shared =
1584  		(struct vsc85xx_shared_private *)phydev->shared->priv;
1585  
1586  	/* Initialize shared GPIO lock */
1587  	mutex_init(&shared->gpio_lock);
1588  
1589  	return 0;
1590  }
1591