// SPDX-License-Identifier: GPL-2.0+
/*
 * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
 *
 * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
 */

#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/oa_tc6.h>

/* OPEN Alliance TC6 registers */
/* Standard Capabilities Register */
#define OA_TC6_REG_STDCAP			0x0002
#define STDCAP_DIRECT_PHY_REG_ACCESS		BIT(8)

/* Reset Control and Status Register */
#define OA_TC6_REG_RESET			0x0003
#define RESET_SWRESET				BIT(0)	/* Software Reset */

/* Configuration Register #0 */
#define OA_TC6_REG_CONFIG0			0x0004
#define CONFIG0_SYNC				BIT(15)
#define CONFIG0_ZARFE_ENABLE			BIT(12)

/* Status Register #0 */
#define OA_TC6_REG_STATUS0			0x0008
#define STATUS0_RESETC				BIT(6)	/* Reset Complete */
#define STATUS0_HEADER_ERROR			BIT(5)
#define STATUS0_LOSS_OF_FRAME_ERROR		BIT(4)
#define STATUS0_RX_BUFFER_OVERFLOW_ERROR	BIT(3)
#define STATUS0_TX_PROTOCOL_ERROR		BIT(0)

/* Buffer Status Register */
#define OA_TC6_REG_BUFFER_STATUS		0x000B
#define BUFFER_STATUS_TX_CREDITS_AVAILABLE	GENMASK(15, 8)
#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE	GENMASK(7, 0)

/* Interrupt Mask Register #0 */
#define OA_TC6_REG_INT_MASK0			0x000C
#define INT_MASK0_HEADER_ERR_MASK		BIT(5)
#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK	BIT(4)
#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK	BIT(3)
#define INT_MASK0_TX_PROTOCOL_ERR_MASK		BIT(0)

/* PHY Clause 22 registers base address and mask */
#define OA_TC6_PHY_STD_REG_ADDR_BASE		0xFF00
#define OA_TC6_PHY_STD_REG_ADDR_MASK		0x1F
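/* The Clause 22 PHY registers live at 0xFF00-0xFF1F in MMS 0: the MDIO
 * register number is masked to 5 bits and ORed into the base address (see
 * oa_tc6_mdiobus_read()/oa_tc6_mdiobus_write() below).
 */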

/* Control command header */
#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL	BIT(31)
#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ	BIT(29)
#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR	GENMASK(27, 24)
#define OA_TC6_CTRL_HEADER_ADDR			GENMASK(23, 8)
#define OA_TC6_CTRL_HEADER_LENGTH		GENMASK(7, 1)
#define OA_TC6_CTRL_HEADER_PARITY		BIT(0)
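/* Example encoding with the masks above: a single-register read of
 * OA_TC6_REG_STATUS0 uses WNR = 0, MMS = 0, ADDR = 0x0008 and LEN = 0
 * (LEN holds "number of registers - 1"), with the parity bit chosen so
 * that the 32-bit header has odd overall parity.
 */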

/* Data header */
#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL	BIT(31)
#define OA_TC6_DATA_HEADER_DATA_VALID		BIT(21)
#define OA_TC6_DATA_HEADER_START_VALID		BIT(20)
#define OA_TC6_DATA_HEADER_START_WORD_OFFSET	GENMASK(19, 16)
#define OA_TC6_DATA_HEADER_END_VALID		BIT(14)
#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET	GENMASK(13, 8)
#define OA_TC6_DATA_HEADER_PARITY		BIT(0)

/* Data footer */
#define OA_TC6_DATA_FOOTER_EXTENDED_STS		BIT(31)
#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD	BIT(30)
#define OA_TC6_DATA_FOOTER_CONFIG_SYNC		BIT(29)
#define OA_TC6_DATA_FOOTER_RX_CHUNKS		GENMASK(28, 24)
#define OA_TC6_DATA_FOOTER_DATA_VALID		BIT(21)
#define OA_TC6_DATA_FOOTER_START_VALID		BIT(20)
#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET	GENMASK(19, 16)
#define OA_TC6_DATA_FOOTER_END_VALID		BIT(14)
#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET	GENMASK(13, 8)
#define OA_TC6_DATA_FOOTER_TX_CREDITS		GENMASK(5, 1)

/* PHY - Clause 45 registers memory map selector (MMS) as per table 6 in the
 * OPEN Alliance specification.
 */
#define OA_TC6_PHY_C45_PCS_MMS2			2	/* MMD 3 */
#define OA_TC6_PHY_C45_PMA_PMD_MMS3		3	/* MMD 1 */
#define OA_TC6_PHY_C45_VS_PLCA_MMS4		4	/* MMD 31 */
#define OA_TC6_PHY_C45_AUTO_NEG_MMS5		5	/* MMD 7 */
#define OA_TC6_PHY_C45_POWER_UNIT_MMS6		6	/* MMD 13 */

#define OA_TC6_CTRL_HEADER_SIZE			4
#define OA_TC6_CTRL_REG_VALUE_SIZE		4
#define OA_TC6_CTRL_IGNORED_SIZE		4
#define OA_TC6_CTRL_MAX_REGISTERS		128
#define OA_TC6_CTRL_SPI_BUF_SIZE		(OA_TC6_CTRL_HEADER_SIZE +\
						(OA_TC6_CTRL_MAX_REGISTERS *\
						OA_TC6_CTRL_REG_VALUE_SIZE) +\
						OA_TC6_CTRL_IGNORED_SIZE)
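/* Worst case control buffer: 4-byte header + 128 * 4 register value bytes +
 * 4 ignored bytes = 520 bytes.
 */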
#define OA_TC6_CHUNK_PAYLOAD_SIZE		64
#define OA_TC6_DATA_HEADER_SIZE			4
#define OA_TC6_CHUNK_SIZE			(OA_TC6_DATA_HEADER_SIZE +\
						OA_TC6_CHUNK_PAYLOAD_SIZE)
#define OA_TC6_MAX_TX_CHUNKS			48
#define OA_TC6_SPI_DATA_BUF_SIZE		(OA_TC6_MAX_TX_CHUNKS *\
						OA_TC6_CHUNK_SIZE)
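/* Each data chunk on the wire is a 4-byte header plus a 64-byte payload
 * (68 bytes), so the data buffer covers 48 * 68 = 3264 bytes per transfer.
 */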
#define STATUS0_RESETC_POLL_DELAY		1000
#define STATUS0_RESETC_POLL_TIMEOUT		1000000
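/* Both poll values are in microseconds: poll every 1 ms, give up after 1 s */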

/* Internal structure for MAC-PHY drivers */
struct oa_tc6 {
	struct device *dev;
	struct net_device *netdev;
	struct phy_device *phydev;
	struct mii_bus *mdiobus;
	struct spi_device *spi;
	struct mutex spi_ctrl_lock; /* Protects spi control transfer */
	void *spi_ctrl_tx_buf;
	void *spi_ctrl_rx_buf;
	void *spi_data_tx_buf;
	void *spi_data_rx_buf;
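	/* Two-stage tx pipeline: the skb currently being split into chunks
	 * and at most one skb queued behind it; oa_tc6_start_xmit() stops
	 * the netif queue while waiting_tx_skb is occupied.
	 */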
	struct sk_buff *ongoing_tx_skb;
	struct sk_buff *waiting_tx_skb;
	struct sk_buff *rx_skb;
	struct task_struct *spi_thread;
	wait_queue_head_t spi_wq;
	u16 tx_skb_offset;
	u16 spi_data_tx_buf_offset;
	u16 tx_credits;
	u8 rx_chunks_available;
	bool rx_buf_overflow;
	bool int_flag;
};

enum oa_tc6_header_type {
	OA_TC6_CTRL_HEADER,
	OA_TC6_DATA_HEADER,
};

enum oa_tc6_register_op {
	OA_TC6_CTRL_REG_READ = 0,
	OA_TC6_CTRL_REG_WRITE = 1,
};

enum oa_tc6_data_valid_info {
	OA_TC6_DATA_INVALID,
	OA_TC6_DATA_VALID,
};

enum oa_tc6_data_start_valid_info {
	OA_TC6_DATA_START_INVALID,
	OA_TC6_DATA_START_VALID,
};

enum oa_tc6_data_end_valid_info {
	OA_TC6_DATA_END_INVALID,
	OA_TC6_DATA_END_VALID,
};

static int oa_tc6_spi_transfer(struct oa_tc6 *tc6,
			       enum oa_tc6_header_type header_type, u16 length)
{
	struct spi_transfer xfer = { 0 };
	struct spi_message msg;

	if (header_type == OA_TC6_DATA_HEADER) {
		xfer.tx_buf = tc6->spi_data_tx_buf;
		xfer.rx_buf = tc6->spi_data_rx_buf;
	} else {
		xfer.tx_buf = tc6->spi_ctrl_tx_buf;
		xfer.rx_buf = tc6->spi_ctrl_rx_buf;
	}
	xfer.len = length;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(tc6->spi, &msg);
}

static int oa_tc6_get_parity(u32 p)
{
	/* Public domain code snippet, lifted from
	 * http://www-graphics.stanford.edu/~seander/bithacks.html
	 */
	p ^= p >> 1;
	p ^= p >> 2;
	p = (p & 0x11111111U) * 0x11111111U;
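	/* After the two folds above, bit 0 of every nibble holds that
	 * nibble's parity; the 0x11111111 multiply sums those eight bits
	 * into the top nibble, so bit 28 is the parity of the whole word.
	 */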

	/* Odd parity is used here: return 1 only when the header bits have
	 * even parity, so that setting the parity bit makes the overall
	 * 32-bit word odd parity.
	 */
	return !((p >> 28) & 1);
}

static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length,
					 enum oa_tc6_register_op reg_op)
{
	u32 header;

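	/* Addresses used throughout this driver carry the memory map
	 * selector (MMS) in bits 31:16 and the 16-bit register address in
	 * bits 15:0, so addr >> 16 extracts the MMS field.
	 */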
	header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL,
			    OA_TC6_CTRL_HEADER) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1);
	header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY,
			     oa_tc6_get_parity(header));

	return cpu_to_be32(header);
}

static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[],
					  u8 length)
{
	__be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE;

	for (int i = 0; i < length; i++)
		*tx_buf++ = cpu_to_be32(value[i]);
}

static u16 oa_tc6_calculate_ctrl_buf_size(u8 length)
{
	/* A control command consists of a 4-byte header, a 4-byte register
	 * value for each register and 4 ignored bytes.
	 */
	return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length +
	       OA_TC6_CTRL_IGNORED_SIZE;
}

static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address,
					u32 value[], u8 length,
					enum oa_tc6_register_op reg_op)
{
	__be32 *tx_buf = tc6->spi_ctrl_tx_buf;

	*tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op);

	if (reg_op == OA_TC6_CTRL_REG_WRITE)
		oa_tc6_update_ctrl_write_data(tc6, value, length);
}

static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size)
{
	u8 *tx_buf = tc6->spi_ctrl_tx_buf;
	u8 *rx_buf = tc6->spi_ctrl_rx_buf;

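	/* The first 4 received bytes arrive while the 4-byte command header
	 * is still being shifted out and carry no information, so skip them
	 * before comparing against the echoed command.
	 */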
	rx_buf += OA_TC6_CTRL_IGNORED_SIZE;

	/* The echoed control write must match the one that was
	 * transmitted.
	 */
	if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE))
		return -EPROTO;

	return 0;
}

static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size)
{
	u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE;
	u32 *tx_buf = tc6->spi_ctrl_tx_buf;

	/* The echoed control read header must match the one that was
	 * transmitted.
	 */
	if (*tx_buf != *rx_buf)
		return -EPROTO;

	return 0;
}

static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[],
				       u8 length)
{
	__be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE +
			 OA_TC6_CTRL_HEADER_SIZE;

	for (int i = 0; i < length; i++)
		value[i] = be32_to_cpu(*rx_buf++);
}

static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[],
			       u8 length, enum oa_tc6_register_op reg_op)
{
	u16 size;
	int ret;

	/* Prepare control command and copy to SPI control buffer */
	oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op);

	size = oa_tc6_calculate_ctrl_buf_size(length);

	/* Perform SPI transfer */
	ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size);
	if (ret) {
		dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n",
			ret);
		return ret;
	}

	/* Check echoed/received control write command reply for errors */
	if (reg_op == OA_TC6_CTRL_REG_WRITE)
		return oa_tc6_check_ctrl_write_reply(tc6, size);

	/* Check echoed/received control read command reply for errors */
	ret = oa_tc6_check_ctrl_read_reply(tc6, size);
	if (ret)
		return ret;

	oa_tc6_copy_ctrl_read_data(tc6, value, length);

	return 0;
}

/**
 * oa_tc6_read_registers - function for reading multiple consecutive registers.
 * @tc6: oa_tc6 struct.
 * @address: address of the first register to be read in the MAC-PHY.
 * @value: buffer to store the values read starting at register @address.
 * @length: number of consecutive registers to be read from @address.
 *
 * A maximum of 128 consecutive registers can be read starting at @address.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
			  u8 length)
{
	int ret;

	if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
		dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
		return -EINVAL;
	}

	mutex_lock(&tc6->spi_ctrl_lock);
	ret = oa_tc6_perform_ctrl(tc6, address, value, length,
				  OA_TC6_CTRL_REG_READ);
	mutex_unlock(&tc6->spi_ctrl_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(oa_tc6_read_registers);

/**
 * oa_tc6_read_register - function for reading a MAC-PHY register.
 * @tc6: oa_tc6 struct.
 * @value: location to store the value read from the @address register.
 * @address: register address of the MAC-PHY to be read.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value)
{
	return oa_tc6_read_registers(tc6, address, value, 1);
}
EXPORT_SYMBOL_GPL(oa_tc6_read_register);

/**
 * oa_tc6_write_registers - function for writing multiple consecutive registers.
 * @tc6: oa_tc6 struct.
 * @address: address of the first register to be written in the MAC-PHY.
 * @value: values to be written starting at register @address.
 * @length: number of consecutive registers to be written from @address.
 *
 * A maximum of 128 consecutive registers can be written starting at @address.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
			   u8 length)
{
	int ret;

	if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
		dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
		return -EINVAL;
	}

	mutex_lock(&tc6->spi_ctrl_lock);
	ret = oa_tc6_perform_ctrl(tc6, address, value, length,
				  OA_TC6_CTRL_REG_WRITE);
	mutex_unlock(&tc6->spi_ctrl_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(oa_tc6_write_registers);

/**
 * oa_tc6_write_register - function for writing a MAC-PHY register.
 * @tc6: oa_tc6 struct.
 * @address: register address of the MAC-PHY to be written.
 * @value: value to be written to the @address register of the MAC-PHY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value)
{
	return oa_tc6_write_registers(tc6, address, &value, 1);
}
EXPORT_SYMBOL_GPL(oa_tc6_write_register);

static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
	if (ret)
		return ret;

	if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS))
		return -ENODEV;

	return 0;
}

static void oa_tc6_handle_link_change(struct net_device *netdev)
{
	phy_print_status(netdev->phydev);
}

static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct oa_tc6 *tc6 = bus->priv;
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
				   (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
				   &regval);
	if (ret)
		return ret;

	return regval;
}

static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
				u16 val)
{
	struct oa_tc6 *tc6 = bus->priv;

	return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
				     (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
				     val);
}

static int oa_tc6_get_phy_c45_mms(int devnum)
{
	switch (devnum) {
	case MDIO_MMD_PCS:
		return OA_TC6_PHY_C45_PCS_MMS2;
	case MDIO_MMD_PMAPMD:
		return OA_TC6_PHY_C45_PMA_PMD_MMS3;
	case MDIO_MMD_VEND2:
		return OA_TC6_PHY_C45_VS_PLCA_MMS4;
	case MDIO_MMD_AN:
		return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
	case MDIO_MMD_POWER_UNIT:
		return OA_TC6_PHY_C45_POWER_UNIT_MMS6;
	default:
		return -EOPNOTSUPP;
	}
}

static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum,
				   int regnum)
{
	struct oa_tc6 *tc6 = bus->priv;
	u32 regval;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devnum);
	if (ret < 0)
		return ret;

	ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, &regval);
	if (ret)
		return ret;

	return regval;
}

static int oa_tc6_mdiobus_write_c45(struct mii_bus *bus, int addr, int devnum,
				    int regnum, u16 val)
{
	struct oa_tc6 *tc6 = bus->priv;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devnum);
	if (ret < 0)
		return ret;

	return oa_tc6_write_register(tc6, (ret << 16) | regnum, val);
}

static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6)
{
	int ret;

	tc6->mdiobus = mdiobus_alloc();
	if (!tc6->mdiobus) {
		netdev_err(tc6->netdev, "MDIO bus alloc failed\n");
		return -ENOMEM;
	}

	tc6->mdiobus->priv = tc6;
	tc6->mdiobus->read = oa_tc6_mdiobus_read;
	tc6->mdiobus->write = oa_tc6_mdiobus_write;
	/* OPEN Alliance 10BASE-T1x compliant MAC-PHYs expose both C22 and
	 * C45 register spaces. If the PHY is discovered via the C22 bus
	 * protocol, phylib assumes C22 and always uses C22 indirect access
	 * to reach the C45 registers, because there is no clean separation
	 * between the C22/C45 register spaces and the C22/C45 MDIO bus
	 * protocols. As a result, direct C45 register access, which would
	 * save multiple SPI bus accesses, is not used by default. To get it,
	 * PHY drivers can set .read_mmd/.write_mmd in the PHY driver to call
	 * .read_c45/.write_c45. Ex: drivers/net/phy/microchip_t1s.c
	 */
	tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45;
	tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45;
	tc6->mdiobus->name = "oa-tc6-mdiobus";
	tc6->mdiobus->parent = tc6->dev;

	snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s",
		 dev_name(&tc6->spi->dev));

	ret = mdiobus_register(tc6->mdiobus);
	if (ret) {
		netdev_err(tc6->netdev, "Could not register MDIO bus\n");
		mdiobus_free(tc6->mdiobus);
		return ret;
	}

	return 0;
}

static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6)
{
	mdiobus_unregister(tc6->mdiobus);
	mdiobus_free(tc6->mdiobus);
}

static int oa_tc6_phy_init(struct oa_tc6 *tc6)
{
	int ret;

	ret = oa_tc6_check_phy_reg_direct_access_capability(tc6);
	if (ret) {
		netdev_err(tc6->netdev,
			   "Direct PHY register access is not supported by the MAC-PHY\n");
		return ret;
	}

	ret = oa_tc6_mdiobus_register(tc6);
	if (ret)
		return ret;

	tc6->phydev = phy_find_first(tc6->mdiobus);
	if (!tc6->phydev) {
		netdev_err(tc6->netdev, "No PHY found\n");
		oa_tc6_mdiobus_unregister(tc6);
		return -ENODEV;
	}

	tc6->phydev->is_internal = true;
	ret = phy_connect_direct(tc6->netdev, tc6->phydev,
				 &oa_tc6_handle_link_change,
				 PHY_INTERFACE_MODE_INTERNAL);
	if (ret) {
		netdev_err(tc6->netdev, "Can't attach PHY to %s\n",
			   tc6->mdiobus->id);
		oa_tc6_mdiobus_unregister(tc6);
		return ret;
	}

	phy_attached_info(tc6->netdev->phydev);

	return 0;
}

static void oa_tc6_phy_exit(struct oa_tc6 *tc6)
{
	phy_disconnect(tc6->phydev);
	oa_tc6_mdiobus_unregister(tc6);
}

static int oa_tc6_read_status0(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &regval);
	if (ret) {
		dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n",
			ret);
		return 0;
	}

	return regval;
}

static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6)
{
	u32 regval = RESET_SWRESET;
	int ret;

	ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval);
	if (ret)
		return ret;

	/* Poll every 1 ms, for at most 1 s, for the soft reset to complete */
	ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval,
				 regval & STATUS0_RESETC,
				 STATUS0_RESETC_POLL_DELAY,
				 STATUS0_RESETC_POLL_TIMEOUT);
	if (ret)
		return -ENODEV;

	/* Clear the reset complete status */
	return oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, regval);
}

static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, &regval);
	if (ret)
		return ret;

	regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK |
		    INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK |
		    INT_MASK0_LOSS_OF_FRAME_ERR_MASK |
		    INT_MASK0_HEADER_ERR_MASK);

	return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval);
}

static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value);
	if (ret)
		return ret;

	/* Enable configuration synchronization for data transfer */
	value |= CONFIG0_SYNC;

	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
}

static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
{
	if (tc6->rx_skb) {
		tc6->netdev->stats.rx_dropped++;
		kfree_skb(tc6->rx_skb);
		tc6->rx_skb = NULL;
	}
}

static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
{
	if (tc6->ongoing_tx_skb) {
		tc6->netdev->stats.tx_dropped++;
		kfree_skb(tc6->ongoing_tx_skb);
		tc6->ongoing_tx_skb = NULL;
	}
}

static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value);
	if (ret) {
		netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n",
			   ret);
		return ret;
	}

	/* Clear the error interrupts status */
	ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value);
	if (ret) {
		netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n",
			   ret);
		return ret;
	}

	if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
		tc6->rx_buf_overflow = true;
		oa_tc6_cleanup_ongoing_rx_skb(tc6);
		net_err_ratelimited("%s: Receive buffer overflow error\n",
				    tc6->netdev->name);
		return -EAGAIN;
	}
	if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
		netdev_err(tc6->netdev, "Transmit protocol error\n");
		return -ENODEV;
	}
	/* TODO: Currently loss of frame and header errors are treated as
	 * non-recoverable errors. They will be handled in the next version.
	 */
	if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) {
		netdev_err(tc6->netdev, "Loss of frame error\n");
		return -ENODEV;
	}
	if (FIELD_GET(STATUS0_HEADER_ERROR, value)) {
		netdev_err(tc6->netdev, "Header error\n");
		return -ENODEV;
	}

	return 0;
}

static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
{
	/* Process the rx chunk footer for the following:
	 * 1. tx credits
	 * 2. errors, if any, from the MAC-PHY
	 * 3. receive chunks available
	 */
	tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
	tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
					     footer);

	if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
		int ret = oa_tc6_process_extended_status(tc6);

		if (ret)
			return ret;
	}

	/* TODO: Currently received header bad and configuration unsync errors
	 * are treated as non-recoverable errors. They will be handled in the
	 * next version.
	 */
	if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) {
		netdev_err(tc6->netdev, "Rxd header bad error\n");
		return -ENODEV;
	}

	if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) {
		netdev_err(tc6->netdev, "Config unsync error\n");
		return -ENODEV;
	}

	return 0;
}

static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
{
	tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
	tc6->netdev->stats.rx_packets++;
	tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;

	netif_rx(tc6->rx_skb);

	tc6->rx_skb = NULL;
}

static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
{
	memcpy(skb_put(tc6->rx_skb, length), payload, length);
}

static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
{
	tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
						ETH_HLEN + ETH_FCS_LEN);
	if (!tc6->rx_skb) {
		tc6->netdev->stats.rx_dropped++;
		return -ENOMEM;
	}

	return 0;
}

static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u16 size)
{
	int ret;

	ret = oa_tc6_allocate_rx_skb(tc6);
	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);

	return 0;
}

static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	int ret;

	ret = oa_tc6_allocate_rx_skb(tc6);
	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	return 0;
}

static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);
}

static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u32 footer)
{
	oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
}

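/* A chunk's 64-byte payload can carry a complete rx frame, only a frame
 * start, only a frame end, the end of one frame followed by the start of the
 * next, or the middle of an ongoing frame. The footer's start/end valid
 * flags and word/byte offsets distinguish these cases below.
 */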
static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
					u32 footer)
{
	u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
					 footer) * sizeof(u32);
	u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
				       footer);
	bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
	bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
	u16 size;

	/* Restart the new rx frame after receiving rx buffer overflow error */
	if (start_valid && tc6->rx_buf_overflow)
		tc6->rx_buf_overflow = false;

	if (tc6->rx_buf_overflow)
		return 0;

	/* Process the chunk with complete rx frame */
	if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
		size = end_byte_offset + 1 - start_byte_offset;
		return oa_tc6_prcs_complete_rx_frame(tc6,
						     &data[start_byte_offset],
						     size);
	}

	/* Process the chunk with only rx frame start */
	if (start_valid && !end_valid) {
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process the chunk with only rx frame end */
	if (end_valid && !start_valid) {
		size = end_byte_offset + 1;
		oa_tc6_prcs_rx_frame_end(tc6, data, size);
		return 0;
	}

	/* Process the chunk with previous rx frame end and next rx frame
	 * start.
	 */
	if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
		/* After rx buffer overflow error received, there might be a
		 * possibility of getting an end valid of a previously
		 * incomplete rx frame along with the new rx frame start valid.
		 */
		if (tc6->rx_skb) {
			size = end_byte_offset + 1;
			oa_tc6_prcs_rx_frame_end(tc6, data, size);
		}
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process the chunk with ongoing rx frame data */
	oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);

	return 0;
}

static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
{
	u8 *rx_buf = tc6->spi_data_rx_buf;
	__be32 footer;

	footer = *((__be32 *)&rx_buf[footer_offset]);

	return be32_to_cpu(footer);
}

static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
{
	u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE;
	u32 footer;
	int ret;

	/* All the rx chunks in the receive SPI data buffer are examined here */
	for (int i = 0; i < no_of_rx_chunks; i++) {
		/* The last 4 bytes of each received chunk carry the footer
		 * info.
		 */
		footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE +
						    OA_TC6_CHUNK_PAYLOAD_SIZE);

		ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
		if (ret)
			return ret;

		/* If the chunk contains valid data, process it to determine
		 * the validity and the location of the receive frame data.
		 */
		if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
			u8 *payload = tc6->spi_data_rx_buf + i *
				      OA_TC6_CHUNK_SIZE;

			ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
							   footer);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid,
					 bool end_valid, u8 end_byte_offset)
{
	u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL,
				OA_TC6_DATA_HEADER) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET,
				end_byte_offset);

	header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY,
			     oa_tc6_get_parity(header));

	return cpu_to_be32(header);
}

static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6)
{
	enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID;
	__be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset;
	u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset;
	u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset;
	enum oa_tc6_data_start_valid_info start_valid;
	u8 end_byte_offset = 0;
	u16 length_to_copy;

	/* Initial value is assigned here rather than at the declaration to
	 * keep the declaration line within the 80 character limit.
	 */
	start_valid = OA_TC6_DATA_START_INVALID;

	/* Set start valid if the current tx chunk contains the start of the tx
	 * ethernet frame.
	 */
	if (!tc6->tx_skb_offset)
		start_valid = OA_TC6_DATA_START_VALID;

	/* If the remaining tx skb length is more than the chunk payload size
	 * of 64 bytes then copy only 64 bytes and leave the ongoing tx skb for
	 * the next tx chunk.
	 */
	length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE);

	/* Copy the tx skb data to the tx chunk payload buffer */
	memcpy(tx_buf + 1, tx_skb_data, length_to_copy);
	tc6->tx_skb_offset += length_to_copy;

	/* Set end valid if the current tx chunk contains the end of the tx
	 * ethernet frame.
	 */
	if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) {
		end_valid = OA_TC6_DATA_END_VALID;
		end_byte_offset = length_to_copy - 1;
		tc6->tx_skb_offset = 0;
		tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len;
		tc6->netdev->stats.tx_packets++;
		kfree_skb(tc6->ongoing_tx_skb);
		tc6->ongoing_tx_skb = NULL;
	}

	*tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid,
					     end_valid, end_byte_offset);
	tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
}

static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
{
	u16 used_tx_credits;

	/* Get tx skbs and convert them into tx chunks based on the tx credits
	 * available.
	 */
	for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
	     used_tx_credits++) {
		if (!tc6->ongoing_tx_skb) {
			tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
			tc6->waiting_tx_skb = NULL;
		}
		if (!tc6->ongoing_tx_skb)
			break;
		oa_tc6_add_tx_skb_to_spi_buf(tc6);
	}

	return used_tx_credits * OA_TC6_CHUNK_SIZE;
}

static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
					       u16 needed_empty_chunks)
{
	__be32 header;

	header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
					    OA_TC6_DATA_START_INVALID,
					    OA_TC6_DATA_END_INVALID, 0);

	while (needed_empty_chunks--) {
		__be32 *tx_buf = tc6->spi_data_tx_buf +
				 tc6->spi_data_tx_buf_offset;

		*tx_buf = header;
		tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
	}
}

static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
{
	u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
	u16 needed_empty_chunks;

	/* If there are more chunks to receive than to transmit, we need to add
	 * enough empty tx chunks to allow the reception of the excess rx
	 * chunks.
	 */
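	/* For example, with 2 tx chunks prepared (len = 136) and 5 rx chunks
	 * available, 3 empty chunks are appended and the full-duplex transfer
	 * length becomes 5 * 68 = 340 bytes.
	 */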
	if (tx_chunks >= tc6->rx_chunks_available)
		return len;

	needed_empty_chunks = tc6->rx_chunks_available - tx_chunks;

	oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks);

	return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len;
}

static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
{
	int ret;

	while (true) {
		u16 spi_len = 0;

		tc6->spi_data_tx_buf_offset = 0;

		if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
			spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);

		spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);

		if (tc6->int_flag) {
			tc6->int_flag = false;
			if (spi_len == 0) {
				oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1);
				spi_len = OA_TC6_CHUNK_SIZE;
			}
		}

		if (spi_len == 0)
			break;

		ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
		if (ret) {
			netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
				   ret);
			return ret;
		}

		ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
		if (ret) {
			if (ret == -EAGAIN)
				continue;

			oa_tc6_cleanup_ongoing_tx_skb(tc6);
			oa_tc6_cleanup_ongoing_rx_skb(tc6);
			netdev_err(tc6->netdev, "Device error: %d\n", ret);
			return ret;
		}

		if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev))
			netif_wake_queue(tc6->netdev);
	}

	return 0;
}

static int oa_tc6_spi_thread_handler(void *data)
{
	struct oa_tc6 *tc6 = data;
	int ret;

	while (likely(!kthread_should_stop())) {
		/* This kthread is woken up when there is a tx skb to transmit
		 * or a MAC-PHY interrupt, and then performs the SPI transfer
		 * of tx chunks.
		 */
		wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
					 tc6->int_flag ||
					 kthread_should_stop());

		if (kthread_should_stop())
			break;

		ret = oa_tc6_try_spi_transfer(tc6);
		if (ret)
			return ret;
	}

	return 0;
}

static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	/* Initially the tx credits and available rx chunks are read from the
	 * register because no data transfer has been performed yet. Later
	 * they are updated from the rx footer.
	 */
	ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
	if (ret)
		return ret;

	tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
	tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
					     value);

	return 0;
}

static irqreturn_t oa_tc6_macphy_isr(int irq, void *data)
{
	struct oa_tc6 *tc6 = data;

	/* MAC-PHY interrupt can occur for the following reasons.
	 * - availability of tx credits if it was 0 before and not reported in
	 *   the previous rx footer.
	 * - availability of rx chunks if it was 0 before and not reported in
	 *   the previous rx footer.
	 * - extended status event not reported in the previous rx footer.
	 */
	tc6->int_flag = true;
	/* Wake spi kthread to perform spi transfer */
	wake_up_interruptible(&tc6->spi_wq);

	return IRQ_HANDLED;
}

/**
 * oa_tc6_zero_align_receive_frame_enable - function to enable zero align
 * receive frame feature.
 * @tc6: oa_tc6 struct.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
	if (ret)
		return ret;

	/* Set Zero-Align Receive Frame Enable */
	regval |= CONFIG0_ZARFE_ENABLE;

	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
}
EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable);

/**
 * oa_tc6_start_xmit - function for sending the tx skb which contains an
 * ethernet frame.
 * @tc6: oa_tc6 struct.
 * @skb: socket buffer in which the ethernet frame is stored.
 *
 * Return: NETDEV_TX_OK if the skb was accepted for transmission, or
 * NETDEV_TX_BUSY if a previous skb is still waiting to be transmitted.
 */
netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
{
	if (tc6->waiting_tx_skb) {
		netif_stop_queue(tc6->netdev);
		return NETDEV_TX_BUSY;
	}

	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		tc6->netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	tc6->waiting_tx_skb = skb;

	/* Wake spi kthread to perform spi transfer */
	wake_up_interruptible(&tc6->spi_wq);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(oa_tc6_start_xmit);

/**
 * oa_tc6_init - allocates and initializes oa_tc6 structure.
 * @spi: device with which data will be exchanged.
 * @netdev: network device interface structure.
 *
 * Return: pointer to the oa_tc6 structure if the MAC-PHY initialization is
 * successful, NULL otherwise.
 */
struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
{
	struct oa_tc6 *tc6;
	int ret;

	tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL);
	if (!tc6)
		return NULL;

	tc6->spi = spi;
	tc6->netdev = netdev;
	SET_NETDEV_DEV(netdev, &spi->dev);
	mutex_init(&tc6->spi_ctrl_lock);

	/* Set the SPI controller to pump at realtime priority */
	tc6->spi->rt = true;
	spi_setup(tc6->spi);

	tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_CTRL_SPI_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_ctrl_tx_buf)
		return NULL;

	tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_CTRL_SPI_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_ctrl_rx_buf)
		return NULL;

	tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_SPI_DATA_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_data_tx_buf)
		return NULL;

	tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_SPI_DATA_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_data_rx_buf)
		return NULL;

	ret = oa_tc6_sw_reset_macphy(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC-PHY software reset failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_unmask_macphy_error_interrupts(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC-PHY error interrupts unmask failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_phy_init(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC internal PHY initialization failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_enable_data_transfer(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n",
			ret);
		goto phy_exit;
	}

	ret = oa_tc6_update_buffer_status_from_register(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"Failed to update buffer status: %d\n", ret);
		goto phy_exit;
	}

	init_waitqueue_head(&tc6->spi_wq);

	tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6,
				      "oa-tc6-spi-thread");
	if (IS_ERR(tc6->spi_thread)) {
		dev_err(&tc6->spi->dev, "Failed to create SPI thread\n");
		goto phy_exit;
	}

	sched_set_fifo(tc6->spi_thread);

	ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr,
			       IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev),
			       tc6);
	if (ret) {
		dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n",
			ret);
		goto kthread_stop;
	}

	/* oa_tc6_sw_reset_macphy() resets the MAC-PHY and clears the reset
	 * complete status. The IRQ is also asserted on reset completion and
	 * remains asserted until the MAC-PHY receives a data chunk, so
	 * transmitting an empty data chunk deasserts the IRQ. Refer to
	 * sections 7.7 and 9.2.8.8 in the OPEN Alliance specification for
	 * more details.
	 */
	tc6->int_flag = true;
	wake_up_interruptible(&tc6->spi_wq);

	return tc6;

kthread_stop:
	kthread_stop(tc6->spi_thread);
phy_exit:
	oa_tc6_phy_exit(tc6);
	return NULL;
}
EXPORT_SYMBOL_GPL(oa_tc6_init);

/**
 * oa_tc6_exit - exit function.
 * @tc6: oa_tc6 struct.
 */
void oa_tc6_exit(struct oa_tc6 *tc6)
{
	oa_tc6_phy_exit(tc6);
	kthread_stop(tc6->spi_thread);
	dev_kfree_skb_any(tc6->ongoing_tx_skb);
	dev_kfree_skb_any(tc6->waiting_tx_skb);
	dev_kfree_skb_any(tc6->rx_skb);
}
EXPORT_SYMBOL_GPL(oa_tc6_exit);

MODULE_DESCRIPTION("OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface Lib");
MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
MODULE_LICENSE("GPL");