// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Access SD/MMC cards through SPI master controllers
 *
 * (C) Copyright 2005, Intec Automation,
 *		Mike Lavender (mike@steroidmicros)
 * (C) Copyright 2006-2007, David Brownell
 * (C) Copyright 2007, Axis Communications,
 *		Hans-Peter Nilsson (hp@axis.com)
 * (C) Copyright 2007, ATRON electronic GmbH,
 *		Jan Nikitenko <jan.nikitenko@gmail.com>
 */
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */
#include <linux/mmc/slot-gpio.h>

#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>

#include <linux/unaligned.h>


/* NOTES:
 *
 * - For now, we won't try to interoperate with a real mmc/sd/sdio
 *   controller, although some of them do have hardware support for
 *   SPI protocol.  The main reason for such configs would be mmc-ish
 *   cards like DataFlash, which don't support that "native" protocol.
 *
 *   We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
 *   switch between driver stacks, and in any case if "native" mode
 *   is available, it will be faster and hence preferable.
 *
 * - MMC depends on a different chipselect management policy than the
 *   SPI interface currently supports for shared bus segments:  it needs
 *   to issue multiple spi_message requests with the chipselect active,
 *   using the results of one message to decide the next one to issue.
 *
 *   Pending updates to the programming interface, this driver expects
 *   that it not share the bus with other drivers (precluding conflicts).
 *
 * - We tell the controller to keep the chipselect active from the
 *   beginning of an mmc_host_ops.request until the end.  So beware
 *   of SPI controller drivers that mis-handle the cs_change flag!
 *
 *   However, many cards seem OK with chipselect flapping up/down
 *   during that time ... at least on unshared bus segments.
 */


/*
 * Local protocol constants, internal to data block protocols.
 */

/* Response tokens used to ack each block written: */
#define SPI_MMC_RESPONSE_CODE(x)	((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED		((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR		((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR		((6 << 1)|1)
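/* (For reference, per the SD simplified spec: the data-response token has
 * the form xxx0sss1, so after SPI_MMC_RESPONSE_CODE() masking an accepted
 * block reads back as 0b00101, a CRC error as 0b01011, and a write error
 * as 0b01101.)
 */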

/* Read and write blocks start with these tokens and end with crc;
 * on error, read tokens act like a subset of R2_SPI_* values.
 */
#define SPI_TOKEN_SINGLE	0xfe	/* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE	0xfc	/* multiblock write */
#define SPI_TOKEN_STOP_TRAN	0xfd	/* terminate multiblock write */

#define MMC_SPI_BLOCKSIZE	512

#define MMC_SPI_R1B_TIMEOUT_MS	3000
#define MMC_SPI_INIT_TIMEOUT_MS	3000

/* One of the critical speed parameters is the amount of data which may
 * be transferred in one command. If this value is too low, the SD card
 * controller has to do multiple partial block writes (argggh!). With
 * today's (2008) SD cards there is little speed gain if we transfer more
 * than 64 KBytes at a time. So use this value until there is any indication
 * that we should do more here.
 */
#define MMC_SPI_BLOCKSATONCE	128
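/* (i.e. 128 blocks of 512 bytes, the 64 KiB mentioned above) */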

/****************************************************************************/

/*
 * Local Data Structures
 */

/* "scratch" is per-{command,block} data exchanged with the card */
struct scratch {
	u8			status[29];
	u8			data_token;
	__be16			crc_val;
};

struct mmc_spi_host {
	struct mmc_host		*mmc;
	struct spi_device	*spi;

	unsigned char		power_mode;
	u16			powerup_msecs;

	struct mmc_spi_platform_data	*pdata;

	/* for bulk data transfers */
	struct spi_transfer	token, t, crc, early_status;
	struct spi_message	m;

	/* for status readback */
	struct spi_transfer	status;
	struct spi_message	readback;

	/* buffer used for commands and for message "overhead" */
	struct scratch		*data;

	/* Specs say to write ones most of the time, even when the card
	 * has no need to read its input data; and many cards won't care.
	 * This is our source of those ones.
	 */
	void			*ones;
};


/****************************************************************************/

/*
 * MMC-over-SPI protocol glue, used by the MMC stack interface
 */

static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	/* chipselect will always be inactive after setup() */
	return spi_setup(host->spi);
}

static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len)
{
	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	return spi_sync_locked(host->spi, &host->readback);
}

static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8 *cp = host->data->status;
	unsigned long start = jiffies;

	do {
		int		status;
		unsigned	i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		/* If we need long timeouts, we may release the CPU */
		cond_resched();
	} while (time_is_after_jiffies(start + timeout));
	return -ETIMEDOUT;
}

static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}

static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, 1, 0xff);
}


/*
 * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
 * hosts return!  The low byte holds R1_SPI bits.  The next byte may hold
 * R2_SPI bits ... for SEND_STATUS, or after data read errors.
 *
 * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
 * newer cards R7 (IF_COND).
 */

static char *maptype(struct mmc_command *cmd)
{
	switch (mmc_spi_resp_type(cmd)) {
	case MMC_RSP_SPI_R1:	return "R1";
	case MMC_RSP_SPI_R1B:	return "R1B";
	case MMC_RSP_SPI_R2:	return "R2/R5";
	case MMC_RSP_SPI_R3:	return "R3/R4/R7";
	default:		return "?";
	}
}

/* return zero, else negative errno after setting cmd->error */
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	unsigned long timeout_ms;
	u8	*cp = host->data->status;
	u8	*end = cp + host->t.len;
	int	value = 0;
	int	bitshift;
	u8	leftover = 0;
	unsigned short rotator;
	int	i;
	char	tag[32];

	snprintf(tag, sizeof(tag), "  ... CMD%d response SPI_%s",
		cmd->opcode, maptype(cmd));

	/* Except for data block reads, the whole response will already
	 * be stored in the scratch buffer.  It's somewhere after the
	 * command and the first byte we read after it.  We ignore that
	 * first byte.  After STOP_TRANSMISSION command it may include
	 * two data bits, but otherwise it's all ones.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Data block reads (R1 response types) may need more data... */
	if (cp == end) {
		cp = host->data->status;
		end = cp+1;

		/* Card sends N(CR) (== 1..8) bytes of all-ones then one
		 * status byte ... and we already scanned 2 bytes.
		 *
		 * REVISIT block read paths use nasty byte-at-a-time I/O
		 * so it can always DMA directly into the target buffer.
		 * It'd probably be better to memcpy() the first chunk and
		 * avoid extra i/o calls...
		 *
		 * Note we check for more than 8 bytes, because in practice,
		 * some SD cards are slow...
		 */
		for (i = 2; i < 16; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	bitshift = 0;
	if (*cp & 0x80)	{
		/* Houston, we have an ugly card with a bit-shifted response */
		rotator = *cp++ << 8;
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		rotator |= *cp++;
		while (rotator & 0x8000) {
			bitshift++;
			rotator <<= 1;
		}
		cmd->resp[0] = rotator >> 8;
		leftover = rotator;
	} else {
		cmd->resp[0] = *cp++;
	}
	cmd->error = 0;

	/* Status byte: the entire seven-bit R1 response.  */
	if (cmd->resp[0] != 0) {
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
				& cmd->resp[0])
			value = -EFAULT; /* Bad address */
		else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
			value = -ENOSYS; /* Function not implemented */
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ; /* Illegal byte sequence */
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO;    /* I/O error */
		/* else R1_SPI_IDLE, "it's resetting" */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
	 * and less-common stuff like various erase operations.
	 */
	case MMC_RSP_SPI_R1B:
		/* maybe we read all the busy tokens already */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end) {
			timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
				MMC_SPI_R1B_TIMEOUT_MS;
			mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms));
		}
		break;

	/* SPI R2 == R1 + second status byte; SEND_STATUS
	 * SPI R5 == R1 + data byte; IO_RW_DIRECT
	 */
	case MMC_RSP_SPI_R2:
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		if (bitshift) {
			rotator = leftover << 8;
			rotator |= *cp << bitshift;
			cmd->resp[0] |= (rotator & 0xFF00);
		} else {
			cmd->resp[0] |= *cp << 8;
		}
		break;

	/* SPI R3, R4, or R7 == R1 + 4 bytes */
	case MMC_RSP_SPI_R3:
		rotator = leftover << 8;
		cmd->resp[1] = 0;
		for (i = 0; i < 4; i++) {
			cmd->resp[1] <<= 8;
			/* read the next byte */
			if (cp == end) {
				value = mmc_spi_readbytes(host, 1);
				if (value < 0)
					goto done;
				cp = host->data->status;
				end = cp+1;
			}
			if (bitshift) {
				rotator |= *cp++ << bitshift;
				cmd->resp[1] |= (rotator >> 8);
				rotator <<= 8;
			} else {
				cmd->resp[1] |= *cp++;
			}
		}
		break;

	/* SPI R1 == just one status byte */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
			mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
			tag, cmd->resp[0], cmd->resp[1]);

	/* disable chipselect on errors and some success cases */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}

/* Issue command and read its response.
 * Returns zero on success, negative for error.
 *
 * On error, caller must cope with mmc core retry mechanism.  That
 * means immediate low-level resubmit, which affects the bus lock...
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch		*data = host->data;
	u8			*cp = data->status;
	int			status;
	struct spi_transfer	*t;

	/* We can handle most commands (except block reads) in one full
	 * duplex I/O operation before either starting the next transfer
	 * (data block or command) or else deselecting the card.
	 *
	 * First, write 7 bytes:
	 *  - an all-ones byte to ensure the card is ready
	 *  - opcode byte (plus start and transmission bits)
	 *  - four bytes of big-endian argument
	 *  - crc7 (plus end bit) ... always computed, it's cheap
	 *
	 * We init the whole buffer to all-ones, which is what we need
	 * to write while we're reading (later) response data.
	 */
	memset(cp, 0xff, sizeof(data->status));

	cp[1] = 0x40 | cmd->opcode;
	put_unaligned_be32(cmd->arg, cp + 2);
	cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
	cp += 7;
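
	/*
	 * For illustration: CMD0 with a zero argument goes out on the wire
	 * as ff 40 00 00 00 00 95 -- the all-ones lead-in left over from
	 * the memset above, the start/transmission bits OR'd into the
	 * opcode, the big-endian argument, and crc7 shifted left with the
	 * end bit set.
	 */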

	/* Then, read up to 13 bytes (while writing all-ones):
	 *  - N(CR) (== 1..8) bytes of all-ones
	 *  - status byte (for all response types)
	 *  - the rest of the response, either:
	 *      + nothing, for R1 or R1B responses
	 *	+ second status byte, for R2 responses
	 *	+ four data bytes, for R3 and R7 responses
	 *
	 * Finally, read some more bytes ... in the nice cases we know in
	 * advance how many, and reading 1 more is always OK:
	 *  - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
	 *  - N(RC) (== 1..N) bytes of all-ones, before next command
	 *  - N(WR) (== 1..N) bytes of all-ones, before data write
	 *
	 * So in those cases one full duplex I/O of at most 21 bytes will
	 * handle the whole command, leaving the card ready to receive a
	 * data block or new command.  We do that whenever we can, shaving
	 * CPU and IRQ costs (especially when using DMA or FIFOs).
	 *
	 * There are two other cases, where it's not generally practical
	 * to rely on a single I/O:
	 *
	 *  - R1B responses need at least N(EC) bytes of all-zeroes.
	 *
	 *    In this case we can *try* to fit it into one I/O, then
	 *    maybe read more data later.
	 *
	 *  - Data block reads are more troublesome, since a variable
	 *    number of padding bytes precede the token and data.
	 *      + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
	 *      + N(AC) (== 1..many) bytes of all-ones
	 *
	 *    In this case we currently only have minimal speedups here:
	 *    when N(CR) == 1 we can avoid I/O in response_get().
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		cp += 2;	/* min(N(CR)) + status */
		/* R1 */
	} else {
		cp += 10;	/* max(N(CR)) + status + min(N(RC),N(WR)) */
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else:  R1 (most commands) */
	}
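
	/*
	 * (With the 7 command bytes above, this works out to e.g. a 17-byte
	 * transfer for plain R1 commands, 21 bytes for R3/R4/R7, and the
	 * whole 29-byte scratch buffer for R1B.)
	 */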

	dev_dbg(&host->spi->dev, "  CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	status = spi_sync_locked(host->spi, &host->m);
	if (status < 0) {
		dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* after no-data commands and STOP_TRANSMISSION, chipselect off */
	return mmc_spi_response_get(host, cmd, cs_on);
}

/* Build data message with up to four separate transfers.  For TX, we
 * start by writing the data token.  And in most cases, we finish with
 * a status transfer.
 *
 * We always provide TX data for data and CRC.  The MMC/SD protocol
 * requires us to write ones; but Linux defaults to writing zeroes;
 * so we explicitly initialize it to all ones on RX paths.
 */
static void
mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write)
{
	struct spi_transfer	*t;
	struct scratch		*scratch = host->data;

	spi_message_init(&host->m);

	/* for reads, readblock() skips 0xff bytes before finding
	 * the token; for writes, this transfer issues that token.
	 */
	if (write) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		spi_message_add_tail(t, &host->m);
	}

	/* Body of transfer is buffer, then CRC ...
	 * either TX-only, or RX with TX-ones.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	/* length and actual buffer info are written later */
	spi_message_add_tail(t, &host->m);

	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (write) {
		/* the actual CRC may get written later */
		t->tx_buf = &scratch->crc_val;
	} else {
		t->tx_buf = host->ones;
		t->rx_buf = &scratch->crc_val;
	}
	spi_message_add_tail(t, &host->m);

	/*
	 * A single block read is followed by N(EC) [0+] all-ones bytes
	 * before deselect ... don't bother.
	 *
	 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
	 * the next block is read, or a STOP_TRANSMISSION is issued.  We'll
	 * collect that single byte, so readblock() doesn't need to.
	 *
	 * For a write, the one-byte data response follows immediately, then
	 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
	 * Then single block reads may deselect, and multiblock ones issue
	 * the next token (next data block, or STOP_TRAN).  We can try to
	 * minimize I/O ops by using a single read to collect end-of-busy.
	 */
	if (multiple || write) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		t->len = write ? sizeof(scratch->status) : 1;
		t->tx_buf = host->ones;
		t->rx_buf = scratch->status;
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
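
	/*
	 * (Net result: for a write the message is token + data + crc + a
	 * status-sized readback; for a read it's the TX-ones data + crc,
	 * plus one early-status byte in the multiblock case.  The per-block
	 * buffer pointer and length are filled in by mmc_spi_data_do(), and
	 * the CRC value by mmc_spi_writeblock().)
	 */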
}

/*
 * Write one block:
 *  - caller handled preceding N(WR) [1+] all-ones bytes
 *  - data block
 *	+ token
 *	+ data bytes
 *	+ crc16
 *  - an all-ones byte ... card writes a data-response byte
 *  - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
 *
 * Return negative errno, else success.
 */
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device	*spi = host->spi;
	int			status, i;
	struct scratch		*scratch = host->data;
	u32			pattern;

	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));

	status = spi_sync_locked(spi, &host->m);
	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	/*
	 * Get the transmission data-response reply.  It must follow
	 * immediately after the data block we transferred.  This reply
	 * doesn't necessarily tell whether the write operation succeeded;
	 * it just says if the transmission was ok and whether *earlier*
	 * writes succeeded; see the standard.
	 *
	 * In practice, there are (even modern SDHC-)cards which are late
	 * in sending the response, and miss the time frame by a few bits,
	 * so we have to cope with this situation and check the response
	 * bit-by-bit. Arggh!!!
	 */
	pattern = get_unaligned_be32(scratch->status);

	/* First 3 bits of the pattern are undefined */
	pattern |= 0xE0000000;

	/* left-adjust to leading 0 bit */
	while (pattern & 0x80000000)
		pattern <<= 1;
	/* right-adjust for pattern matching. Code is in bit 4..0 now. */
	pattern >>= 27;
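
	/*
	 * Worked example: an "accepted" reply of 0xe5 followed by busy
	 * zeroes gives pattern 0xe5000000; the three leading one-bits are
	 * skipped and the next five bits, 0b00101, match
	 * SPI_RESPONSE_ACCEPTED.  The same arithmetic still lines up when
	 * the reply arrives a few bit-times late.
	 */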

	switch (pattern) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION,
		 * and should MMC_SEND_STATUS to sort it out
		 */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	t->tx_buf += t->len;

	/* Return when not busy.  If we didn't collect that status yet,
	 * we'll need some more I/O.
	 */
	for (i = 4; i < sizeof(scratch->status); i++) {
		/* card is non-busy if the most recent bit is 1 */
		if (scratch->status[i] & 0x01)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, timeout);
}

/*
 * Read one block:
 *  - skip leading all-ones bytes ... either
 *      + N(AC) [1..f(clock,CSD)] usually, else
 *      + N(CX) [0..8] when reading CSD or CID
 *  - data block
 *	+ token ... if error token, no data or crc
 *	+ data bytes
 *	+ crc16
 *
 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
 * before dropping chipselect.
 *
 * For multiblock reads, caller either reads the next block or issues a
 * STOP_TRANSMISSION command.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device	*spi = host->spi;
	int			status;
	struct scratch		*scratch = host->data;
	unsigned int		bitshift;
	u8			leftover;

	/* At least one SD card sends an all-zeroes byte when N(CX)
	 * applies, before the all-ones bytes ... just cope with that.
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host, timeout);

	if (status < 0) {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
		return status;
	}

	/* The token may be bit-shifted...
	 * the first 0-bit precedes the data stream.
	 */
	bitshift = 7;
	while (status & 0x80) {
		status <<= 1;
		bitshift--;
	}
	leftover = status << 1;
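	/* (leftover now holds the data bits that arrived in the same byte
	 * as the token's 0-bit; each raw byte below contributes its top
	 * (8 - bitshift) bits to rebuild one aligned byte.)
	 */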

	status = spi_sync_locked(spi, &host->m);
	if (status < 0) {
		dev_dbg(&spi->dev, "read error %d\n", status);
		return status;
	}

	if (bitshift) {
		/* Walk through the data and the crc and do
		 * all the magic to get byte-aligned data.
		 */
		u8 *cp = t->rx_buf;
		unsigned int len;
		unsigned int bitright = 8 - bitshift;
		u8 temp;
		for (len = t->len; len; len--) {
			temp = *cp;
			*cp++ = leftover | (temp >> bitshift);
			leftover = temp << bitright;
		}
		cp = (u8 *) &scratch->crc_val;
		temp = *cp;
		*cp++ = leftover | (temp >> bitshift);
		leftover = temp << bitright;
		temp = *cp;
		*cp = leftover | (temp >> bitshift);
	}

	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);

		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev,
				"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
				scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	t->rx_buf += t->len;

	return 0;
}

/*
 * An MMC/SD data stage includes one or more blocks, optional CRCs,
 * and inline handshaking.  That handshaking makes it unlike most
 * other SPI protocol stacks.
 */
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device	*spi = host->spi;
	struct spi_transfer	*t;
	struct scatterlist	*sg;
	unsigned		n_sg;
	bool			multiple = (data->blocks > 1);
	bool			write = (data->flags & MMC_DATA_WRITE);
	const char		*write_or_read = write ? "write" : "read";
	u32			clock_rate;
	unsigned long		timeout;

	mmc_spi_setup_data_message(host, multiple, write);
	t = &host->t;

	if (t->speed_hz)
		clock_rate = t->speed_hz;
	else
		clock_rate = spi->max_speed_hz;

	timeout = data->timeout_ns / 1000 +
		  data->timeout_clks * 1000000 / clock_rate;
	timeout = usecs_to_jiffies((unsigned int)timeout) + 1;
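	/* (e.g. a card asking for 100 ms and no extra clocks works out to
	 * 100000 us here, i.e. usecs_to_jiffies(100000) plus the one-jiffy
	 * guard.)
	 */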

	/* Handle scatterlist segments one at a time, with synch for
	 * each 512-byte block
	 */
	for_each_sg(data->sg, sg, data->sg_len, n_sg) {
		int			status = 0;
		void			*kmap_addr;
		unsigned		length = sg->length;

		/* allow pio too; we don't allow highmem */
		kmap_addr = kmap(sg_page(sg));
		if (write)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, and update request status */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&spi->dev, "    %s block, %d bytes\n", write_or_read, t->len);

			if (write)
				status = mmc_spi_writeblock(host, t, timeout);
			else
				status = mmc_spi_readblock(host, t, timeout);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (write)
			/* nothing to do */;
		else
			flush_dcache_page(sg_page(sg));
		kunmap(sg_page(sg));

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n", write_or_read, status);
			break;
		}
	}

	/* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
	 * can be issued before multiblock writes.  Unlike its more widely
	 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
	 * it can affect the STOP_TRAN logic.  Complete (and current)
	 * MMC specs should sort that out before Linux starts using CMD23.
	 */
	if (write && multiple) {
		struct scratch	*scratch = host->data;
		int		tmp;
		const unsigned	statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, "    STOP_TRAN\n");

		/* Tweak the per-block message we set up earlier by morphing
		 * it to hold single buffer with the token followed by some
		 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
		 * "not busy any longer" status, and leave chip selected.
		 */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
				&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.len = statlen;

		tmp = spi_sync_locked(spi, &host->m);
		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally we collected "not busy" status with one I/O,
		 * avoiding wasteful byte-at-a-time scanning... but more
		 * I/O is often needed.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}

/****************************************************************************/

/*
 * MMC driver implementation -- the interface to the MMC stack
 */

static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host	*host = mmc_priv(mmc);
	int			status = -EINVAL;
	int			crc_retry = 5;
	struct mmc_command	stop;

#ifdef DEBUG
	/* MMC core and layered drivers *MUST* issue SPI-aware commands */
	{
		struct mmc_command	*cmd;
		int			invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		cmd = mrq->stop;
		if (cmd && !mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus STOP command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		if (invalid) {
			dump_stack();
			mmc_request_done(host->mmc, mrq);
			return;
		}
	}
#endif

	/* request exclusive bus access */
	spi_bus_lock(host->spi->controller);

crc_recover:
	/* issue command; then optionally data and stop */
	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
	if (status == 0 && mrq->data) {
		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);

		/*
		 * The SPI bus is not always reliable for large data transfers.
		 * If an occasional crc error is reported by the SD device with
		 * data read/write over SPI, it may be recovered by repeating
		 * the last SD command again. The retry count is set to 5 to
		 * ensure the driver passes stress tests.
		 */
		if (mrq->data->error == -EILSEQ && crc_retry) {
			stop.opcode = MMC_STOP_TRANSMISSION;
			stop.arg = 0;
			stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			status = mmc_spi_command_send(host, mrq, &stop, 0);
			crc_retry--;
			mrq->data->error = 0;
			goto crc_recover;
		}

		if (mrq->stop)
			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
		else
			mmc_cs_off(host);
	}

	/* release the bus */
	spi_bus_unlock(host->spi->controller);

	mmc_request_done(host->mmc, mrq);
}

/* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
 *
 * NOTE that here we can't know that the card has just been powered up;
 * not all MMC/SD sockets support power switching.
 *
 * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
 * this doesn't seem to do the right thing at all...
 */
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed;
	 * wait till not-busy, skip debris from any old commands.
	 */
	mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
	mmc_spi_readbytes(host, 10);

	/*
	 * Do a burst with chipselect active-high.  We need to do this to
	 * meet the requirement of 74 clock cycles with both chipselect
	 * and CMD (MOSI) high before CMD0 ... after the card has been
	 * powered up to Vdd(min), and so is ready to take commands.
	 *
	 * Some cards are particularly needy of this (e.g. Viking "SD256")
	 * while most others don't seem to care.
	 *
	 * Note that this is one of the places MMC/SD plays games with the
	 * SPI protocol.  Another is that when chipselect is released while
	 * the card returns BUSY status, the clock must issue several cycles
	 * with chipselect high before the card will stop driving its output.
	 *
	 * SPI_CS_HIGH means "asserted" here.  In some cases, like when using
	 * GPIOs for chip select, SPI_CS_HIGH is set but will be logically
	 * inverted by gpiolib, so if we want to be sure the line is driven
	 * high we should toggle the default with an XOR as we do here.
	 */
	host->spi->mode ^= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* Just warn; most cards work without it. */
		dev_warn(&host->spi->dev,
				"can't change chip-select polarity\n");
		host->spi->mode ^= SPI_CS_HIGH;
	} else {
		mmc_spi_readbytes(host, 18);
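		/* (18 bytes is 144 clock cycles, comfortably more than the
		 * 74 the spec asks for)
		 */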

		host->spi->mode ^= SPI_CS_HIGH;
		if (spi_setup(host->spi) != 0) {
			/* Wot, we can't get the same setup we had before? */
			dev_err(&host->spi->dev,
					"can't restore chip-select polarity\n");
		}
	}
}

static char *mmc_powerstring(u8 power_mode)
{
	switch (power_mode) {
	case MMC_POWER_OFF: return "off";
	case MMC_POWER_UP:  return "up";
	case MMC_POWER_ON:  return "on";
	}
	return "?";
}

static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int		canpower;

		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "power %s (%d)%s\n",
				mmc_powerstring(ios->power_mode),
				ios->vdd,
				canpower ? ", can switch" : "");

		/* switch power on/off if possible, accounting for
		 * max 250msec powerup time if needed.
		 */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* See 6.4.1 in the simplified SD card physical spec 2.0 */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, ground all card inputs to avoid power
		 * delivery from data lines!  On a shared SPI bus, this
		 * will probably be temporary; 6.4.2 of the simplified SD
		 * spec says this must last at least 1msec.
		 *
		 *   - Clock low means CPOL 0, e.g. mode 0
		 *   - MOSI low comes from writing zero
		 *   - Chipselect is usually active low...
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;
			u8 nullbyte = 0;

			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			if (spi_write(host->spi, &nullbyte, 1) < 0)
				dev_dbg(&host->spi->dev,
					"put spi signals to low failed\n");

			/*
			 * Now clock should be low due to spi mode 0;
			 * MOSI should be low because of written 0x00;
			 * chipselect should be low (it is active low)
			 * power supply is off, so now MMC is off too!
			 *
			 * FIXME no, chipselect can be high since the
			 * device is inactive and SPI_CS_HIGH is clear...
			 */
			msleep(10);
			if (mres == 0) {
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3 failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int		status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev, "  clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}

static const struct mmc_host_ops mmc_spi_ops = {
	.request	= mmc_spi_request,
	.set_ios	= mmc_spi_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
};


/****************************************************************************/

/*
 * SPI driver implementation
 */

static irqreturn_t
mmc_spi_detect_irq(int irq, void *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
	u16 delay_msec = max(host->pdata->detect_delay, (u16)100);

	mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
	return IRQ_HANDLED;
}

static int mmc_spi_probe(struct spi_device *spi)
{
	void			*ones;
	struct mmc_host		*mmc;
	struct mmc_spi_host	*host;
	int			status;
	bool			has_ro = false;

	/* We rely on full duplex transfers, mostly to reduce
	 * per-transfer overheads (by making fewer transfers).
	 */
	if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
		return -EINVAL;

	/* MMC and SD specs only seem to care that sampling is on the
	 * rising edge ... meaning SPI modes 0 or 3.  So either SPI mode
	 * should be legit.  We'll use mode 0 since the steady state is 0,
	 * which is appropriate for hotplugging, unless the platform data
	 * specify mode 3 (if hardware is not compatible to mode 0).
	 */
	if (spi->mode != SPI_MODE_3)
		spi->mode = SPI_MODE_0;
	spi->bits_per_word = 8;

	status = spi_setup(spi);
	if (status < 0) {
		dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
				spi->mode, spi->max_speed_hz / 1000,
				status);
		return status;
	}

	/* We need a supply of ones to transmit.  This is the only time
	 * the CPU touches these, so cache coherency isn't a concern.
	 *
	 * NOTE if many systems use more than one MMC-over-SPI connector
	 * it'd save some memory to share this.  That's evidently rare.
	 */
	status = -ENOMEM;
	ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
	if (!ones)
		goto nomem;
	memset(ones, 0xff, MMC_SPI_BLOCKSIZE);

	mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
	if (!mmc)
		goto nomem;

	mmc->ops = &mmc_spi_ops;
	mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
	mmc->max_segs = MMC_SPI_BLOCKSATONCE;
	mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
	mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;

	mmc->caps = MMC_CAP_SPI;

	/* SPI doesn't need the lowspeed device identification thing for
	 * MMC or SD cards, since it never comes up in open drain mode.
	 * That's good; some SPI masters can't handle very low speeds!
	 *
	 * However, low speed SDIO cards need not handle over 400 KHz;
	 * that's the only reason not to use a few MHz for f_min (until
	 * the upper layer reads the target frequency from the CSD).
	 */
	if (spi->controller->min_speed_hz > 400000)
		dev_warn(&spi->dev, "Controller unable to reduce bus clock to 400 KHz\n");

	mmc->f_min = max(spi->controller->min_speed_hz, 400000);
	mmc->f_max = spi->max_speed_hz;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->spi = spi;

	host->ones = ones;

	dev_set_drvdata(&spi->dev, mmc);

	/* Platform data is used to hook up things like card sensing
	 * and power switching gpios.
	 */
	host->pdata = mmc_spi_get_pdata(spi);
	if (host->pdata)
		mmc->ocr_avail = host->pdata->ocr_mask;
	if (!mmc->ocr_avail) {
		dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
		mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
	}
	if (host->pdata && host->pdata->setpower) {
		host->powerup_msecs = host->pdata->powerup_msecs;
		if (!host->powerup_msecs || host->powerup_msecs > 250)
			host->powerup_msecs = 250;
	}

	/* Preallocate buffers */
	host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
	if (!host->data)
		goto fail_nobuf1;

	/* setup message for status/busy readback */
	spi_message_init(&host->readback);

	spi_message_add_tail(&host->status, &host->readback);
	host->status.tx_buf = host->ones;
	host->status.rx_buf = &host->data->status;
	host->status.cs_change = 1;

	/* register card detect irq */
	if (host->pdata && host->pdata->init) {
		status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
		if (status != 0)
			goto fail_glue_init;
	}

	/* pass platform capabilities, if any */
	if (host->pdata) {
		mmc->caps |= host->pdata->caps;
		mmc->caps2 |= host->pdata->caps2;
	}

	status = mmc_add_host(mmc);
	if (status != 0)
		goto fail_glue_init;

	/*
	 * Index 0 is card detect
	 * Old boardfiles were specifying 1 ms as debounce
	 */
	status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
	if (status == -EPROBE_DEFER)
		goto fail_gpiod_request;
	if (!status) {
		/*
		 * The platform has a CD GPIO signal that may support
		 * interrupts, so let mmc_gpiod_request_cd_irq() decide
		 * if polling is needed or not.
		 */
		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
		mmc_gpiod_request_cd_irq(mmc);
	}
	mmc_detect_change(mmc, 0);

	/* Index 1 is write protect/read only */
	status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
	if (status == -EPROBE_DEFER)
		goto fail_gpiod_request;
	if (!status)
		has_ro = true;

	dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n",
			dev_name(&mmc->class_dev),
			has_ro ? "" : ", no WP",
			(host->pdata && host->pdata->setpower)
				? "" : ", no poweroff",
			(mmc->caps & MMC_CAP_NEEDS_POLL)
				? ", cd polling" : "");
	return 0;

fail_gpiod_request:
	mmc_remove_host(mmc);
fail_glue_init:
	kfree(host->data);
fail_nobuf1:
	mmc_spi_put_pdata(spi);
	mmc_free_host(mmc);
nomem:
	kfree(ones);
	return status;
}


static void mmc_spi_remove(struct spi_device *spi)
{
	struct mmc_host		*mmc = dev_get_drvdata(&spi->dev);
	struct mmc_spi_host	*host = mmc_priv(mmc);

	/* prevent new mmc_detect_change() calls */
	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&spi->dev, mmc);

	mmc_remove_host(mmc);

	kfree(host->data);
	kfree(host->ones);

	spi->max_speed_hz = mmc->f_max;
	mmc_spi_put_pdata(spi);
	mmc_free_host(mmc);
}

static const struct spi_device_id mmc_spi_dev_ids[] = {
	{ "mmc-spi-slot"},
	{ },
};
MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);

static const struct of_device_id mmc_spi_of_match_table[] = {
	{ .compatible = "mmc-spi-slot", },
	{},
};
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);

static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name =		"mmc_spi",
		.of_match_table = mmc_spi_of_match_table,
	},
	.id_table =	mmc_spi_dev_ids,
	.probe =	mmc_spi_probe,
	.remove =	mmc_spi_remove,
};

module_spi_driver(mmc_spi_driver);

MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");