// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
			offsetof(struct spi_statistics, field));	\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

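/*
 * The transfer length histogram bucket below is the position of the highest
 * set bit in the length, e.g. (illustrative values) len = 300 gives
 * fls() = 9, i.e. bucket 8 ("256-511"); a zero-length transfer gives
 * fls() = 0, i.e. l2len = -1, which is clamped to bucket 0 ("0-1").
 */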
static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_message *msg)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if (spi_valid_txbuf(msg, xfer))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if (spi_valid_rxbuf(msg, xfer))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);
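
/*
 * Example (a minimal sketch; the "foo" driver and its chip-info type are
 * hypothetical): probe() can fetch per-chip data this way regardless of
 * whether the device matched via DT/ACPI or via the spi_device_id table:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct foo_chip_info *info;
 *
 *		info = spi_get_device_match_data(spi);
 *		if (!info)
 *			return -ENODEV;
 *		...
 *	}
 */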

static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

const struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
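
/*
 * Example (a minimal sketch for a hypothetical "acme,foo" chip): providing
 * a spi_device_id table alongside the OF match table keeps module
 * autoloading working and avoids the warning issued above:
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },	// compatible string minus the vendor prefix
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */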

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into board-specific init code such as
 * arch/.../mach.../board-YYY.c, along with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect the add/del operations on the board_info list and the
 * spi_controller list and their matching process; also used to protect
 * the struct idr object.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, it should call
 * spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (is_acpi_device_node(fwnode)) {
		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
		return;
	}

	if (is_software_node(fwnode)) {
		dev_set_name(dev, "spi-%pfwP", fwnode);
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

/*
 * Zero (0) is a valid physical CS value and can be located at any
 * logical CS in spi->chip_select[]. If all the physical CS were
 * initialized to 0, it would be impossible to differentiate between
 * a valid physical CS 0 and an unused logical CS whose physical CS
 * happens to be 0. To resolve this, initialize all the CS to -1.
 * All unused logical CS then carry a physical CS value of -1 and can
 * be ignored when performing physical CS validity checks.
 */
#define SPI_INVALID_CS		((s8)-1)

static inline bool is_valid_cs(s8 chip_select)
{
	return chip_select != SPI_INVALID_CS;
}

static inline int spi_dev_check_cs(struct device *dev,
				   struct spi_device *spi, u8 idx,
				   struct spi_device *new_spi, u8 new_idx)
{
	u8 cs, cs_new;
	u8 idx_new;

	cs = spi_get_chipselect(spi, idx);
	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
		cs_new = spi_get_chipselect(new_spi, idx_new);
		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
			dev_err(dev, "chipselect %u already in use\n", cs_new);
			return -EBUSY;
		}
	}
	return 0;
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;
	int status, idx;

	if (spi->controller == new_spi->controller) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
			if (status)
				return status;
		}
	}
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status, idx;
	u8 cs;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		/* Chipselects are numbered 0..max; validate. */
		cs = spi_get_chipselect(spi, idx);
		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
				ctlr->num_chipselect);
			return -EINVAL;
		}
	}

	/*
	 * Make sure that multiple logical CS doesn't map to the same physical CS.
	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
	 */
	if (!spi_controller_is_target(ctlr)) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
			if (status)
				return status;
		}
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status)
		return status;

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods) {
		u8 cs;

		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			cs = spi_get_chipselect(spi, idx);
			if (is_valid_cs(cs))
				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
		}
	}

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
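
/*
 * Example (a minimal sketch of the two-step registration path; names and
 * values are hypothetical, error handling shortened; note that, as
 * spi_new_device() below does, the unused chip selects may also need to
 * be initialized):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi->max_speed_hz = 1000000;
 *	spi_set_chipselect(spi, 0, 0);
 *
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard without registering
 *		return -ENODEV;
 *	}
 */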

static void spi_set_all_cs_unused(struct spi_device *spi)
{
	u8 idx;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	/* Use provided chip-select for proxy device */
	spi_set_all_cs_unused(proxy);
	spi_set_chipselect(proxy, 0, chip->chip_select);

	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	proxy->cs_index_mask = BIT(0);

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
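
/*
 * Example (a minimal sketch, hypothetical values): an adapter driver that
 * learned about a device out-of-band can instantiate it directly:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 0,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *spi = spi_new_device(ctlr, &chip);
 *	if (!spi)
 *		dev_err(&ctlr->dev, "can't add foo\n");
 */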

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
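
/*
 * Example (a minimal sketch of board init code, hypothetical values):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */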

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/
#define spi_for_each_valid_cs(spi, idx)				\
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
		if (!(spi->cs_index_mask & BIT(idx))) {} else
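
/*
 * Note the empty "{} else" arm above: it makes the macro bind the statement
 * that follows its use, so callers can attach a block as with a plain for
 * loop while chip selects absent from spi->cs_index_mask are skipped.
 */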

static inline bool spi_is_last_cs(struct spi_device *spi)
{
	u8 idx;
	bool last = false;

	spi_for_each_valid_cs(spi, idx) {
		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
			last = true;
	}
	return last;
}

static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
	/*
	 * Historically ACPI has no means of expressing the GPIO polarity,
	 * so the SPISerialBus() resource defines it on a per-chip basis.
	 * In order to avoid a chain of negations, the GPIO polarity is
	 * considered to be Active High. Even for the cases when _DSD() is
	 * involved (in the updated versions of ACPI) the GPIO CS polarity
	 * must be defined Active High to avoid ambiguity. That's why we
	 * use @enable, which takes SPI_CS_HIGH into account.
	 */
	if (has_acpi_companion(&spi->dev))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
	else
		/* Polarity handled by GPIO library */
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

	if (activate)
		spi_delay_exec(&spi->cs_setup, NULL);
	else
		spi_delay_exec(&spi->cs_inactive, NULL);
}

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;
	u8 idx;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
			spi_is_last_cs(spi)) ||
		       (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
			!spi_is_last_cs(spi))) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	/*
	 * Handle chip select delays for GPIO based CS or controllers without
	 * programmable chip select timing.
	 */
	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi_is_csgpiod(spi)) {
		if (!(spi->mode & SPI_NO_CS)) {
			spi_for_each_valid_cs(spi, idx) {
				if (spi_get_csgpiod(spi, idx))
					spi_toggle_csgpiod(spi, idx, enable, activate);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	dma_unmap_sgtable(dev, sgt, dir, attrs);
	sg_free_table(sgt);
	sgt->orig_nents = 0;
	sgt->nents = 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;

			xfer->tx_sg_mapped = true;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						&xfer->tx_sg, DMA_TO_DEVICE,
						attrs);

				return ret;
			}

			xfer->rx_sg_mapped = true;
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (xfer->rx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
					    DMA_FROM_DEVICE, attrs);
		xfer->rx_sg_mapped = false;

		if (xfer->tx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
					    DMA_TO_DEVICE, attrs);
		xfer->tx_sg_mapped = false;
	}

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_target(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Double it and add 200 ms of tolerance; use the
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}

		if (xfer->error & SPI_TRANS_FAIL_IO)
			return -EIO;
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it by
		 * underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
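
/*
 * Example (a minimal sketch): executing a delay of 5 SCK cycles, scaled
 * against the transfer's (effective) clock rate:
 *
 *	struct spi_delay d = {
 *		.value	= 5,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *	int ret = spi_delay_exec(&d, xfer);	// needs a valid xfer for SCK units
 */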

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
						  struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
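
/*
 * Example (a minimal sketch, hypothetical values): request a 50 us pause
 * after the chip select toggle that follows this transfer:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf		= buf,
 *		.len		= sizeof(buf),
 *		.cs_change	= 1,
 *		.cs_change_delay = {
 *			.value	= 50,
 *			.unit	= SPI_DELAY_UNIT_USECS,
 *		},
 *	};
 */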

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, msg);
		spi_statistics_add_transfer_stats(stats, xfer, msg);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
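
/*
 * Example (illustrative sketch): a controller driver whose transfer_one()
 * returned 1 to signal an asynchronous, interrupt driven transfer would
 * typically call spi_finalize_current_transfer() from its completion IRQ,
 * which wakes the spi_transfer_wait() call above. The foo_* handler and
 * helper are hypothetical; only the final call is the real API.
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		foo_drain_rx_fifo(ctlr);		// hypothetical helper
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */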

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
		struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers
	 * will do this in the calling context, but some don't. For those
	 * cases, a completion is used to guarantee that this function does
	 * not return until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags make it possible to opportunistically
	 * skip the completion, since using it involves expensive spin
	 * locks. In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be
	 * used, due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}

/**
 * __spi_pump_messages - function which processes the SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and, if so, calls out to the driver to initialize
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Take the I/O mutex */
	mutex_lock(&ctlr->io_mutex);

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg)
		goto out_unlock;

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy)
			goto out_unlock;

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				ctlr->queue_empty = true;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			goto out_unlock;
		}

		ctlr->busy = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->queue_empty = true;
		goto out_unlock;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
	return;

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post() or otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer. The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call. The
 * timestamp is only taken once, at the first such call. It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer. Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = 1;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
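
/*
 * Example (illustrative sketch): a PIO controller driver supporting
 * software timestamping might bracket each word pushed to its TX FIFO
 * with the two helpers; @progress is counted in words. foo_writel(),
 * regs and FOO_TX_FIFO are hypothetical names.
 *
 *	const u32 *tx = xfer->tx_buf;
 *	size_t i;
 *
 *	for (i = 0; i < nwords; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_writel(tx[i], regs + FOO_TX_FIFO);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */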

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		"will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;
	ctlr->queue_empty = true;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* Get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
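
/*
 * Example (illustrative sketch): a driver implementing its own
 * transfer_one_message() can peek at the queue to decide whether to keep
 * its hardware primed for a follow-on message. foo_disable_clocks() is a
 * hypothetical helper.
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_disable_clocks(priv);
 */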

/*
 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
 *                            and spi_maybe_unoptimize_message()
 * @msg: the message to unoptimize
 *
 * Peripheral drivers should use spi_unoptimize_message() and callers inside
 * core should use spi_maybe_unoptimize_message() rather than calling this
 * function directly.
 *
 * It is not valid to call this on a message that is not currently optimized.
 */
static void __spi_unoptimize_message(struct spi_message *msg)
{
	struct spi_controller *ctlr = msg->spi->controller;

	if (ctlr->unoptimize_message)
		ctlr->unoptimize_message(msg);

	spi_res_release(ctlr, msg);

	msg->optimized = false;
	msg->opt_state = NULL;
}

/*
 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
 * @msg: the message to unoptimize
 *
 * This function is used to unoptimize a message if and only if it was
 * optimized by the core (via spi_maybe_optimize_message()).
 */
static void spi_maybe_unoptimize_message(struct spi_message *msg)
{
	if (!msg->pre_optimized && msg->optimized &&
	    !msg->spi->controller->defer_optimize_message)
		__spi_unoptimize_message(msg);
}

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	int ret;

	mesg = ctlr->cur_msg;

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	if (mesg->prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	mesg->prepared = false;

	spi_maybe_unoptimize_message(mesg);

	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
	smp_mb(); /* See __spi_pump_transfer_message()... */
	if (READ_ONCE(ctlr->cur_msg_need_completion))
		complete(&ctlr->cur_msg_completion);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned int limit = 500;
	unsigned long flags;

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	do {
		spin_lock_irqsave(&ctlr->queue_lock, flags);
		if (list_empty(&ctlr->queue) && !ctlr->busy) {
			ctlr->running = false;
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return 0;
		}
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
	} while (--limit);

	return -EBUSY;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_destroy_worker(ctlr->kworker);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	ctlr->queue_empty = false;
	if (!ctlr->busy && need_pump)
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: SPI device which is requesting transfer
 * @msg: SPI message to be queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. It is used by the spi-mem code to make sure
 * SPI memory operations do not preempt regular SPI transfers that have been
 * queued before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static void of_spi_parse_dt_cs_delay(struct device_node *nc,
				     struct spi_delay *delay, const char *prop)
{
	u32 value;

	if (!of_property_read_u32(nc, prop, &value)) {
		if (value > U16_MAX) {
			delay->value = DIV_ROUND_UP(value, 1000);
			delay->unit = SPI_DELAY_UNIT_USECS;
		} else {
			delay->value = value;
			delay->unit = SPI_DELAY_UNIT_NSECS;
		}
	}
}

static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value, cs[SPI_CS_CNT_MAX];
	int rc, idx;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 0:
			spi->mode |= SPI_NO_TX;
			break;
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_TX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 0:
			spi->mode |= SPI_NO_RX;
			break;
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_RX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (spi_controller_is_target(ctlr)) {
		if (!of_node_name_eq(nc, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
		dev_err(&ctlr->dev, "Number of CS exceeds the maximum number of supported CS\n");
		return -EINVAL;
	}

	spi_set_all_cs_unused(spi);

	/* Device address */
	rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
						 SPI_CS_CNT_MAX);
	if (rc < 0) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	if (rc > ctlr->num_chipselect) {
		dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
			nc, rc);
		return rc;
	}
	if ((of_property_read_bool(nc, "parallel-memories")) &&
	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
		dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
		return -EINVAL;
	}
	for (idx = 0; idx < rc; idx++)
		spi_set_chipselect(spi, idx, cs[idx]);

	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	spi->cs_index_mask = BIT(0);

	/* Device speed */
	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
		spi->max_speed_hz = value;

	/* Device CS delays */
	of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
	of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
	of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");

	return 0;
}
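
/*
 * Example (illustrative sketch): a device tree child node exercising the
 * properties parsed above. The compatible string and values are made up.
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "vendor,foo-flash";
 *			reg = <0>;
 *			spi-max-frequency = <25000000>;
 *			spi-tx-bus-width = <4>;
 *			spi-rx-bus-width = <4>;
 *			spi-cs-setup-delay-ns = <50>;
 *		};
 *	};
 */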

static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_alias_from_compatible(nc, spi->modalias,
				      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);

	device_set_node(&spi->dev, of_fwnode_handle(nc));

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of the controller node that
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

/**
 * spi_new_ancillary_device() - Register ancillary SPI device
 * @spi:         Pointer to the main SPI device registering the ancillary device
 * @chip_select: Chip Select of the ancillary device
 *
 * Register an ancillary SPI device; for example some chips have a chip-select
 * for normal device usage and another one for setup/firmware upload.
 *
 * This may only be called from the main SPI device's probe routine.
 *
 * Return: a pointer to the new device, or ERR_PTR on error.
 */
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
					    u8 chip_select)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_device *ancillary;
	int rc;

	/* Alloc an spi_device */
	ancillary = spi_alloc_device(ctlr);
	if (!ancillary) {
		rc = -ENOMEM;
		goto err_out;
	}

	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));

	/* Use provided chip-select for ancillary device */
	spi_set_all_cs_unused(ancillary);
	spi_set_chipselect(ancillary, 0, chip_select);

	/* Take over SPI mode/speed from SPI main device */
	ancillary->max_speed_hz = spi->max_speed_hz;
	ancillary->mode = spi->mode;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	ancillary->cs_index_mask = BIT(0);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));

	/* Register the new device */
	rc = __spi_add_device(ancillary);
	if (rc) {
		dev_err(&spi->dev, "failed to register ancillary device\n");
		goto err_out;
	}

	return ancillary;

err_out:
	spi_dev_put(ancillary);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
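
/*
 * Example (illustrative sketch): a main device's probe() registering a
 * second chip select for firmware upload. The foo_* names are made up.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *		...
 *	}
 */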

#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
	struct spi_controller	*ctlr;
	u32			max_speed_hz;
	u32			mode;
	int			irq;
	u8			bits_per_word;
	u8			chip_select;
	int			n;
	int			index;
};

static int acpi_spi_count(struct acpi_resource *ares, void *data)
{
	struct acpi_resource_spi_serialbus *sb;
	int *count = data;

	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
		return 1;

	sb = &ares->data.spi_serial_bus;
	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
		return 1;

	*count = *count + 1;

	return 1;
}

/**
 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
 * @adev:	ACPI device
 *
 * Return: the number of SpiSerialBus resources in the ACPI-device's
 * resource-list; or a negative error code.
 */
int acpi_spi_count_resources(struct acpi_device *adev)
{
	LIST_HEAD(r);
	int count = 0;
	int ret;

	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
	if (ret < 0)
		return ret;

	acpi_dev_free_resource_list(&r);

	return count;
}
EXPORT_SYMBOL_GPL(acpi_spi_count_resources);

static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
					    struct acpi_spi_lookup *lookup)
{
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPHA;
}

static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct acpi_spi_lookup *lookup = data;
	struct spi_controller *ctlr = lookup->ctlr;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;
		acpi_handle parent_handle;
		acpi_status status;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {

			if (lookup->index != -1 && lookup->n++ != lookup->index)
				return 1;

			status = acpi_get_handle(NULL,
						 sb->resource_source.string_ptr,
						 &parent_handle);

			if (ACPI_FAILURE(status))
				return -ENODEV;

			if (ctlr) {
				if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
					return -ENODEV;
			} else {
				struct acpi_device *adev;

				adev = acpi_fetch_acpi_dev(parent_handle);
				if (!adev)
					return -ENODEV;

				ctlr = acpi_spi_find_controller_by_adev(adev);
				if (!ctlr)
					return -EPROBE_DEFER;

				lookup->ctlr = ctlr;
			}

			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				lookup->chip_select = cs;
			} else {
				lookup->chip_select = sb->device_selection;
			}

			lookup->max_speed_hz = sb->connection_speed;
			lookup->bits_per_word = sb->data_bit_length;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				lookup->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				lookup->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				lookup->mode |= SPI_CS_HIGH;
		}
	} else if (lookup->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			lookup->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

/**
 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
 * @ctlr: controller to which the spi device belongs
 * @adev: ACPI Device for the spi device
 * @index: Index of the spi resource inside the ACPI Node
 *
 * This should be used to allocate a new SPI device from an ACPI device node.
 * The caller is responsible for calling spi_add_device to register the SPI device.
 *
 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
 * using the resource.
 * If index is set to -1, index is not used.
 * Note: If index is -1, ctlr must be set.
 *
 * Return: a pointer to the new device, or ERR_PTR on error.
 */
struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
					 struct acpi_device *adev,
					 int index)
{
	acpi_handle parent_handle = NULL;
	struct list_head resource_list;
	struct acpi_spi_lookup lookup = {};
	struct spi_device *spi;
	int ret;

	if (!ctlr && index == -1)
		return ERR_PTR(-EINVAL);

	lookup.ctlr		= ctlr;
	lookup.irq		= -1;
	lookup.index		= index;
	lookup.n		= 0;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, &lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0)
		/* Found SPI in _CRS but it points to another controller */
		return ERR_PTR(ret);

	if (!lookup.max_speed_hz &&
	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
		/* Apple does not use _CRS but nested devices for SPI slaves */
		acpi_spi_parse_apple_properties(adev, &lookup);
	}

	if (!lookup.max_speed_hz)
		return ERR_PTR(-ENODEV);

	spi = spi_alloc_device(lookup.ctlr);
	if (!spi) {
		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return ERR_PTR(-ENOMEM);
	}

	spi_set_all_cs_unused(spi);
	spi_set_chipselect(spi, 0, lookup.chip_select);

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->max_speed_hz	= lookup.max_speed_hz;
	spi->mode		|= lookup.mode;
	spi->irq		= lookup.irq;
	spi->bits_per_word	= lookup.bits_per_word;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	spi->cs_index_mask	= BIT(0);

	return spi;
}
EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
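
/*
 * Example (illustrative sketch): allocating and registering an SPI device
 * for the first SpiSerialBus resource of an ACPI node, letting the core
 * look the controller up from the resource itself:
 *
 *	struct spi_device *spi;
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 0);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */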

static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct spi_device *spi;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = acpi_spi_device_alloc(ctlr, adev, -1);
	if (IS_ERR(spi)) {
		if (PTR_ERR(spi) == -ENOMEM)
			return AE_NO_MEMORY;
		else
			return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	struct spi_controller *ctlr = data;

	if (!adev)
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

#define SPI_ACPI_ENUMERATE_MAX_DEPTH		32

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};

#ifdef CONFIG_SPI_SLAVE
/**
 * spi_target_abort - abort the ongoing transfer request on an SPI slave
 *		      controller
 * @spi: device used for the current transfer
 */
int spi_target_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
		return ctlr->target_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_target_abort);

static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_any_child(&ctlr->dev);
	return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_any_child(&ctlr->dev);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strscpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR_RW(slave);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif

/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata(); the memory is cacheline aligned;
 *	drivers granting DMA access to portions of their private data need to
 *	round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller	*ctlr;
	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	mutex_init(&ctlr->add_lock);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
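
/*
 * Example (illustrative sketch): a controller driver's probe() allocating
 * a master with private data via the spi_alloc_master() wrapper. The
 * foo_* names are made up.
 *
 *	struct foo_spi *priv;
 *	struct spi_controller *ctlr;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->num_chipselect = 4;
 *	ctlr->transfer_one = foo_transfer_one;
 */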

static void devm_spi_release_controller(struct device *dev, void *ctlr)
{
	spi_controller_put(*(struct spi_controller **)ctlr);
}

/**
 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
 * @dev: physical device of SPI controller
 * @size: how much zeroed driver-private data to allocate
 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
 * Context: can sleep
 *
 * Allocate an SPI controller and automatically release a reference on it
 * when @dev is unbound from its driver.  Drivers are thus relieved from
 * having to call spi_controller_put().
 *
 * The arguments to this function are identical to __spi_alloc_controller().
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
						   unsigned int size,
						   bool slave)
{
	struct spi_controller **ptr, *ctlr;

	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	ctlr = __spi_alloc_controller(dev, size, slave);
	if (ctlr) {
		ctlr->devm_allocated = true;
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ctlr;
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
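
/*
 * Example (illustrative sketch): the devm_ variant removes the need for an
 * explicit spi_controller_put() on probe error paths:
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	// no spi_controller_put() needed on later probe errors
 */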

/**
 * spi_get_gpio_descs() - grab chip select GPIOs for the master
 * @ctlr: The SPI master to grab GPIO descriptors for
 */
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
	int nb, i;
	struct gpio_desc **cs;
	struct device *dev = &ctlr->dev;
	unsigned long native_cs_mask = 0;
	unsigned int num_cs_gpios = 0;

	nb = gpiod_count(dev, "cs");
	if (nb < 0) {
		/* No GPIOs at all is fine, else return the error */
		if (nb == -ENOENT)
			return 0;
		return nb;
	}

	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
			  GFP_KERNEL);
	if (!cs)
		return -ENOMEM;
	ctlr->cs_gpiods = cs;

	for (i = 0; i < nb; i++) {
		/*
		 * Most chipselects are active low, the inverted
		 * semantics are handled by special quirks in gpiolib,
		 * so initializing them GPIOD_OUT_LOW here means
		 * "unasserted", in most cases this will drive the physical
		 * line high.
		 */
		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
						      GPIOD_OUT_LOW);
		if (IS_ERR(cs[i]))
			return PTR_ERR(cs[i]);

		if (cs[i]) {
			/*
			 * If we find a CS GPIO, name it after the device and
			 * chip select line.
			 */
			char *gpioname;

			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
						  dev_name(dev), i);
			if (!gpioname)
				return -ENOMEM;
			gpiod_set_consumer_name(cs[i], gpioname);
			num_cs_gpios++;
			continue;
		}

		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
			dev_err(dev, "Invalid native chip select %d\n", i);
			return -EINVAL;
		}
		native_cs_mask |= BIT(i);
	}

	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;

	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
		dev_err(dev, "No unused native chip select available\n");
		return -EINVAL;
	}

	return 0;
}

static int spi_controller_check_ops(struct spi_controller *ctlr)
{
	/*
	 * The controller may implement only the high-level SPI-memory like
	 * operations if it does not support regular SPI transfers, and this
	 * is a valid use case.
	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at
	 * least one of the ->transfer_xxx() methods be implemented.
	 */
	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		if (!ctlr->transfer && !ctlr->transfer_one &&
		   !ctlr->transfer_one_message) {
			return -EINVAL;
		}
	}

	return 0;
}

/* Allocate a dynamic bus number using the Linux IDR */
spi_controller_id_alloc(struct spi_controller * ctlr,int start,int end)3227  static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3228  {
3229  	int id;
3230  
3231  	mutex_lock(&board_lock);
3232  	id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3233  	mutex_unlock(&board_lock);
3234  	if (WARN(id < 0, "couldn't get idr"))
3235  		return id == -ENOSPC ? -EBUSY : id;
3236  	ctlr->bus_num = id;
3237  	return 0;
3238  }
3239  
3240  /**
3241   * spi_register_controller - register SPI master or slave controller
3242   * @ctlr: initialized master, originally from spi_alloc_master() or
3243   *	spi_alloc_slave()
3244   * Context: can sleep
3245   *
3246   * SPI controllers connect to their drivers using some non-SPI bus,
3247   * such as the platform bus.  The final stage of probe() in that code
3248   * includes calling spi_register_controller() to hook up to this SPI bus glue.
3249   *
3250   * SPI controllers use board specific (often SOC specific) bus numbers,
3251   * and board-specific addressing for SPI devices combines those numbers
3252   * with chip select numbers.  Since SPI does not directly support dynamic
3253   * device identification, boards need configuration tables telling which
3254   * chip is at which address.
3255   *
3256   * This must be called from context that can sleep.  It returns zero on
3257   * success, else a negative error code (dropping the controller's refcount).
3258   * After a successful return, the caller is responsible for calling
3259   * spi_unregister_controller().
3260   *
3261   * Return: zero on success, else a negative error code.
3262   */
spi_register_controller(struct spi_controller * ctlr)3263  int spi_register_controller(struct spi_controller *ctlr)
3264  {
3265  	struct device		*dev = ctlr->dev.parent;
3266  	struct boardinfo	*bi;
3267  	int			first_dynamic;
3268  	int			status;
3269  	int			idx;
3270  
3271  	if (!dev)
3272  		return -ENODEV;
3273  
3274  	/*
3275  	 * Make sure all necessary hooks are implemented before registering
3276  	 * the SPI controller.
3277  	 */
3278  	status = spi_controller_check_ops(ctlr);
3279  	if (status)
3280  		return status;
3281  
3282  	if (ctlr->bus_num < 0)
3283  		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3284  	if (ctlr->bus_num >= 0) {
3285  		/* Devices with a fixed bus number must claim exactly that number */
3286  		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3287  		if (status)
3288  			return status;
3289  	}
3290  	if (ctlr->bus_num < 0) {
3291  		first_dynamic = of_alias_get_highest_id("spi");
3292  		if (first_dynamic < 0)
3293  			first_dynamic = 0;
3294  		else
3295  			first_dynamic++;
3296  
3297  		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3298  		if (status)
3299  			return status;
3300  	}
3301  	ctlr->bus_lock_flag = 0;
3302  	init_completion(&ctlr->xfer_completion);
3303  	init_completion(&ctlr->cur_msg_completion);
3304  	if (!ctlr->max_dma_len)
3305  		ctlr->max_dma_len = INT_MAX;
3306  
3307  	/*
3308  	 * Register the device, then userspace will see it.
3309  	 * Registration fails if the bus ID is in use.
3310  	 */
3311  	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3312  
3313  	if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3314  		status = spi_get_gpio_descs(ctlr);
3315  		if (status)
3316  			goto free_bus_id;
3317  		/*
3318  		 * A controller using GPIO descriptors always
3319  		 * supports SPI_CS_HIGH if need be.
3320  		 */
3321  		ctlr->mode_bits |= SPI_CS_HIGH;
3322  	}
3323  
3324  	/*
3325  	 * Even if it's just one always-selected device, there must
3326  	 * be at least one chipselect.
3327  	 */
3328  	if (!ctlr->num_chipselect) {
3329  		status = -EINVAL;
3330  		goto free_bus_id;
3331  	}
3332  
3333  	/* Setting last_cs to SPI_INVALID_CS means no chip selected */
3334  	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3335  		ctlr->last_cs[idx] = SPI_INVALID_CS;
3336  
3337  	status = device_add(&ctlr->dev);
3338  	if (status < 0)
3339  		goto free_bus_id;
3340  	dev_dbg(dev, "registered %s %s\n",
3341  			spi_controller_is_target(ctlr) ? "target" : "host",
3342  			dev_name(&ctlr->dev));
3343  
3344  	/*
3345  	 * If we're using a queued driver, start the queue. Note that we don't
3346  	 * need the queueing logic if the driver only supports high-level
3347  	 * memory operations.
3348  	 */
3349  	if (ctlr->transfer) {
3350  		dev_info(dev, "controller is unqueued, this is deprecated\n");
3351  	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3352  		status = spi_controller_initialize_queue(ctlr);
3353  		if (status) {
3354  			device_del(&ctlr->dev);
3355  			goto free_bus_id;
3356  		}
3357  	}
3358  	/* Add statistics */
3359  	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3360  	if (!ctlr->pcpu_statistics) {
3361  		dev_err(dev, "Error allocating per-cpu statistics\n");
3362  		status = -ENOMEM;
3363  		goto destroy_queue;
3364  	}
3365  
3366  	mutex_lock(&board_lock);
3367  	list_add_tail(&ctlr->list, &spi_controller_list);
3368  	list_for_each_entry(bi, &board_list, list)
3369  		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3370  	mutex_unlock(&board_lock);
3371  
3372  	/* Register devices from the device tree and ACPI */
3373  	of_register_spi_devices(ctlr);
3374  	acpi_register_spi_devices(ctlr);
3375  	return status;
3376  
3377  destroy_queue:
3378  	spi_destroy_queue(ctlr);
3379  free_bus_id:
3380  	mutex_lock(&board_lock);
3381  	idr_remove(&spi_master_idr, ctlr->bus_num);
3382  	mutex_unlock(&board_lock);
3383  	return status;
3384  }
3385  EXPORT_SYMBOL_GPL(spi_register_controller);
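
/*
 * Example: the usual registration sequence from a platform driver's
 * probe() (an illustrative sketch with hypothetical foo_* names, not
 * part of this file; error unwinding trimmed):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 4;
 *		ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		ctlr->transfer_one = foo_transfer_one;
 *		ctlr->dev.of_node = pdev->dev.of_node;
 *
 *		return spi_register_controller(ctlr);
 *	}
 */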
3386  
3387  static void devm_spi_unregister(struct device *dev, void *res)
3388  {
3389  	spi_unregister_controller(*(struct spi_controller **)res);
3390  }
3391  
3392  /**
3393   * devm_spi_register_controller - register managed SPI master or slave
3394   *	controller
3395   * @dev:    device managing SPI controller
3396   * @ctlr: initialized controller, originally from spi_alloc_master() or
3397   *	spi_alloc_slave()
3398   * Context: can sleep
3399   *
3400   * Register an SPI controller as with spi_register_controller(); it will
3401   * automatically be unregistered and freed when @dev is unbound.
3402   *
3403   * Return: zero on success, else a negative error code.
3404   */
3405  int devm_spi_register_controller(struct device *dev,
3406  				 struct spi_controller *ctlr)
3407  {
3408  	struct spi_controller **ptr;
3409  	int ret;
3410  
3411  	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3412  	if (!ptr)
3413  		return -ENOMEM;
3414  
3415  	ret = spi_register_controller(ctlr);
3416  	if (!ret) {
3417  		*ptr = ctlr;
3418  		devres_add(dev, ptr);
3419  	} else {
3420  		devres_free(ptr);
3421  	}
3422  
3423  	return ret;
3424  }
3425  EXPORT_SYMBOL_GPL(devm_spi_register_controller);
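
/*
 * Example: the managed variant removes the need for an explicit remove()
 * callback (an illustrative sketch with hypothetical foo_* names, not
 * part of this file):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 1;
 *		ctlr->transfer_one = foo_transfer_one;
 *
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */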
3426  
3427  static int __unregister(struct device *dev, void *null)
3428  {
3429  	spi_unregister_device(to_spi_device(dev));
3430  	return 0;
3431  }
3432  
3433  /**
3434   * spi_unregister_controller - unregister SPI master or slave controller
3435   * @ctlr: the controller being unregistered
3436   * Context: can sleep
3437   *
3438   * This call is used only by SPI controller drivers, which are the
3439   * only ones directly touching chip registers.
3440   *
3441   * This must be called from context that can sleep.
3442   *
3443   * Note that this function also drops a reference to the controller.
3444   */
3445  void spi_unregister_controller(struct spi_controller *ctlr)
3446  {
3447  	struct spi_controller *found;
3448  	int id = ctlr->bus_num;
3449  
3450  	/* Prevent addition of new devices, unregister existing ones */
3451  	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3452  		mutex_lock(&ctlr->add_lock);
3453  
3454  	device_for_each_child(&ctlr->dev, NULL, __unregister);
3455  
3456  	/* First make sure that this controller was ever added */
3457  	mutex_lock(&board_lock);
3458  	found = idr_find(&spi_master_idr, id);
3459  	mutex_unlock(&board_lock);
3460  	if (ctlr->queued) {
3461  		if (spi_destroy_queue(ctlr))
3462  			dev_err(&ctlr->dev, "queue remove failed\n");
3463  	}
3464  	mutex_lock(&board_lock);
3465  	list_del(&ctlr->list);
3466  	mutex_unlock(&board_lock);
3467  
3468  	device_del(&ctlr->dev);
3469  
3470  	/* Free bus id */
3471  	mutex_lock(&board_lock);
3472  	if (found == ctlr)
3473  		idr_remove(&spi_master_idr, id);
3474  	mutex_unlock(&board_lock);
3475  
3476  	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3477  		mutex_unlock(&ctlr->add_lock);
3478  
3479  	/*
3480  	 * Release the last reference on the controller if its driver
3481  	 * has not yet been converted to devm_spi_alloc_master/slave().
3482  	 */
3483  	if (!ctlr->devm_allocated)
3484  		put_device(&ctlr->dev);
3485  }
3486  EXPORT_SYMBOL_GPL(spi_unregister_controller);
3487  
3488  static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3489  {
3490  	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3491  }
3492  
3493  static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3494  {
3495  	mutex_lock(&ctlr->bus_lock_mutex);
3496  	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3497  	mutex_unlock(&ctlr->bus_lock_mutex);
3498  }
3499  
3500  static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3501  {
3502  	mutex_lock(&ctlr->bus_lock_mutex);
3503  	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3504  	mutex_unlock(&ctlr->bus_lock_mutex);
3505  }
3506  
3507  int spi_controller_suspend(struct spi_controller *ctlr)
3508  {
3509  	int ret = 0;
3510  
3511  	/* Basically no-ops for non-queued controllers */
3512  	if (ctlr->queued) {
3513  		ret = spi_stop_queue(ctlr);
3514  		if (ret)
3515  			dev_err(&ctlr->dev, "queue stop failed\n");
3516  	}
3517  
3518  	__spi_mark_suspended(ctlr);
3519  	return ret;
3520  }
3521  EXPORT_SYMBOL_GPL(spi_controller_suspend);
3522  
3523  int spi_controller_resume(struct spi_controller *ctlr)
3524  {
3525  	int ret = 0;
3526  
3527  	__spi_mark_resumed(ctlr);
3528  
3529  	if (ctlr->queued) {
3530  		ret = spi_start_queue(ctlr);
3531  		if (ret)
3532  			dev_err(&ctlr->dev, "queue restart failed\n");
3533  	}
3534  	return ret;
3535  }
3536  EXPORT_SYMBOL_GPL(spi_controller_resume);
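
/*
 * Example: wiring these helpers into a controller driver's system PM
 * callbacks (an illustrative sketch with hypothetical foo_* names, not
 * part of this file; assumes drvdata points at the controller):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);
 *	}
 *
 *	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */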
3537  
3538  /*-------------------------------------------------------------------------*/
3539  
3540  /* Core methods for spi_message alterations */
3541  
3542  static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3543  					    struct spi_message *msg,
3544  					    void *res)
3545  {
3546  	struct spi_replaced_transfers *rxfer = res;
3547  	size_t i;
3548  
3549  	/* Call extra callback if requested */
3550  	if (rxfer->release)
3551  		rxfer->release(ctlr, msg, res);
3552  
3553  	/* Insert replaced transfers back into the message */
3554  	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3555  
3556  	/* Remove the formerly inserted entries */
3557  	for (i = 0; i < rxfer->inserted; i++)
3558  		list_del(&rxfer->inserted_transfers[i].transfer_list);
3559  }
3560  
3561  /**
3562   * spi_replace_transfers - replace transfers with several transfers
3563   *                         and register change with spi_message.resources
3564   * @msg:           the spi_message we work upon
3565   * @xfer_first:    the first spi_transfer we want to replace
3566   * @remove:        number of transfers to remove
3567   * @insert:        the number of transfers we want to insert instead
3568   * @release:       extra release code necessary in some circumstances
3569   * @extradatasize: extra data to allocate (with alignment guarantees
3570   *                 of struct @spi_transfer)
3571   * @gfp:           gfp flags
3572   *
3573   * Returns: pointer to @spi_replaced_transfers,
3574   *          or PTR_ERR(...) in case of errors.
3575   */
3576  static struct spi_replaced_transfers *spi_replace_transfers(
3577  	struct spi_message *msg,
3578  	struct spi_transfer *xfer_first,
3579  	size_t remove,
3580  	size_t insert,
3581  	spi_replaced_release_t release,
3582  	size_t extradatasize,
3583  	gfp_t gfp)
3584  {
3585  	struct spi_replaced_transfers *rxfer;
3586  	struct spi_transfer *xfer;
3587  	size_t i;
3588  
3589  	/* Allocate the structure using spi_res */
3590  	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3591  			      struct_size(rxfer, inserted_transfers, insert)
3592  			      + extradatasize,
3593  			      gfp);
3594  	if (!rxfer)
3595  		return ERR_PTR(-ENOMEM);
3596  
3597  	/* The release code to invoke before running the generic release */
3598  	rxfer->release = release;
3599  
3600  	/* Assign extradata */
3601  	if (extradatasize)
3602  		rxfer->extradata =
3603  			&rxfer->inserted_transfers[insert];
3604  
3605  	/* Init the replaced_transfers list */
3606  	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3607  
3608  	/*
3609  	 * Assign the list_entry after which we should reinsert
3610  	 * the @replaced_transfers - it may be &spi_message.transfers!
3611  	 */
3612  	rxfer->replaced_after = xfer_first->transfer_list.prev;
3613  
3614  	/* Remove the requested number of transfers */
3615  	for (i = 0; i < remove; i++) {
3616  		/*
3617  		 * If the entry after replaced_after is &msg->transfers,
3618  		 * then we have been requested to remove more transfers
3619  		 * than are in the list.
3620  		 */
3621  		if (rxfer->replaced_after->next == &msg->transfers) {
3622  			dev_err(&msg->spi->dev,
3623  				"requested to remove more spi_transfers than are available\n");
3624  			/* Insert replaced transfers back into the message */
3625  			list_splice(&rxfer->replaced_transfers,
3626  				    rxfer->replaced_after);
3627  
3628  			/* Free the spi_replace_transfer structure... */
3629  			spi_res_free(rxfer);
3630  
3631  			/* ...and return with an error */
3632  			return ERR_PTR(-EINVAL);
3633  		}
3634  
3635  		/*
3636  		 * Remove the entry after replaced_after from list of
3637  		 * transfers and add it to list of replaced_transfers.
3638  		 */
3639  		list_move_tail(rxfer->replaced_after->next,
3640  			       &rxfer->replaced_transfers);
3641  	}
3642  
3643  	/*
3644  	 * Create copies of the given xfer with identical settings,
3645  	 * based on the first transfer to be removed.
3646  	 */
3647  	for (i = 0; i < insert; i++) {
3648  		/* We need to run in reverse order */
3649  		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3650  
3651  		/* Copy all spi_transfer data */
3652  		memcpy(xfer, xfer_first, sizeof(*xfer));
3653  
3654  		/* Add to list */
3655  		list_add(&xfer->transfer_list, rxfer->replaced_after);
3656  
3657  		/* Clear cs_change and delay for all but the last */
3658  		if (i) {
3659  			xfer->cs_change = false;
3660  			xfer->delay.value = 0;
3661  		}
3662  	}
3663  
3664  	/* Set up inserted... */
3665  	rxfer->inserted = insert;
3666  
3667  	/* ...and register it with spi_res/spi_message */
3668  	spi_res_add(msg, rxfer);
3669  
3670  	return rxfer;
3671  }
3672  
3673  static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3674  					struct spi_message *msg,
3675  					struct spi_transfer **xferp,
3676  					size_t maxsize)
3677  {
3678  	struct spi_transfer *xfer = *xferp, *xfers;
3679  	struct spi_replaced_transfers *srt;
3680  	size_t offset;
3681  	size_t count, i;
3682  
3683  	/* Calculate how many we have to replace */
3684  	count = DIV_ROUND_UP(xfer->len, maxsize);
3685  
3686  	/* Create replacement */
3687  	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3688  	if (IS_ERR(srt))
3689  		return PTR_ERR(srt);
3690  	xfers = srt->inserted_transfers;
3691  
3692  	/*
3693  	 * Now handle each of those newly inserted spi_transfers.
3694  	 * Note that the replacement spi_transfers are all preset
3695  	 * to the same values as *xferp, so tx_buf, rx_buf and len
3696  	 * are all identical (as are most other fields),
3697  	 * so we just have to fix up len and the buffer pointers.
3698  	 */
3699  
3700  	/*
3701  	 * The first transfer just needs the length modified, so we
3702  	 * run it outside the loop.
3703  	 */
3704  	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3705  
3706  	/* All the others need rx_buf/tx_buf also set */
3707  	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3708  		/* Update rx_buf, tx_buf and DMA */
3709  		if (xfers[i].rx_buf)
3710  			xfers[i].rx_buf += offset;
3711  		if (xfers[i].tx_buf)
3712  			xfers[i].tx_buf += offset;
3713  
3714  		/* Update length */
3715  		xfers[i].len = min(maxsize, xfers[i].len - offset);
3716  	}
3717  
3718  	/*
3719  	 * We set up xferp to the last entry we have inserted,
3720  	 * so that we skip those already split transfers.
3721  	 */
3722  	*xferp = &xfers[count - 1];
3723  
3724  	/* Increment statistics counters */
3725  	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3726  				       transfers_split_maxsize);
3727  	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3728  				       transfers_split_maxsize);
3729  
3730  	return 0;
3731  }
3732  
3733  /**
3734   * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3735   *                               when an individual transfer exceeds a
3736   *                               certain size
3737   * @ctlr:    the @spi_controller for this transfer
3738   * @msg:   the @spi_message to transform
3739   * @maxsize: the maximum transfer size, in bytes; longer transfers are split
3740   *
3741   * This function allocates resources that are automatically freed during the
3742   * SPI message unoptimize phase, so it should only be called from
3743   * optimize_message callbacks.
3744   *
3745   * Return: status of transformation
3746   */
3747  int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3748  				struct spi_message *msg,
3749  				size_t maxsize)
3750  {
3751  	struct spi_transfer *xfer;
3752  	int ret;
3753  
3754  	/*
3755  	 * Iterate over the transfer_list,
3756  	 * but note that xfer is advanced to the last transfer inserted
3757  	 * to avoid checking sizes again unnecessarily (also, xfer may
3758  	 * belong to a different list by the time the replacement has
3759  	 * happened).
3760  	 */
3761  	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3762  		if (xfer->len > maxsize) {
3763  			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3764  							   maxsize);
3765  			if (ret)
3766  				return ret;
3767  		}
3768  	}
3769  
3770  	return 0;
3771  }
3772  EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
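
/*
 * Example: a controller whose FIFO cannot handle more than (say) 256
 * bytes per transfer can split from its optimize_message() callback
 * (an illustrative sketch with hypothetical foo_* names, not part of
 * this file):
 *
 *	static int foo_optimize_message(struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(msg->spi->controller,
 *						   msg, 256);
 *	}
 *
 *	...
 *	ctlr->optimize_message = foo_optimize_message;
 *
 * spi_split_transfers_maxwords() below is the analogue for hardware
 * whose limit is expressed in SPI words rather than bytes.
 */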
3773  
3774  
3775  /**
3776   * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3777   *                                when an individual transfer exceeds a
3778   *                                certain number of SPI words
3779   * @ctlr:     the @spi_controller for this transfer
3780   * @msg:      the @spi_message to transform
3781   * @maxwords: the number of words to limit each transfer to
3782   *
3783   * This function allocates resources that are automatically freed during the
3784   * SPI message unoptimize phase, so it should only be called from
3785   * optimize_message callbacks.
3786   *
3787   * Return: status of transformation
3788   */
3789  int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3790  				 struct spi_message *msg,
3791  				 size_t maxwords)
3792  {
3793  	struct spi_transfer *xfer;
3794  
3795  	/*
3796  	 * Iterate over the transfer_list,
3797  	 * but note that xfer is advanced to the last transfer inserted
3798  	 * to avoid checking sizes again unnecessarily (also, xfer may
3799  	 * belong to a different list by the time the replacement has
3800  	 * happened).
3801  	 */
3802  	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3803  		size_t maxsize;
3804  		int ret;
3805  
3806  		maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3807  		if (xfer->len > maxsize) {
3808  			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3809  							   maxsize);
3810  			if (ret)
3811  				return ret;
3812  		}
3813  	}
3814  
3815  	return 0;
3816  }
3817  EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
3818  
3819  /*-------------------------------------------------------------------------*/
3820  
3821  /*
3822   * Core methods for SPI controller protocol drivers. Some of the
3823   * other core methods are currently defined as inline functions.
3824   */
3825  
3826  static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3827  					u8 bits_per_word)
3828  {
3829  	if (ctlr->bits_per_word_mask) {
3830  		/* Only 32 bits fit in the mask */
3831  		if (bits_per_word > 32)
3832  			return -EINVAL;
3833  		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3834  			return -EINVAL;
3835  	}
3836  
3837  	return 0;
3838  }
3839  
3840  /**
3841   * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3842   * @spi: the device that requires specific CS timing configuration
3843   *
3844   * Return: zero on success, else a negative error code.
3845   */
3846  static int spi_set_cs_timing(struct spi_device *spi)
3847  {
3848  	struct device *parent = spi->controller->dev.parent;
3849  	int status = 0;
3850  
3851  	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3852  		if (spi->controller->auto_runtime_pm) {
3853  			status = pm_runtime_get_sync(parent);
3854  			if (status < 0) {
3855  				pm_runtime_put_noidle(parent);
3856  				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3857  					status);
3858  				return status;
3859  			}
3860  
3861  			status = spi->controller->set_cs_timing(spi);
3862  			pm_runtime_mark_last_busy(parent);
3863  			pm_runtime_put_autosuspend(parent);
3864  		} else {
3865  			status = spi->controller->set_cs_timing(spi);
3866  		}
3867  	}
3868  	return status;
3869  }
3870  
3871  /**
3872   * spi_setup - setup SPI mode and clock rate
3873   * @spi: the device whose settings are being modified
3874   * Context: can sleep, and no requests are queued to the device
3875   *
3876   * SPI protocol drivers may need to update the transfer mode if the
3877   * device doesn't work with its default.  They may likewise need
3878   * to update clock rates or word sizes from initial values.  This function
3879   * changes those settings, and must be called from a context that can sleep.
3880   * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3881   * effect the next time the device is selected and data is transferred to
3882   * or from it.  When this function returns, the SPI device is deselected.
3883   *
3884   * Note that this call will fail if the protocol driver specifies an option
3885   * that the underlying controller or its driver does not support.  For
3886   * example, not all hardware supports wire transfers using nine bit words,
3887   * LSB-first wire encoding, or active-high chipselects.
3888   *
3889   * Return: zero on success, else a negative error code.
3890   */
3891  int spi_setup(struct spi_device *spi)
3892  {
3893  	unsigned	bad_bits, ugly_bits;
3894  	int		status;
3895  
3896  	/*
3897  	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3898  	 * being set at the same time.
3899  	 */
3900  	if ((hweight_long(spi->mode &
3901  		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3902  	    (hweight_long(spi->mode &
3903  		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3904  		dev_err(&spi->dev,
3905  		"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3906  		return -EINVAL;
3907  	}
3908  	/* In SPI_3WIRE mode, DUAL and QUAD modes are forbidden */
3909  	if ((spi->mode & SPI_3WIRE) && (spi->mode &
3910  		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3911  		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3912  		return -EINVAL;
3913  	/* Check against conflicting MOSI idle configuration */
3914  	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
3915  		dev_err(&spi->dev,
3916  			"setup: MOSI configured to idle low and high at the same time.\n");
3917  		return -EINVAL;
3918  	}
3919  	/*
3920  	 * Help drivers fail *cleanly* when they need options
3921  	 * that aren't supported with their current controller.
3922  	 * SPI_CS_WORD has a fallback software implementation,
3923  	 * so it is ignored here.
3924  	 */
3925  	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3926  				 SPI_NO_TX | SPI_NO_RX);
3927  	ugly_bits = bad_bits &
3928  		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3929  		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3930  	if (ugly_bits) {
3931  		dev_warn(&spi->dev,
3932  			 "setup: ignoring unsupported mode bits %x\n",
3933  			 ugly_bits);
3934  		spi->mode &= ~ugly_bits;
3935  		bad_bits &= ~ugly_bits;
3936  	}
3937  	if (bad_bits) {
3938  		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3939  			bad_bits);
3940  		return -EINVAL;
3941  	}
3942  
3943  	if (!spi->bits_per_word) {
3944  		spi->bits_per_word = 8;
3945  	} else {
3946  		/*
3947  		 * Some controllers may not support the default 8 bits-per-word
3948  		 * so only perform the check when this is explicitly provided.
3949  		 */
3950  		status = __spi_validate_bits_per_word(spi->controller,
3951  						      spi->bits_per_word);
3952  		if (status)
3953  			return status;
3954  	}
3955  
3956  	if (spi->controller->max_speed_hz &&
3957  	    (!spi->max_speed_hz ||
3958  	     spi->max_speed_hz > spi->controller->max_speed_hz))
3959  		spi->max_speed_hz = spi->controller->max_speed_hz;
3960  
3961  	mutex_lock(&spi->controller->io_mutex);
3962  
3963  	if (spi->controller->setup) {
3964  		status = spi->controller->setup(spi);
3965  		if (status) {
3966  			mutex_unlock(&spi->controller->io_mutex);
3967  			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3968  				status);
3969  			return status;
3970  		}
3971  	}
3972  
3973  	status = spi_set_cs_timing(spi);
3974  	if (status) {
3975  		mutex_unlock(&spi->controller->io_mutex);
3976  		return status;
3977  	}
3978  
3979  	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3980  		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3981  		if (status < 0) {
3982  			mutex_unlock(&spi->controller->io_mutex);
3983  			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3984  				status);
3985  			return status;
3986  		}
3987  
3988  		/*
3989  		 * We do not want to return a positive value from pm_runtime_get;
3990  		 * there are many instances of devices calling spi_setup() and
3991  		 * checking for a non-zero return value instead of a negative
3992  		 * return value.
3993  		 */
3994  		status = 0;
3995  
3996  		spi_set_cs(spi, false, true);
3997  		pm_runtime_mark_last_busy(spi->controller->dev.parent);
3998  		pm_runtime_put_autosuspend(spi->controller->dev.parent);
3999  	} else {
4000  		spi_set_cs(spi, false, true);
4001  	}
4002  
4003  	mutex_unlock(&spi->controller->io_mutex);
4004  
4005  	if (spi->rt && !spi->controller->rt) {
4006  		spi->controller->rt = true;
4007  		spi_set_thread_rt(spi->controller);
4008  	}
4009  
4010  	trace_spi_setup(spi, status);
4011  
4012  	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4013  			spi->mode & SPI_MODE_X_MASK,
4014  			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4015  			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4016  			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
4017  			(spi->mode & SPI_LOOP) ? "loopback, " : "",
4018  			spi->bits_per_word, spi->max_speed_hz,
4019  			status);
4020  
4021  	return status;
4022  }
4023  EXPORT_SYMBOL_GPL(spi_setup);
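
/*
 * Example: a peripheral driver adjusting device settings from its
 * probe() (an illustrative sketch, not part of this file):
 *
 *	static int foo_spi_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		return spi_setup(spi);
 *	}
 */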
4024  
4025  static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4026  				       struct spi_device *spi)
4027  {
4028  	int delay1, delay2;
4029  
4030  	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4031  	if (delay1 < 0)
4032  		return delay1;
4033  
4034  	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4035  	if (delay2 < 0)
4036  		return delay2;
4037  
4038  	if (delay1 < delay2)
4039  		memcpy(&xfer->word_delay, &spi->word_delay,
4040  		       sizeof(xfer->word_delay));
4041  
4042  	return 0;
4043  }
4044  
4045  static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4046  {
4047  	struct spi_controller *ctlr = spi->controller;
4048  	struct spi_transfer *xfer;
4049  	int w_size;
4050  
4051  	if (list_empty(&message->transfers))
4052  		return -EINVAL;
4053  
4054  	message->spi = spi;
4055  
4056  	/*
4057  	 * Half-duplex links include original MicroWire, and ones with
4058  	 * only one data pin like SPI_3WIRE (switches direction) or where
4059  	 * either MOSI or MISO is missing.  They can also be caused by
4060  	 * software limitations.
4061  	 */
4062  	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4063  	    (spi->mode & SPI_3WIRE)) {
4064  		unsigned flags = ctlr->flags;
4065  
4066  		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4067  			if (xfer->rx_buf && xfer->tx_buf)
4068  				return -EINVAL;
4069  			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4070  				return -EINVAL;
4071  			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4072  				return -EINVAL;
4073  		}
4074  	}
4075  
4076  	/*
4077  	 * Set transfer bits_per_word and max speed as spi device default if
4078  	 * it is not set for this transfer.
4079  	 * Set transfer tx_nbits and rx_nbits as single transfer default
4080  	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
4081  	 * Ensure transfer word_delay is at least as long as that required by
4082  	 * device itself.
4083  	 */
4084  	message->frame_length = 0;
4085  	list_for_each_entry(xfer, &message->transfers, transfer_list) {
4086  		xfer->effective_speed_hz = 0;
4087  		message->frame_length += xfer->len;
4088  		if (!xfer->bits_per_word)
4089  			xfer->bits_per_word = spi->bits_per_word;
4090  
4091  		if (!xfer->speed_hz)
4092  			xfer->speed_hz = spi->max_speed_hz;
4093  
4094  		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4095  			xfer->speed_hz = ctlr->max_speed_hz;
4096  
4097  		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4098  			return -EINVAL;
4099  
4100  		/*
4101  		 * SPI transfer length should be a multiple of the SPI word size,
4102  		 * where the word size is rounded up to a power-of-two number of bytes.
4103  		 */
4104  		if (xfer->bits_per_word <= 8)
4105  			w_size = 1;
4106  		else if (xfer->bits_per_word <= 16)
4107  			w_size = 2;
4108  		else
4109  			w_size = 4;
4110  
4111  		/* No partial transfers accepted */
4112  		if (xfer->len % w_size)
4113  			return -EINVAL;
4114  
4115  		if (xfer->speed_hz && ctlr->min_speed_hz &&
4116  		    xfer->speed_hz < ctlr->min_speed_hz)
4117  			return -EINVAL;
4118  
4119  		if (xfer->tx_buf && !xfer->tx_nbits)
4120  			xfer->tx_nbits = SPI_NBITS_SINGLE;
4121  		if (xfer->rx_buf && !xfer->rx_nbits)
4122  			xfer->rx_nbits = SPI_NBITS_SINGLE;
4123  		/*
4124  		 * Check transfer tx/rx_nbits:
4125  		 * 1. check the value matches one of single, dual and quad
4126  		 * 2. check tx/rx_nbits match the mode in spi_device
4127  		 */
4128  		if (xfer->tx_buf) {
4129  			if (spi->mode & SPI_NO_TX)
4130  				return -EINVAL;
4131  			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4132  				xfer->tx_nbits != SPI_NBITS_DUAL &&
4133  				xfer->tx_nbits != SPI_NBITS_QUAD &&
4134  				xfer->tx_nbits != SPI_NBITS_OCTAL)
4135  				return -EINVAL;
4136  			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4137  				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4138  				return -EINVAL;
4139  			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4140  				!(spi->mode & SPI_TX_QUAD))
4141  				return -EINVAL;
4142  		}
4143  		/* Check transfer rx_nbits */
4144  		if (xfer->rx_buf) {
4145  			if (spi->mode & SPI_NO_RX)
4146  				return -EINVAL;
4147  			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4148  				xfer->rx_nbits != SPI_NBITS_DUAL &&
4149  				xfer->rx_nbits != SPI_NBITS_QUAD &&
4150  				xfer->rx_nbits != SPI_NBITS_OCTAL)
4151  				return -EINVAL;
4152  			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4153  				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4154  				return -EINVAL;
4155  			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4156  				!(spi->mode & SPI_RX_QUAD))
4157  				return -EINVAL;
4158  		}
4159  
4160  		if (_spi_xfer_word_delay_update(xfer, spi))
4161  			return -EINVAL;
4162  	}
4163  
4164  	message->status = -EINPROGRESS;
4165  
4166  	return 0;
4167  }
4168  
4169  /*
4170   * spi_split_transfers - generic handling of transfer splitting
4171   * @msg: the message to split
4172   *
4173   * Under certain conditions, a SPI controller may not support arbitrary
4174   * transfer sizes or other features required by a peripheral. This function
4175   * will split the transfers in the message into smaller transfers that are
4176   * supported by the controller.
4177   *
4178   * Controllers with special requirements not covered here can also split
4179   * transfers in the optimize_message() callback.
4180   *
4181   * Context: can sleep
4182   * Return: zero on success, else a negative error code
4183   */
4184  static int spi_split_transfers(struct spi_message *msg)
4185  {
4186  	struct spi_controller *ctlr = msg->spi->controller;
4187  	struct spi_transfer *xfer;
4188  	int ret;
4189  
4190  	/*
4191  	 * If an SPI controller does not support toggling the CS line on each
4192  	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4193  	 * for the CS line, we can emulate the CS-per-word hardware function by
4194  	 * splitting transfers into one-word transfers and ensuring that
4195  	 * cs_change is set for each transfer.
4196  	 */
4197  	if ((msg->spi->mode & SPI_CS_WORD) &&
4198  	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4199  		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4200  		if (ret)
4201  			return ret;
4202  
4203  		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4204  			/* Don't change cs_change on the last entry in the list */
4205  			if (list_is_last(&xfer->transfer_list, &msg->transfers))
4206  				break;
4207  
4208  			xfer->cs_change = 1;
4209  		}
4210  	} else {
4211  		ret = spi_split_transfers_maxsize(ctlr, msg,
4212  						  spi_max_transfer_size(msg->spi));
4213  		if (ret)
4214  			return ret;
4215  	}
4216  
4217  	return 0;
4218  }
4219  
4220  /*
4221   * __spi_optimize_message - shared implementation for spi_optimize_message()
4222   *                          and spi_maybe_optimize_message()
4223   * @spi: the device that will be used for the message
4224   * @msg: the message to optimize
4225   *
4226   * Peripheral drivers will call spi_optimize_message() and the spi core will
4227   * call spi_maybe_optimize_message() instead of calling this directly.
4228   *
4229   * It is not valid to call this on a message that has already been optimized.
4230   *
4231   * Return: zero on success, else a negative error code
4232   */
4233  static int __spi_optimize_message(struct spi_device *spi,
4234  				  struct spi_message *msg)
4235  {
4236  	struct spi_controller *ctlr = spi->controller;
4237  	int ret;
4238  
4239  	ret = __spi_validate(spi, msg);
4240  	if (ret)
4241  		return ret;
4242  
4243  	ret = spi_split_transfers(msg);
4244  	if (ret)
4245  		return ret;
4246  
4247  	if (ctlr->optimize_message) {
4248  		ret = ctlr->optimize_message(msg);
4249  		if (ret) {
4250  			spi_res_release(ctlr, msg);
4251  			return ret;
4252  		}
4253  	}
4254  
4255  	msg->optimized = true;
4256  
4257  	return 0;
4258  }
4259  
4260  /*
4261   * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4262   * @spi: the device that will be used for the message
4263   * @msg: the message to optimize
4264   * Return: zero on success, else a negative error code
4265   */
4266  static int spi_maybe_optimize_message(struct spi_device *spi,
4267  				      struct spi_message *msg)
4268  {
4269  	if (spi->controller->defer_optimize_message) {
4270  		msg->spi = spi;
4271  		return 0;
4272  	}
4273  
4274  	if (msg->pre_optimized)
4275  		return 0;
4276  
4277  	return __spi_optimize_message(spi, msg);
4278  }
4279  
4280  /**
4281   * spi_optimize_message - do any one-time validation and setup for a SPI message
4282   * @spi: the device that will be used for the message
4283   * @msg: the message to optimize
4284   *
4285   * Peripheral drivers that reuse the same message repeatedly may call this to
4286   * perform as much message prep as possible once, rather than repeating it each
4287   * time a message transfer is performed, improving throughput and reducing
4288   * CPU usage.
4289   *
4290   * Once a message has been optimized, it cannot be modified with the exception
4291   * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4292   * only the data in the memory it points to).
4293   *
4294   * Calls to this function must be balanced with calls to spi_unoptimize_message()
4295   * to avoid leaking resources.
4296   *
4297   * Context: can sleep
4298   * Return: zero on success, else a negative error code
4299   */
4300  int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4301  {
4302  	int ret;
4303  
4304  	/*
4305  	 * Pre-optimization is not supported when optimization is deferred,
4306  	 * e.g. when using spi-mux.
4307  	 */
4308  	if (spi->controller->defer_optimize_message)
4309  		return 0;
4310  
4311  	ret = __spi_optimize_message(spi, msg);
4312  	if (ret)
4313  		return ret;
4314  
4315  	/*
4316  	 * This flag indicates that the peripheral driver called spi_optimize_message()
4317  	 * and therefore we shouldn't unoptimize message automatically when finalizing
4318  	 * the message but rather wait until spi_unoptimize_message() is called
4319  	 * by the peripheral driver.
4320  	 */
4321  	msg->pre_optimized = true;
4322  
4323  	return 0;
4324  }
4325  EXPORT_SYMBOL_GPL(spi_optimize_message);
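
/*
 * Example: pre-optimizing a message that is reused many times
 * (an illustrative sketch, not part of this file):
 *
 *	u8 tx[4];
 *	struct spi_transfer xfer = { .tx_buf = tx, .len = sizeof(tx) };
 *	struct spi_message msg;
 *	int ret, i;
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	ret = spi_optimize_message(spi, &msg);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < 1000; i++) {
 *		// Only the data behind xfer.tx_buf may change here.
 *		ret = spi_sync(spi, &msg);
 *		if (ret)
 *			break;
 *	}
 *
 *	spi_unoptimize_message(&msg);
 */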
4326  
4327  /**
4328   * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4329   * @msg: the message to unoptimize
4330   *
4331   * Calls to this function must be balanced with calls to spi_optimize_message().
4332   *
4333   * Context: can sleep
4334   */
4335  void spi_unoptimize_message(struct spi_message *msg)
4336  {
4337  	if (msg->spi->controller->defer_optimize_message)
4338  		return;
4339  
4340  	__spi_unoptimize_message(msg);
4341  	msg->pre_optimized = false;
4342  }
4343  EXPORT_SYMBOL_GPL(spi_unoptimize_message);
4344  
4345  static int __spi_async(struct spi_device *spi, struct spi_message *message)
4346  {
4347  	struct spi_controller *ctlr = spi->controller;
4348  	struct spi_transfer *xfer;
4349  
4350  	/*
4351  	 * Some controllers do not support doing regular SPI transfers. Return
4352  	 * -ENOTSUPP when this is the case.
4353  	 */
4354  	if (!ctlr->transfer)
4355  		return -ENOTSUPP;
4356  
4357  	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4358  	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4359  
4360  	trace_spi_message_submit(message);
4361  
4362  	if (!ctlr->ptp_sts_supported) {
4363  		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4364  			xfer->ptp_sts_word_pre = 0;
4365  			ptp_read_system_prets(xfer->ptp_sts);
4366  		}
4367  	}
4368  
4369  	return ctlr->transfer(spi, message);
4370  }
4371  
4372  static void devm_spi_unoptimize_message(void *msg)
4373  {
4374  	spi_unoptimize_message(msg);
4375  }
4376  
4377  /**
4378   * devm_spi_optimize_message - managed version of spi_optimize_message()
4379   * @dev: the device that manages @msg (usually @spi->dev)
4380   * @spi: the device that will be used for the message
4381   * @msg: the message to optimize
4382   * Return: zero on success, else a negative error code
4383   *
4384   * spi_unoptimize_message() will automatically be called when the device is
4385   * removed.
4386   */
4387  int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
4388  			      struct spi_message *msg)
4389  {
4390  	int ret;
4391  
4392  	ret = spi_optimize_message(spi, msg);
4393  	if (ret)
4394  		return ret;
4395  
4396  	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
4397  }
4398  EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
4399  
4400  /**
4401   * spi_async - asynchronous SPI transfer
4402   * @spi: device with which data will be exchanged
4403   * @message: describes the data transfers, including completion callback
4404   * Context: any (IRQs may be blocked, etc)
4405   *
4406   * This call may be used from IRQ and other contexts which can't sleep,
4407   * as well as from task contexts which can sleep.
4408   *
4409   * The completion callback is invoked in a context which can't sleep.
4410   * Before that invocation, the value of message->status is undefined.
4411   * When the callback is issued, message->status holds either zero (to
4412   * indicate complete success) or a negative error code.  After that
4413   * callback returns, the driver which issued the transfer request may
4414   * deallocate the associated memory; it's no longer in use by any SPI
4415   * core or controller driver code.
4416   *
4417   * Note that although all messages to a spi_device are handled in
4418   * FIFO order, messages may go to different devices in other orders.
4419   * Some devices might be higher priority, or have various "hard" access
4420   * time requirements, for example.
4421   *
4422   * On detection of any fault during the transfer, processing of
4423   * the entire message is aborted, and the device is deselected.
4424   * Until returning from the associated message completion callback,
4425   * no other spi_message queued to that device will be processed.
4426   * (This rule applies equally to all the synchronous transfer calls,
4427   * which are wrappers around this core asynchronous primitive.)
4428   *
4429   * Return: zero on success, else a negative error code.
4430   */
4431  int spi_async(struct spi_device *spi, struct spi_message *message)
4432  {
4433  	struct spi_controller *ctlr = spi->controller;
4434  	int ret;
4435  	unsigned long flags;
4436  
4437  	ret = spi_maybe_optimize_message(spi, message);
4438  	if (ret)
4439  		return ret;
4440  
4441  	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4442  
4443  	if (ctlr->bus_lock_flag)
4444  		ret = -EBUSY;
4445  	else
4446  		ret = __spi_async(spi, message);
4447  
4448  	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4449  
4450  	return ret;
4451  }
4452  EXPORT_SYMBOL_GPL(spi_async);
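
/*
 * Example: submitting a message asynchronously and getting notified via
 * the completion callback (an illustrative sketch with a hypothetical
 * struct foo_dev, not part of this file):
 *
 *	static void foo_msg_done(void *context)
 *	{
 *		struct foo_dev *fd = context;
 *
 *		// Runs in a context that can't sleep; msg->status is valid now.
 *		complete(&fd->done);
 *	}
 *
 *	...
 *	msg->complete = foo_msg_done;
 *	msg->context = fd;
 *	ret = spi_async(spi, msg);
 */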
4453  
4454  static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4455  {
4456  	bool was_busy;
4457  	int ret;
4458  
4459  	mutex_lock(&ctlr->io_mutex);
4460  
4461  	was_busy = ctlr->busy;
4462  
4463  	ctlr->cur_msg = msg;
4464  	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4465  	if (ret)
4466  		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4467  	ctlr->cur_msg = NULL;
4468  	ctlr->fallback = false;
4469  
4470  	if (!was_busy) {
4471  		kfree(ctlr->dummy_rx);
4472  		ctlr->dummy_rx = NULL;
4473  		kfree(ctlr->dummy_tx);
4474  		ctlr->dummy_tx = NULL;
4475  		if (ctlr->unprepare_transfer_hardware &&
4476  		    ctlr->unprepare_transfer_hardware(ctlr))
4477  			dev_err(&ctlr->dev,
4478  				"failed to unprepare transfer hardware\n");
4479  		spi_idle_runtime_pm(ctlr);
4480  	}
4481  
4482  	mutex_unlock(&ctlr->io_mutex);
4483  }
4484  
4485  /*-------------------------------------------------------------------------*/
4486  
4487  /*
4488   * Utility methods for SPI protocol drivers, layered on
4489   * top of the core.  Some other utility methods are defined as
4490   * inline functions.
4491   */
4492  
4493  static void spi_complete(void *arg)
4494  {
4495  	complete(arg);
4496  }
4497  
4498  static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4499  {
4500  	DECLARE_COMPLETION_ONSTACK(done);
4501  	unsigned long flags;
4502  	int status;
4503  	struct spi_controller *ctlr = spi->controller;
4504  
4505  	if (__spi_check_suspended(ctlr)) {
4506  		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4507  		return -ESHUTDOWN;
4508  	}
4509  
4510  	status = spi_maybe_optimize_message(spi, message);
4511  	if (status)
4512  		return status;
4513  
4514  	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4515  	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4516  
4517  	/*
4518  	 * Checking queue_empty here only guarantees async/sync message
4519  	 * ordering when coming from the same context. It does not need to
4520  	 * guard against reentrancy from a different context. The io_mutex
4521  	 * will catch those cases.
4522  	 */
4523  	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4524  		message->actual_length = 0;
4525  		message->status = -EINPROGRESS;
4526  
4527  		trace_spi_message_submit(message);
4528  
4529  		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4530  		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4531  
4532  		__spi_transfer_message_noqueue(ctlr, message);
4533  
4534  		return message->status;
4535  	}
4536  
4537  	/*
4538  	 * There are messages in the async queue that could have originated
4539  	 * from the same context, so we need to preserve ordering.
4540  	 * Therefore we send the message to the async queue and wait until it
4541  	 * has completed.
4542  	 */
4543  	message->complete = spi_complete;
4544  	message->context = &done;
4545  
4546  	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4547  	status = __spi_async(spi, message);
4548  	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4549  
4550  	if (status == 0) {
4551  		wait_for_completion(&done);
4552  		status = message->status;
4553  	}
4554  	message->complete = NULL;
4555  	message->context = NULL;
4556  
4557  	return status;
4558  }
4559  
4560  /**
4561   * spi_sync - blocking/synchronous SPI data transfers
4562   * @spi: device with which data will be exchanged
4563   * @message: describes the data transfers
4564   * Context: can sleep
4565   *
4566   * This call may only be used from a context that may sleep.  The sleep
4567   * is non-interruptible, and has no timeout.  Low-overhead controller
4568   * drivers may DMA directly into and out of the message buffers.
4569   *
4570   * Note that the SPI device's chip select is active during the message,
4571   * and then is normally disabled between messages.  Drivers for some
4572   * frequently-used devices may want to minimize costs of selecting a chip,
4573   * by leaving it selected in anticipation that the next message will go
4574   * to the same chip.  (That may increase power usage.)
4575   *
4576   * Also, the caller is guaranteeing that the memory associated with the
4577   * message will not be freed before this call returns.
4578   *
4579   * Return: zero on success, else a negative error code.
4580   */
4581  int spi_sync(struct spi_device *spi, struct spi_message *message)
4582  {
4583  	int ret;
4584  
4585  	mutex_lock(&spi->controller->bus_lock_mutex);
4586  	ret = __spi_sync(spi, message);
4587  	mutex_unlock(&spi->controller->bus_lock_mutex);
4588  
4589  	return ret;
4590  }
4591  EXPORT_SYMBOL_GPL(spi_sync);
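
/*
 * Example: a synchronous command/response exchange built from two
 * transfers (an illustrative sketch, not part of this file):
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd,  .len = sizeof(cmd)  },
 *		{ .rx_buf = resp, .len = sizeof(resp) },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, 2);
 *	return spi_sync(spi, &msg);
 */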
4592  
4593  /**
4594   * spi_sync_locked - version of spi_sync with exclusive bus usage
4595   * @spi: device with which data will be exchanged
4596   * @message: describes the data transfers
4597   * Context: can sleep
4598   *
4599   * This call may only be used from a context that may sleep.  The sleep
4600   * is non-interruptible, and has no timeout.  Low-overhead controller
4601   * drivers may DMA directly into and out of the message buffers.
4602   *
4603   * This call should be used by drivers that require exclusive access to the
4604   * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4605   * be released by a spi_bus_unlock call when the exclusive access is over.
4606   *
4607   * Return: zero on success, else a negative error code.
4608   */
4609  int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4610  {
4611  	return __spi_sync(spi, message);
4612  }
4613  EXPORT_SYMBOL_GPL(spi_sync_locked);
4614  
4615  /**
4616   * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4617   * @ctlr: SPI bus master that should be locked for exclusive bus access
4618   * Context: can sleep
4619   *
4620   * This call may only be used from a context that may sleep.  The sleep
4621   * is non-interruptible, and has no timeout.
4622   *
4623   * This call should be used by drivers that require exclusive access to the
4624   * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4625   * exclusive access is over. Data transfer must be done by spi_sync_locked
4626   * and spi_async_locked calls when the SPI bus lock is held.
4627   *
4628   * Return: always zero.
4629   */
4630  int spi_bus_lock(struct spi_controller *ctlr)
4631  {
4632  	unsigned long flags;
4633  
4634  	mutex_lock(&ctlr->bus_lock_mutex);
4635  
4636  	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4637  	ctlr->bus_lock_flag = 1;
4638  	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4639  
4640  	/* Mutex remains locked until spi_bus_unlock() is called */
4641  
4642  	return 0;
4643  }
4644  EXPORT_SYMBOL_GPL(spi_bus_lock);
4645  
4646  /**
4647   * spi_bus_unlock - release the lock for exclusive SPI bus usage
4648   * @ctlr: SPI bus master that was locked for exclusive bus access
4649   * Context: can sleep
4650   *
4651   * This call may only be used from a context that may sleep.  The sleep
4652   * is non-interruptible, and has no timeout.
4653   *
4654   * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4655   * call.
4656   *
4657   * Return: always zero.
4658   */
4659  int spi_bus_unlock(struct spi_controller *ctlr)
4660  {
4661  	ctlr->bus_lock_flag = 0;
4662  
4663  	mutex_unlock(&ctlr->bus_lock_mutex);
4664  
4665  	return 0;
4666  }
4667  EXPORT_SYMBOL_GPL(spi_bus_unlock);
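
/*
 * Example: the lock/transfer/unlock pattern for a device that must issue
 * several messages with no other bus traffic in between (an illustrative
 * sketch, not part of this file):
 *
 *	spi_bus_lock(spi->controller);
 *
 *	ret = spi_sync_locked(spi, &setup_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &data_msg);
 *
 *	spi_bus_unlock(spi->controller);
 */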
4668  
4669  /* Portable code must never pass more than 32 bytes */
4670  #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4671  
4672  static u8	*buf;
4673  
4674  /**
4675   * spi_write_then_read - SPI synchronous write followed by read
4676   * @spi: device with which data will be exchanged
4677   * @txbuf: data to be written (need not be DMA-safe)
4678   * @n_tx: size of txbuf, in bytes
4679   * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4680   * @n_rx: size of rxbuf, in bytes
4681   * Context: can sleep
4682   *
4683   * This performs a half duplex MicroWire style transaction with the
4684   * device, sending txbuf and then reading rxbuf.  The return value
4685   * is zero for success, else a negative errno status code.
4686   * This call may only be used from a context that may sleep.
4687   *
4688   * Parameters to this routine are always copied using a small buffer.
4689   * Performance-sensitive or bulk transfer code should instead use
4690   * spi_{async,sync}() calls with DMA-safe buffers.
4691   *
4692   * Return: zero on success, else a negative error code.
4693   */
4694  int spi_write_then_read(struct spi_device *spi,
4695  		const void *txbuf, unsigned n_tx,
4696  		void *rxbuf, unsigned n_rx)
4697  {
4698  	static DEFINE_MUTEX(lock);
4699  
4700  	int			status;
4701  	struct spi_message	message;
4702  	struct spi_transfer	x[2];
4703  	u8			*local_buf;
4704  
4705  	/*
4706  	 * Use preallocated DMA-safe buffer if we can. We can't avoid
4707  	 * copying here (a pure convenience thing), but we can
4708  	 * keep heap costs out of the hot path unless someone else is
4709  	 * using the pre-allocated buffer or the transfer is too large.
4710  	 */
4711  	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4712  		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4713  				    GFP_KERNEL | GFP_DMA);
4714  		if (!local_buf)
4715  			return -ENOMEM;
4716  	} else {
4717  		local_buf = buf;
4718  	}
4719  
4720  	spi_message_init(&message);
4721  	memset(x, 0, sizeof(x));
4722  	if (n_tx) {
4723  		x[0].len = n_tx;
4724  		spi_message_add_tail(&x[0], &message);
4725  	}
4726  	if (n_rx) {
4727  		x[1].len = n_rx;
4728  		spi_message_add_tail(&x[1], &message);
4729  	}
4730  
4731  	memcpy(local_buf, txbuf, n_tx);
4732  	x[0].tx_buf = local_buf;
4733  	x[1].rx_buf = local_buf + n_tx;
4734  
4735  	/* Do the I/O */
4736  	status = spi_sync(spi, &message);
4737  	if (status == 0)
4738  		memcpy(rxbuf, x[1].rx_buf, n_rx);
4739  
4740  	if (x[0].tx_buf == buf)
4741  		mutex_unlock(&lock);
4742  	else
4743  		kfree(local_buf);
4744  
4745  	return status;
4746  }
4747  EXPORT_SYMBOL_GPL(spi_write_then_read);
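
/*
 * Example: the classic "write register address, then read the value
 * back" idiom (an illustrative sketch; 0x3d is a made-up register):
 *
 *	u8 addr = 0x3d;
 *	u8 val;
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &addr, 1, &val, 1);
 *	if (ret)
 *		return ret;
 */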
4748  
4749  /*-------------------------------------------------------------------------*/
4750  
4751  #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4752  /* Must call put_device() when done with the returned spi_device */
4753  static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4754  {
4755  	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4756  
4757  	return dev ? to_spi_device(dev) : NULL;
4758  }
4759  
4760  /* The SPI controllers are not on the SPI bus, so we find them another way */
4761  static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4762  {
4763  	struct device *dev;
4764  
4765  	dev = class_find_device_by_of_node(&spi_master_class, node);
4766  	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4767  		dev = class_find_device_by_of_node(&spi_slave_class, node);
4768  	if (!dev)
4769  		return NULL;
4770  
4771  	/* Reference got in class_find_device */
4772  	return container_of(dev, struct spi_controller, dev);
4773  }
4774  
4775  static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4776  			 void *arg)
4777  {
4778  	struct of_reconfig_data *rd = arg;
4779  	struct spi_controller *ctlr;
4780  	struct spi_device *spi;
4781  
4782  	switch (of_reconfig_get_state_change(action, arg)) {
4783  	case OF_RECONFIG_CHANGE_ADD:
4784  		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4785  		if (ctlr == NULL)
4786  			return NOTIFY_OK;	/* Not for us */
4787  
4788  		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4789  			put_device(&ctlr->dev);
4790  			return NOTIFY_OK;
4791  		}
4792  
4793  		/*
4794  		 * Clear the flag before adding the device so that fw_devlink
4795  		 * doesn't skip adding consumers to this device.
4796  		 */
4797  		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4798  		spi = of_register_spi_device(ctlr, rd->dn);
4799  		put_device(&ctlr->dev);
4800  
4801  		if (IS_ERR(spi)) {
4802  			pr_err("%s: failed to create for '%pOF'\n",
4803  					__func__, rd->dn);
4804  			of_node_clear_flag(rd->dn, OF_POPULATED);
4805  			return notifier_from_errno(PTR_ERR(spi));
4806  		}
4807  		break;
4808  
4809  	case OF_RECONFIG_CHANGE_REMOVE:
4810  		/* Already depopulated? */
4811  		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4812  			return NOTIFY_OK;
4813  
4814  		/* Find our device by node */
4815  		spi = of_find_spi_device_by_node(rd->dn);
4816  		if (spi == NULL)
4817  			return NOTIFY_OK;	/* Not meant for us */
4818  
4819  		/* Unregister takes one ref away */
4820  		spi_unregister_device(spi);
4821  
4822  		/* And put the reference of the find */
4823  		put_device(&spi->dev);
4824  		break;
4825  	}
4826  
4827  	return NOTIFY_OK;
4828  }
4829  
4830  static struct notifier_block spi_of_notifier = {
4831  	.notifier_call = of_spi_notify,
4832  };
4833  #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4834  extern struct notifier_block spi_of_notifier;
4835  #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4836  
4837  #if IS_ENABLED(CONFIG_ACPI)
4838  static int spi_acpi_controller_match(struct device *dev, const void *data)
4839  {
4840  	return ACPI_COMPANION(dev->parent) == data;
4841  }
4842  
4843  struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4844  {
4845  	struct device *dev;
4846  
4847  	dev = class_find_device(&spi_master_class, NULL, adev,
4848  				spi_acpi_controller_match);
4849  	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4850  		dev = class_find_device(&spi_slave_class, NULL, adev,
4851  					spi_acpi_controller_match);
4852  	if (!dev)
4853  		return NULL;
4854  
4855  	return container_of(dev, struct spi_controller, dev);
4856  }
4857  EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4858  
4859  static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4860  {
4861  	struct device *dev;
4862  
4863  	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4864  	return to_spi_device(dev);
4865  }
4866  
4867  static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4868  			   void *arg)
4869  {
4870  	struct acpi_device *adev = arg;
4871  	struct spi_controller *ctlr;
4872  	struct spi_device *spi;
4873  
4874  	switch (value) {
4875  	case ACPI_RECONFIG_DEVICE_ADD:
4876  		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4877  		if (!ctlr)
4878  			break;
4879  
4880  		acpi_register_spi_device(ctlr, adev);
4881  		put_device(&ctlr->dev);
4882  		break;
4883  	case ACPI_RECONFIG_DEVICE_REMOVE:
4884  		if (!acpi_device_enumerated(adev))
4885  			break;
4886  
4887  		spi = acpi_spi_find_device_by_adev(adev);
4888  		if (!spi)
4889  			break;
4890  
4891  		spi_unregister_device(spi);
4892  		put_device(&spi->dev);
4893  		break;
4894  	}
4895  
4896  	return NOTIFY_OK;
4897  }
4898  
4899  static struct notifier_block spi_acpi_notifier = {
4900  	.notifier_call = acpi_spi_notify,
4901  };
4902  #else
4903  extern struct notifier_block spi_acpi_notifier;
4904  #endif
4905  
4906  static int __init spi_init(void)
4907  {
4908  	int	status;
4909  
4910  	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4911  	if (!buf) {
4912  		status = -ENOMEM;
4913  		goto err0;
4914  	}
4915  
4916  	status = bus_register(&spi_bus_type);
4917  	if (status < 0)
4918  		goto err1;
4919  
4920  	status = class_register(&spi_master_class);
4921  	if (status < 0)
4922  		goto err2;
4923  
4924  	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4925  		status = class_register(&spi_slave_class);
4926  		if (status < 0)
4927  			goto err3;
4928  	}
4929  
4930  	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4931  		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4932  	if (IS_ENABLED(CONFIG_ACPI))
4933  		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4934  
4935  	return 0;
4936  
4937  err3:
4938  	class_unregister(&spi_master_class);
4939  err2:
4940  	bus_unregister(&spi_bus_type);
4941  err1:
4942  	kfree(buf);
4943  	buf = NULL;
4944  err0:
4945  	return status;
4946  }
4947  
4948  /*
4949   * A board_info is normally registered in arch_initcall(),
4950   * but even essential drivers wait until later.
4951   *
4952   * REVISIT only boardinfo really needs static linking. The rest (device and
4953   * driver registration) _could_ be dynamically linked (modular) ... Costs
4954   * include needing to have boardinfo data structures be much more public.
4955   */
4956  postcore_initcall(spi_init);
4957