// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        /* spi controllers may clean up for released devices */
        if (spi->controller->cleanup)
                spi->controller->cleanup(spi);

        spi_controller_put(spi->controller);
        kfree(spi->driver_override);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *a,
                                     const char *buf, size_t count)
{
        struct spi_device *spi = to_spi_device(dev);
        const char *end = memchr(buf, '\n', count);
        const size_t len = end ? end - buf : count;
        const char *driver_override, *old;

        /* We need to keep extra room for a newline when displaying value */
        if (len >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, len, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        device_lock(dev);
        old = spi->driver_override;
        if (len) {
                spi->driver_override = driver_override;
        } else {
                /* Empty string, disable driver override */
                spi->driver_override = NULL;
                kfree(driver_override);
        }
        device_unlock(dev);
        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        ssize_t len;

        device_lock(dev);
        len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);
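
/*
 * Example: the driver_override attribute above can be used from userspace
 * to force a device to bind to one specific driver.  This is an
 * illustrative sketch (device and driver names are hypothetical):
 *
 *      echo spi-foo > /sys/bus/spi/devices/spi0.0/driver_override
 *      echo spi0.0 > /sys/bus/spi/drivers/spi-foo/bind
 *
 * Writing an empty string to driver_override clears the override and
 * restores normal modalias-based matching.
 */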

#define SPI_STATISTICS_ATTRS(field, file)                               \
static ssize_t spi_controller_##field##_show(struct device *dev,        \
                                             struct device_attribute *attr, \
                                             char *buf)                 \
{                                                                       \
        struct spi_controller *ctlr = container_of(dev,                 \
                                         struct spi_controller, dev);   \
        return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
}                                                                       \
static struct device_attribute dev_attr_spi_controller_##field = {      \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_controller_##field##_show,                          \
};                                                                      \
static ssize_t spi_device_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                        char *buf)                      \
{                                                                       \
        struct spi_device *spi = to_spi_device(dev);                    \
        return spi_statistics_##field##_show(&spi->statistics, buf);    \
}                                                                       \
static struct device_attribute dev_attr_spi_device_##field = {          \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_device_##field##_show,                              \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf)                  \
{                                                                       \
        unsigned long flags;                                            \
        ssize_t len;                                                    \
        spin_lock_irqsave(&stat->lock, flags);                          \
        len = sprintf(buf, format_string, stat->field);                 \
        spin_unlock_irqrestore(&stat->lock, flags);                     \
        return len;                                                     \
}                                                                       \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)                       \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
                                 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
                                 "transfer_bytes_histo_" number,        \
                                 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
        &dev_attr_spi_controller_messages.attr,
        &dev_attr_spi_controller_transfers.attr,
        &dev_attr_spi_controller_errors.attr,
        &dev_attr_spi_controller_timedout.attr,
        &dev_attr_spi_controller_spi_sync.attr,
        &dev_attr_spi_controller_spi_sync_immediate.attr,
        &dev_attr_spi_controller_spi_async.attr,
        &dev_attr_spi_controller_bytes.attr,
        &dev_attr_spi_controller_bytes_rx.attr,
        &dev_attr_spi_controller_bytes_tx.attr,
        &dev_attr_spi_controller_transfer_bytes_histo0.attr,
        &dev_attr_spi_controller_transfer_bytes_histo1.attr,
        &dev_attr_spi_controller_transfer_bytes_histo2.attr,
        &dev_attr_spi_controller_transfer_bytes_histo3.attr,
        &dev_attr_spi_controller_transfer_bytes_histo4.attr,
        &dev_attr_spi_controller_transfer_bytes_histo5.attr,
        &dev_attr_spi_controller_transfer_bytes_histo6.attr,
        &dev_attr_spi_controller_transfer_bytes_histo7.attr,
        &dev_attr_spi_controller_transfer_bytes_histo8.attr,
        &dev_attr_spi_controller_transfer_bytes_histo9.attr,
        &dev_attr_spi_controller_transfer_bytes_histo10.attr,
        &dev_attr_spi_controller_transfer_bytes_histo11.attr,
        &dev_attr_spi_controller_transfer_bytes_histo12.attr,
        &dev_attr_spi_controller_transfer_bytes_histo13.attr,
        &dev_attr_spi_controller_transfer_bytes_histo14.attr,
        &dev_attr_spi_controller_transfer_bytes_histo15.attr,
        &dev_attr_spi_controller_transfer_bytes_histo16.attr,
        &dev_attr_spi_controller_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_controller_statistics_group,
        NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_controller *ctlr)
{
        unsigned long flags;
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != ctlr->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != ctlr->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
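
/*
 * Worked example of the histogram bucketing above: for xfer->len == 100,
 * fls(100) == 7, so l2len == 6 and the transfer is counted in the "64-127"
 * bucket (sysfs file transfer_bytes_histo_64-127).  A zero-length transfer
 * yields fls(0) - 1 == -1, which the clamp above maps to bucket 0.
 */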

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
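
/*
 * Example of the id table that spi_match_id() walks; the entry names and
 * driver_data values are hypothetical.  Matching is a plain strcmp()
 * against spi->modalias, and the sentinel entry (name[0] == '\0')
 * terminates the walk:
 *
 *      static const struct spi_device_id foo_spi_ids[] = {
 *              { "foo-chip-a", (kernel_ulong_t)&foo_chip_a_data },
 *              { "foo-chip-b", (kernel_ulong_t)&foo_chip_b_data },
 *              { }
 *      };
 *      MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 */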

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Check override first, and if set, only use the named driver */
        if (spi->driver_override)
                return strcmp(spi->driver_override, drv->name) == 0;

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        struct spi_device               *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret)
                return ret;

        ret = sdrv->probe(spi);
        if (ret)
                dev_pm_domain_detach(dev, true);

        return ret;
}

static int spi_drv_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = sdrv->remove(to_spi_device(dev));
        dev_pm_domain_detach(dev, true);

        return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;
        if (sdrv->probe)
                sdrv->driver.probe = spi_drv_probe;
        if (sdrv->remove)
                sdrv->driver.remove = spi_drv_remove;
        if (sdrv->shutdown)
                sdrv->driver.shutdown = spi_drv_shutdown;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
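
/*
 * Example: typical client-driver registration built on the function above.
 * spi_register_driver() wraps __spi_register_driver() with THIS_MODULE,
 * and most drivers reduce the boilerplate further with module_spi_driver().
 * The "foo" callbacks are hypothetical:
 *
 *      static struct spi_driver foo_driver = {
 *              .driver = {
 *                      .name = "foo",
 *              },
 *              .probe = foo_probe,
 *              .remove = foo_remove,
 *              .id_table = foo_spi_ids,
 *      };
 *      module_spi_driver(foo_driver);
 */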

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c with other readonly (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; it is also used to
 * protect objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/*
 * Prevents addition of devices with same chip select and
 * addition of devices below an unregistering controller.
 */
static DEFINE_MUTEX(spi_add_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
        struct spi_device       *spi;

        if (!spi_controller_get(ctlr))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_controller_put(ctlr);
                return NULL;
        }

        spi->master = spi->controller = ctlr;
        spi->dev.parent = &ctlr->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;
        spi->mode = ctlr->buswidth_override_bits;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->controller == new_spi->controller &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= ctlr->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
                        ctlr->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
        mutex_lock(&spi_add_lock);

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                goto done;
        }

        /* Controller may unregister concurrently */
        if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
            !device_is_registered(&ctlr->dev)) {
                status = -ENODEV;
                goto done;
        }

        /* Descriptors take precedence */
        if (ctlr->cs_gpiods)
                spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
        else if (ctlr->cs_gpios)
                spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                goto done;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
        mutex_unlock(&spi_add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
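
/*
 * Example of the two-step allocate-then-add flow described above, as an
 * adapter driver might use it (field values are illustrative):
 *
 *      struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *      if (!spi)
 *              return -ENOMEM;
 *      spi->chip_select = 0;
 *      spi->max_speed_hz = 1000000;
 *      strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *      if (spi_add_device(spi)) {
 *              spi_dev_put(spi);
 *              return -ENODEV;
 *      }
 *
 * On spi_add_device() failure the reference is dropped with spi_dev_put(),
 * as the spi_alloc_device() kerneldoc requires for a device that is never
 * added.
 */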

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /* NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(ctlr);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        if (chip->properties) {
                status = device_add_properties(&proxy->dev, chip->properties);
                if (status) {
                        dev_err(&ctlr->dev,
                                "failed to add properties to '%s': %d\n",
                                chip->modalias, status);
                        goto err_dev_put;
                }
        }

        status = spi_add_device(proxy);
        if (status < 0)
                goto err_remove_props;

        return proxy;

err_remove_props:
        if (chip->properties)
                device_remove_properties(&proxy->dev);
err_dev_put:
        spi_dev_put(proxy);
        return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
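
/*
 * Example of one-shot creation through spi_new_device(), e.g. from an
 * adapter driver that learned about the chip out-of-band (values are
 * illustrative):
 *
 *      struct spi_board_info chip = {
 *              .modalias       = "foo",
 *              .max_speed_hz   = 500000,
 *              .chip_select    = 1,
 *              .mode           = SPI_MODE_0,
 *      };
 *      struct spi_device *spi = spi_new_device(ctlr, &chip);
 *
 * A NULL return means failure; per the NOTE above, diagnostics have
 * already been logged.
 */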

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node) {
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
                of_node_put(spi->dev.of_node);
        }
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
                                              struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (ctlr->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(ctlr, bi);
        if (!dev)
                dev_err(ctlr->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return 0;

        bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_controller *ctlr;

                memcpy(&bi->board_info, info, sizeof(*info));
                if (info->properties) {
                        bi->board_info.properties =
                                        property_entries_dup(info->properties);
                        if (IS_ERR(bi->board_info.properties))
                                return PTR_ERR(bi->board_info.properties);
                }

                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(ctlr, &spi_controller_list, list)
                        spi_match_controller_to_boardinfo(ctlr,
                                                          &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}

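/*
 * Example of how board init code typically feeds spi_register_board_info(),
 * matching the kerneldoc above (names and values are illustrative):
 *
 *      static struct spi_board_info board_spi_devices[] __initdata = {
 *              {
 *                      .modalias       = "foo-flash",
 *                      .max_speed_hz   = 25000000,
 *                      .bus_num        = 0,
 *                      .chip_select    = 0,
 *              },
 *      };
 *
 *      static int __init board_init(void)
 *      {
 *              return spi_register_board_info(board_spi_devices,
 *                                      ARRAY_SIZE(board_spi_devices));
 *      }
 *      arch_initcall(board_init);
 */
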
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
        bool enable1 = enable;

        /*
         * Avoid calling into the driver (or doing delays) if the chip select
         * isn't actually changing from the last time this was called.
         */
        if ((spi->controller->last_cs_enable == enable) &&
            (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
                return;

        spi->controller->last_cs_enable = enable;
        spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

        if (!spi->controller->set_cs_timing) {
                if (enable1)
                        spi_delay_exec(&spi->controller->cs_setup, NULL);
                else
                        spi_delay_exec(&spi->controller->cs_hold, NULL);
        }

        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
                /*
                 * Honour the SPI_NO_CS flag and invert the enable line, as
                 * active low is default for SPI. Execution paths that handle
                 * polarity inversion in gpiolib (such as device tree) will
                 * enforce active high using the SPI_CS_HIGH resulting in a
                 * double inversion through the code above.
                 */
                if (!(spi->mode & SPI_NO_CS)) {
                        if (spi->cs_gpiod)
                                gpiod_set_value_cansleep(spi->cs_gpiod,
                                                         !enable);
                        else
                                gpio_set_value_cansleep(spi->cs_gpio, !enable);
                }
                /* Some SPI masters need both GPIO CS & slave_select */
                if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
                    spi->controller->set_cs)
                        spi->controller->set_cs(spi, !enable);
        } else if (spi->controller->set_cs) {
                spi->controller->set_cs(spi, !enable);
        }

        if (!spi->controller->set_cs_timing) {
                if (!enable1)
                        spi_delay_exec(&spi->controller->cs_inactive, NULL);
        }
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
                struct sg_table *sgt, void *buf, size_t len,
                enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
                                (unsigned long)buf < (PKMAP_BASE +
                                        (LAST_PKMAP * PAGE_SIZE)));
#else
        const bool kmap_buf = false;
#endif
        int desc_len;
        int sgs;
        struct page *vm_page;
        struct scatterlist *sg;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf || kmap_buf) {
                desc_len = min_t(int, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
                desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        sg = &sgt->sgl[0];
        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf || kmap_buf) {
                        /*
                         * Next scatterlist entry size is the minimum between
                         * the desc_len and the remaining buffer length that
                         * fits in a page.
                         */
                        min = min_t(size_t, desc_len,
                                    min_t(size_t, len,
                                          PAGE_SIZE - offset_in_page(buf)));
                        if (vmalloced_buf)
                                vm_page = vmalloc_to_page(buf);
                        else
                                vm_page = kmap_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(sg, vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(sg, sg_buf, min);
                }

                buf += min;
                len -= min;
                sg = sg_next(sg);
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
                   struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        ctlr->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        ctlr->cur_msg_mapped = false;

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore the original value of tx_buf or rx_buf if they are
                 * NULL.
                 */
                if (xfer->tx_buf == ctlr->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == ctlr->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
                && !(msg->spi->mode & SPI_3WIRE)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(ctlr->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(ctlr->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->len)
                                        continue;
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = ctlr->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = ctlr->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
                             struct spi_message *msg,
                             struct spi_transfer *xfer)
{
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
        unsigned long long ms;

        if (spi_controller_is_slave(ctlr)) {
                if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
                        dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
                        return -EINTR;
                }
        } else {
                ms = 8LL * 1000LL * xfer->len;
                do_div(ms, xfer->speed_hz);
                ms += ms + 200; /* some tolerance */

                if (ms > UINT_MAX)
                        ms = UINT_MAX;

                ms = wait_for_completion_timeout(&ctlr->xfer_completion,
                                                 msecs_to_jiffies(ms));

                if (ms == 0) {
                        SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
                        SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
                        dev_err(&msg->spi->dev,
                                "SPI transfer timed out\n");
                        return -ETIMEDOUT;
                }
        }

        return 0;
}
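
/*
 * Worked example of the timeout computed above: a 4096-byte transfer at
 * 1 MHz gives ms = 8 * 1000 * 4096 / 1000000 = 32, and ms += ms + 200
 * makes the final timeout 264 ms, i.e. twice the expected transfer time
 * plus 200 ms of slack.
 */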

static void _spi_transfer_delay_ns(u32 ns)
{
        if (!ns)
                return;
        if (ns <= 1000) {
                ndelay(ns);
        } else {
                u32 us = DIV_ROUND_UP(ns, 1000);

                if (us <= 10)
                        udelay(us);
                else
                        usleep_range(us, us + DIV_ROUND_UP(us, 10));
        }
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        u32 delay = _delay->value;
        u32 unit = _delay->unit;
        u32 hz;

        if (!delay)
                return 0;

        switch (unit) {
        case SPI_DELAY_UNIT_USECS:
                delay *= 1000;
                break;
        case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
                break;
        case SPI_DELAY_UNIT_SCK:
                /* clock cycles need to be obtained from spi_transfer */
                if (!xfer)
                        return -EINVAL;
                /* if there is no effective speed known, then approximate
                 * by underestimating with half the requested hz
                 */
                hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
                if (!hz)
                        return -EINVAL;
                delay *= DIV_ROUND_UP(1000000000, hz);
                break;
        default:
                return -EINVAL;
        }

        return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
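
/*
 * Conversion example for spi_delay_to_ns(): a delay of
 * { .value = 4, .unit = SPI_DELAY_UNIT_SCK } on a transfer whose
 * effective_speed_hz is 1000000 yields 4 * DIV_ROUND_UP(1000000000,
 * 1000000) == 4000 ns.  When no effective speed was recorded, half of
 * xfer->speed_hz is used instead, which lengthens the computed delay and
 * so errs on the safe side.
 */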

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        int delay;

        might_sleep();

        if (!_delay)
                return -EINVAL;

        delay = spi_delay_to_ns(_delay, xfer);
        if (delay < 0)
                return delay;

        _spi_transfer_delay_ns(delay);

        return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
                                          struct spi_transfer *xfer)
{
        u32 delay = xfer->cs_change_delay.value;
        u32 unit = xfer->cs_change_delay.unit;
        int ret;

        /* return early on "fast" mode - for everything but USECS */
        if (!delay) {
                if (unit == SPI_DELAY_UNIT_USECS)
                        _spi_transfer_delay_ns(10000);
                return;
        }

        ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
        if (ret) {
                dev_err_once(&msg->spi->dev,
                             "Use of unsupported delay unit %i, using default of 10us\n",
                             unit);
                _spi_transfer_delay_ns(10000);
        }
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;

        spi_set_cs(msg->spi, true);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, ctlr);
                spi_statistics_add_transfer_stats(stats, xfer, ctlr);

                if (!ctlr->ptp_sts_supported) {
                        xfer->ptp_sts_word_pre = 0;
                        ptp_read_system_prets(xfer->ptp_sts);
                }

                if (xfer->tx_buf || xfer->rx_buf) {
                        reinit_completion(&ctlr->xfer_completion);

fallback_pio:
                        ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
                        if (ret < 0) {
                                if (ctlr->cur_msg_mapped &&
                                   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
                                        __spi_unmap_msg(ctlr, msg);
                                        ctlr->fallback = true;
                                        xfer->error &= ~SPI_TRANS_FAIL_NO_START;
                                        goto fallback_pio;
                                }

                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = spi_transfer_wait(ctlr, msg, xfer);
                                if (ret < 0)
                                        msg->status = ret;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                if (!ctlr->ptp_sts_supported) {
                        ptp_read_system_postts(xfer->ptp_sts);
                        xfer->ptp_sts_word_post = xfer->len;
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                spi_transfer_delay_exec(xfer);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false);
                                _spi_transfer_cs_change_delay(msg, xfer);
                                spi_set_cs(msg->spi, true);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && ctlr->handle_err)
                ctlr->handle_err(ctlr, msg);

        spi_finalize_current_message(ctlr);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
        complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
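
/*
 * Example of a controller driver pairing transfer_one() with the helper
 * above: transfer_one() starts the hardware and returns 1 ("in progress"),
 * and the completion interrupt then reports the transfer as done.  The
 * hardware specifics (foo_ack_irq) are hypothetical:
 *
 *      static irqreturn_t foo_irq(int irq, void *dev_id)
 *      {
 *              struct spi_controller *ctlr = dev_id;
 *
 *              foo_ack_irq(ctlr);
 *              spi_finalize_current_transfer(ctlr);
 *              return IRQ_HANDLED;
 *      }
 */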

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
        if (ctlr->auto_runtime_pm) {
                pm_runtime_mark_last_busy(ctlr->dev.parent);
                pm_runtime_put_autosuspend(ctlr->dev.parent);
        }
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and, if so, calls out to the driver to initialize
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
        struct spi_transfer *xfer;
        struct spi_message *msg;
        bool was_busy = false;
        unsigned long flags;
        int ret;

        /* Lock queue */
        spin_lock_irqsave(&ctlr->queue_lock, flags);

        /* Make sure we are not already running a message */
        if (ctlr->cur_msg) {
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                return;
        }

        /* If another context is idling the device then defer */
        if (ctlr->idling) {
                kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                return;
        }

        /* Check if the queue is idle */
        if (list_empty(&ctlr->queue) || !ctlr->running) {
                if (!ctlr->busy) {
                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                        return;
                }

                /* Defer any non-atomic teardown to the thread */
                if (!in_kthread) {
                        if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
                            !ctlr->unprepare_transfer_hardware) {
                                spi_idle_runtime_pm(ctlr);
                                ctlr->busy = false;
                                trace_spi_controller_idle(ctlr);
                        } else {
                                kthread_queue_work(ctlr->kworker,
                                                   &ctlr->pump_messages);
                        }
                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                        return;
                }

                ctlr->busy = false;
                ctlr->idling = true;
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);

                kfree(ctlr->dummy_rx);
                ctlr->dummy_rx = NULL;
                kfree(ctlr->dummy_tx);
                ctlr->dummy_tx = NULL;
                if (ctlr->unprepare_transfer_hardware &&
                    ctlr->unprepare_transfer_hardware(ctlr))
                        dev_err(&ctlr->dev,
                                "failed to unprepare transfer hardware\n");
                spi_idle_runtime_pm(ctlr);
                trace_spi_controller_idle(ctlr);

                spin_lock_irqsave(&ctlr->queue_lock, flags);
                ctlr->idling = false;
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                return;
        }

        /* Extract head of queue */
        msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
        ctlr->cur_msg = msg;

        list_del_init(&msg->queue);
        if (ctlr->busy)
                was_busy = true;
        else
                ctlr->busy = true;
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);

        mutex_lock(&ctlr->io_mutex);

        if (!was_busy && ctlr->auto_runtime_pm) {
                ret = pm_runtime_get_sync(ctlr->dev.parent);
                if (ret < 0) {
                        pm_runtime_put_noidle(ctlr->dev.parent);
                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
                                ret);
                        mutex_unlock(&ctlr->io_mutex);
                        return;
                }
        }

        if (!was_busy)
                trace_spi_controller_busy(ctlr);

        if (!was_busy && ctlr->prepare_transfer_hardware) {
                ret = ctlr->prepare_transfer_hardware(ctlr);
                if (ret) {
                        dev_err(&ctlr->dev,
                                "failed to prepare transfer hardware: %d\n",
                                ret);

                        if (ctlr->auto_runtime_pm)
                                pm_runtime_put(ctlr->dev.parent);

                        msg->status = ret;
                        spi_finalize_current_message(ctlr);

                        mutex_unlock(&ctlr->io_mutex);
                        return;
                }
        }

        trace_spi_message_start(msg);

        if (ctlr->prepare_message) {
                ret = ctlr->prepare_message(ctlr, msg);
                if (ret) {
                        dev_err(&ctlr->dev, "failed to prepare message: %d\n",
                                ret);
                        msg->status = ret;
                        spi_finalize_current_message(ctlr);
                        goto out;
                }
                ctlr->cur_msg_prepared = true;
        }

        ret = spi_map_msg(ctlr, msg);
        if (ret) {
                msg->status = ret;
                spi_finalize_current_message(ctlr);
                goto out;
        }

        if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        xfer->ptp_sts_word_pre = 0;
                        ptp_read_system_prets(xfer->ptp_sts);
                }
        }

        ret = ctlr->transfer_one_message(ctlr, msg);
        if (ret) {
                dev_err(&ctlr->dev,
                        "failed to transfer one message from queue\n");
                goto out;
        }

out:
        mutex_unlock(&ctlr->io_mutex);

        /* Prod the scheduler in case transfer_one() was busy waiting */
        if (!ret)
                cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
        struct spi_controller *ctlr =
                container_of(work, struct spi_controller, pump_messages);

        __spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
1539  *                          TX timestamp for the requested byte from the SPI
1540  *                          transfer. The frequency with which this function
1541  *                          must be called (once per word, once for the whole
1542  *                          transfer, once per batch of words etc) is arbitrary
1543  *                          as long as the @tx buffer offset is greater than or
1544  *                          equal to the requested byte at the time of the
1545  *                          call. The timestamp is only taken once, at the
1546  *                          first such call. It is assumed that the driver
1547  *                          advances its @tx buffer pointer monotonically.
1548  * @ctlr: Pointer to the spi_controller structure of the driver
1549  * @xfer: Pointer to the transfer being timestamped
1550  * @progress: How many words (not bytes) have been transferred so far
1551  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1552  *            transfer, for less jitter in time measurement. Only compatible
1553  *            with PIO drivers. If true, must follow up with
1554  *            spi_take_timestamp_post, otherwise the system will crash.
1555  *            WARNING: for fully predictable results, the CPU frequency must
1556  *            also be under control (governor).
1557  */
1558 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1559                             struct spi_transfer *xfer,
1560                             size_t progress, bool irqs_off)
1561 {
1562         if (!xfer->ptp_sts)
1563                 return;
1564
1565         if (xfer->timestamped)
1566                 return;
1567
1568         if (progress > xfer->ptp_sts_word_pre)
1569                 return;
1570
1571         /* Capture the resolution of the timestamp */
1572         xfer->ptp_sts_word_pre = progress;
1573
1574         if (irqs_off) {
1575                 local_irq_save(ctlr->irq_flags);
1576                 preempt_disable();
1577         }
1578
1579         ptp_read_system_prets(xfer->ptp_sts);
1580 }
1581 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1582
1583 /**
1584  * spi_take_timestamp_post - helper for drivers to collect the end of the
1585  *                           TX timestamp for the requested byte from the SPI
1586  *                           transfer. Can be called with an arbitrary
1587  *                           frequency: only the first call where @tx exceeds
1588  *                           or is equal to the requested word will be
1589  *                           timestamped.
1590  * @ctlr: Pointer to the spi_controller structure of the driver
1591  * @xfer: Pointer to the transfer being timestamped
1592  * @progress: How many words (not bytes) have been transferred so far
1593  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1594  */
1595 void spi_take_timestamp_post(struct spi_controller *ctlr,
1596                              struct spi_transfer *xfer,
1597                              size_t progress, bool irqs_off)
1598 {
1599         if (!xfer->ptp_sts)
1600                 return;
1601
1602         if (xfer->timestamped)
1603                 return;
1604
1605         if (progress < xfer->ptp_sts_word_post)
1606                 return;
1607
1608         ptp_read_system_postts(xfer->ptp_sts);
1609
1610         if (irqs_off) {
1611                 local_irq_restore(ctlr->irq_flags);
1612                 preempt_enable();
1613         }
1614
1615         /* Capture the resolution of the timestamp */
1616         xfer->ptp_sts_word_post = progress;
1617
1618         xfer->timestamped = true;
1619 }
1620 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
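
/*
 * As an illustrative sketch (not upstream code), a PIO driver pushing one
 * 32-bit word at a time could bracket each word with the two helpers above;
 * foo_write_word() and priv are hypothetical:
 *
 *	for (i = 0; i < xfer->len / 4; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_word(priv, ((const u32 *)xfer->tx_buf)[i]);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */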
1621
1622 /**
1623  * spi_set_thread_rt - set the controller to pump at realtime priority
1624  * @ctlr: controller to boost priority of
1625  *
1626  * This can be called because the controller requested realtime priority
1627  * (by setting the ->rt value before calling spi_register_controller()) or
1628  * because a device on the bus said that its transfers needed realtime
1629  * priority.
1630  *
1631  * NOTE: at the moment if any device on a bus says it needs realtime then
1632  * the thread will be at realtime priority for all transfers on that
1633  * controller.  If this eventually becomes a problem we may see if we can
1634  * find a way to boost the priority only temporarily during relevant
1635  * transfers.
1636  */
1637 static void spi_set_thread_rt(struct spi_controller *ctlr)
1638 {
1639         dev_info(&ctlr->dev,
1640                 "will run message pump with realtime priority\n");
1641         sched_set_fifo(ctlr->kworker->task);
1642 }
1643
1644 static int spi_init_queue(struct spi_controller *ctlr)
1645 {
1646         ctlr->running = false;
1647         ctlr->busy = false;
1648
1649         ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1650         if (IS_ERR(ctlr->kworker)) {
1651                 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1652                 return PTR_ERR(ctlr->kworker);
1653         }
1654
1655         kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1656
1657         /*
1658          * Controller config will indicate if this controller should run the
1659          * message pump with high (realtime) priority to reduce the transfer
1660          * latency on the bus by minimising the delay between a transfer
1661          * request and the scheduling of the message pump thread. Without this
1662          * setting the message pump thread will remain at default priority.
1663          */
1664         if (ctlr->rt)
1665                 spi_set_thread_rt(ctlr);
1666
1667         return 0;
1668 }
1669
1670 /**
1671  * spi_get_next_queued_message() - called by driver to check for queued
1672  * messages
1673  * @ctlr: the controller to check for queued messages
1674  *
1675  * If there are more messages in the queue, the next message is returned from
1676  * this call.
1677  *
1678  * Return: the next message in the queue, else NULL if the queue is empty.
1679  */
1680 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1681 {
1682         struct spi_message *next;
1683         unsigned long flags;
1684
1685         /* get a pointer to the next message, if any */
1686         spin_lock_irqsave(&ctlr->queue_lock, flags);
1687         next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1688                                         queue);
1689         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1690
1691         return next;
1692 }
1693 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
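
/*
 * Sketch of a typical use (hypothetical foo_* names): a driver deciding in
 * its unprepare path whether to keep the hardware powered because more
 * messages are already queued:
 *
 *	if (spi_get_next_queued_message(ctlr))
 *		return 0;	// more work pending, skip teardown
 *	foo_power_down(priv);
 */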
1694
1695 /**
1696  * spi_finalize_current_message() - the current message is complete
1697  * @ctlr: the controller to return the message to
1698  *
1699  * Called by the driver to notify the core that the message in the front of the
1700  * queue is complete and can be removed from the queue.
1701  */
1702 void spi_finalize_current_message(struct spi_controller *ctlr)
1703 {
1704         struct spi_transfer *xfer;
1705         struct spi_message *mesg;
1706         unsigned long flags;
1707         int ret;
1708
1709         spin_lock_irqsave(&ctlr->queue_lock, flags);
1710         mesg = ctlr->cur_msg;
1711         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1712
1713         if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1714                 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1715                         ptp_read_system_postts(xfer->ptp_sts);
1716                         xfer->ptp_sts_word_post = xfer->len;
1717                 }
1718         }
1719
1720         if (unlikely(ctlr->ptp_sts_supported))
1721                 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
1722                         WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1723
1724         spi_unmap_msg(ctlr, mesg);
1725
1726         /* In the prepare_message callback the SPI bus has the opportunity
1727          * to split a transfer into smaller chunks.
1728          * Release the split transfers here, since spi_map_msg() is done on
1729          * the split transfers.
1730          */
1731         spi_res_release(ctlr, mesg);
1732
1733         if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1734                 ret = ctlr->unprepare_message(ctlr, mesg);
1735                 if (ret) {
1736                         dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1737                                 ret);
1738                 }
1739         }
1740
1741         spin_lock_irqsave(&ctlr->queue_lock, flags);
1742         ctlr->cur_msg = NULL;
1743         ctlr->cur_msg_prepared = false;
1744         ctlr->fallback = false;
1745         kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1746         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1747
1748         trace_spi_message_done(mesg);
1749
1750         mesg->state = NULL;
1751         if (mesg->complete)
1752                 mesg->complete(mesg->context);
1753 }
1754 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
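
/*
 * Sketch, assuming a driver that provides its own ->transfer_one_message()
 * (foo_do_transfer() is hypothetical): the handler must set msg->status and
 * call spi_finalize_current_message() when the message is done:
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *		int ret = 0;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			ret = foo_do_transfer(ctlr, xfer);
 *			if (ret)
 *				break;
 *			msg->actual_length += xfer->len;
 *		}
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */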
1755
1756 static int spi_start_queue(struct spi_controller *ctlr)
1757 {
1758         unsigned long flags;
1759
1760         spin_lock_irqsave(&ctlr->queue_lock, flags);
1761
1762         if (ctlr->running || ctlr->busy) {
1763                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1764                 return -EBUSY;
1765         }
1766
1767         ctlr->running = true;
1768         ctlr->cur_msg = NULL;
1769         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1770
1771         kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1772
1773         return 0;
1774 }
1775
1776 static int spi_stop_queue(struct spi_controller *ctlr)
1777 {
1778         unsigned long flags;
1779         unsigned limit = 500;
1780         int ret = 0;
1781
1782         spin_lock_irqsave(&ctlr->queue_lock, flags);
1783
1784         /*
1785          * This is a bit lame, but is optimized for the common execution path.
1786          * A wait_queue on the ctlr->busy could be used, but then the common
1787          * execution path (pump_messages) would be required to call wake_up or
1788          * friends on every SPI message. Do this instead.
1789          */
1790         while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1791                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1792                 usleep_range(10000, 11000);
1793                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1794         }
1795
1796         if (!list_empty(&ctlr->queue) || ctlr->busy)
1797                 ret = -EBUSY;
1798         else
1799                 ctlr->running = false;
1800
1801         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1802
1803         if (ret)
1804                 dev_warn(&ctlr->dev, "could not stop message queue\n");
1805
1806         return ret;
1808 }
1809
1810 static int spi_destroy_queue(struct spi_controller *ctlr)
1811 {
1812         int ret;
1813
1814         ret = spi_stop_queue(ctlr);
1815
1816         /*
1817          * kthread_destroy_worker() will flush and block until all work is
1818          * done. If stop_queue timed out because the work will never finish,
1819          * then flushing or stopping the thread does no good, so just return.
1820          */
1822         if (ret) {
1823                 dev_err(&ctlr->dev, "problem destroying queue\n");
1824                 return ret;
1825         }
1826
1827         kthread_destroy_worker(ctlr->kworker);
1828
1829         return 0;
1830 }
1831
1832 static int __spi_queued_transfer(struct spi_device *spi,
1833                                  struct spi_message *msg,
1834                                  bool need_pump)
1835 {
1836         struct spi_controller *ctlr = spi->controller;
1837         unsigned long flags;
1838
1839         spin_lock_irqsave(&ctlr->queue_lock, flags);
1840
1841         if (!ctlr->running) {
1842                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1843                 return -ESHUTDOWN;
1844         }
1845         msg->actual_length = 0;
1846         msg->status = -EINPROGRESS;
1847
1848         list_add_tail(&msg->queue, &ctlr->queue);
1849         if (!ctlr->busy && need_pump)
1850                 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1851
1852         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1853         return 0;
1854 }
1855
1856 /**
1857  * spi_queued_transfer - transfer function for queued transfers
1858  * @spi: spi device which is requesting transfer
1859  * @msg: spi message which is to be handled and queued to the driver queue
1860  *
1861  * Return: zero on success, else a negative error code.
1862  */
1863 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1864 {
1865         return __spi_queued_transfer(spi, msg, true);
1866 }
1867
1868 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1869 {
1870         int ret;
1871
1872         ctlr->transfer = spi_queued_transfer;
1873         if (!ctlr->transfer_one_message)
1874                 ctlr->transfer_one_message = spi_transfer_one_message;
1875
1876         /* Initialize and start queue */
1877         ret = spi_init_queue(ctlr);
1878         if (ret) {
1879                 dev_err(&ctlr->dev, "problem initializing queue\n");
1880                 goto err_init_queue;
1881         }
1882         ctlr->queued = true;
1883         ret = spi_start_queue(ctlr);
1884         if (ret) {
1885                 dev_err(&ctlr->dev, "problem starting queue\n");
1886                 goto err_start_queue;
1887         }
1888
1889         return 0;
1890
1891 err_start_queue:
1892         spi_destroy_queue(ctlr);
1893 err_init_queue:
1894         return ret;
1895 }
1896
1897 /**
1898  * spi_flush_queue - Send all pending messages in the queue from the caller's
1899  *                   context
1900  * @ctlr: controller to process queue for
1901  *
1902  * This should be used when one wants to ensure all pending messages have been
1903  * sent before doing something. It is used by the spi-mem code to make sure SPI
1904  * memory operations do not preempt regular SPI transfers that have been queued
1905  * before the spi-mem operation.
1906  */
1907 void spi_flush_queue(struct spi_controller *ctlr)
1908 {
1909         if (ctlr->transfer == spi_queued_transfer)
1910                 __spi_pump_messages(ctlr, false);
1911 }
1912
1913 /*-------------------------------------------------------------------------*/
1914
1915 #if defined(CONFIG_OF)
1916 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1917                            struct device_node *nc)
1918 {
1919         u32 value;
1920         int rc;
1921
1922         /* Mode (clock phase/polarity/etc.) */
1923         if (of_property_read_bool(nc, "spi-cpha"))
1924                 spi->mode |= SPI_CPHA;
1925         if (of_property_read_bool(nc, "spi-cpol"))
1926                 spi->mode |= SPI_CPOL;
1927         if (of_property_read_bool(nc, "spi-3wire"))
1928                 spi->mode |= SPI_3WIRE;
1929         if (of_property_read_bool(nc, "spi-lsb-first"))
1930                 spi->mode |= SPI_LSB_FIRST;
1931         if (of_property_read_bool(nc, "spi-cs-high"))
1932                 spi->mode |= SPI_CS_HIGH;
1933
1934         /* Device DUAL/QUAD mode */
1935         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1936                 switch (value) {
1937                 case 1:
1938                         break;
1939                 case 2:
1940                         spi->mode |= SPI_TX_DUAL;
1941                         break;
1942                 case 4:
1943                         spi->mode |= SPI_TX_QUAD;
1944                         break;
1945                 case 8:
1946                         spi->mode |= SPI_TX_OCTAL;
1947                         break;
1948                 default:
1949                         dev_warn(&ctlr->dev,
1950                                 "spi-tx-bus-width %d not supported\n",
1951                                 value);
1952                         break;
1953                 }
1954         }
1955
1956         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1957                 switch (value) {
1958                 case 1:
1959                         break;
1960                 case 2:
1961                         spi->mode |= SPI_RX_DUAL;
1962                         break;
1963                 case 4:
1964                         spi->mode |= SPI_RX_QUAD;
1965                         break;
1966                 case 8:
1967                         spi->mode |= SPI_RX_OCTAL;
1968                         break;
1969                 default:
1970                         dev_warn(&ctlr->dev,
1971                                 "spi-rx-bus-width %d not supported\n",
1972                                 value);
1973                         break;
1974                 }
1975         }
1976
1977         if (spi_controller_is_slave(ctlr)) {
1978                 if (!of_node_name_eq(nc, "slave")) {
1979                         dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1980                                 nc);
1981                         return -EINVAL;
1982                 }
1983                 return 0;
1984         }
1985
1986         /* Device address */
1987         rc = of_property_read_u32(nc, "reg", &value);
1988         if (rc) {
1989                 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1990                         nc, rc);
1991                 return rc;
1992         }
1993         spi->chip_select = value;
1994
1995         /*
1996          * For descriptors associated with the device, polarity inversion is
1997          * handled in the gpiolib, so all gpio chip selects are "active high"
1998          * in the logical sense, the gpiolib will invert the line if need be.
1999          */
2000         if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
2001             ctlr->cs_gpiods[spi->chip_select])
2002                 spi->mode |= SPI_CS_HIGH;
2003
2004         /* Device speed */
2005         if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2006                 spi->max_speed_hz = value;
2007
2008         return 0;
2009 }
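
/*
 * For reference, a device-tree child node consumed by the parser above might
 * look like this (an illustrative example, not tied to any particular board):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <20000000>;
 *		spi-cpha;
 *		spi-cpol;
 *	};
 */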
2010
2011 static struct spi_device *
2012 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2013 {
2014         struct spi_device *spi;
2015         int rc;
2016
2017         /* Alloc an spi_device */
2018         spi = spi_alloc_device(ctlr);
2019         if (!spi) {
2020                 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2021                 rc = -ENOMEM;
2022                 goto err_out;
2023         }
2024
2025         /* Select device driver */
2026         rc = of_modalias_node(nc, spi->modalias,
2027                                 sizeof(spi->modalias));
2028         if (rc < 0) {
2029                 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2030                 goto err_out;
2031         }
2032
2033         rc = of_spi_parse_dt(ctlr, spi, nc);
2034         if (rc)
2035                 goto err_out;
2036
2037         /* Store a pointer to the node in the device structure */
2038         of_node_get(nc);
2039         spi->dev.of_node = nc;
2040
2041         /* Register the new device */
2042         rc = spi_add_device(spi);
2043         if (rc) {
2044                 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2045                 goto err_of_node_put;
2046         }
2047
2048         return spi;
2049
2050 err_of_node_put:
2051         of_node_put(nc);
2052 err_out:
2053         spi_dev_put(spi);
2054         return ERR_PTR(rc);
2055 }
2056
2057 /**
2058  * of_register_spi_devices() - Register child devices onto the SPI bus
2059  * @ctlr:       Pointer to spi_controller device
2060  *
2061  * Registers an spi_device for each child node of the controller node that
2062  * represents a valid SPI slave.
2063  */
2064 static void of_register_spi_devices(struct spi_controller *ctlr)
2065 {
2066         struct spi_device *spi;
2067         struct device_node *nc;
2068
2069         if (!ctlr->dev.of_node)
2070                 return;
2071
2072         for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2073                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2074                         continue;
2075                 spi = of_register_spi_device(ctlr, nc);
2076                 if (IS_ERR(spi)) {
2077                         dev_warn(&ctlr->dev,
2078                                  "Failed to create SPI device for %pOF\n", nc);
2079                         of_node_clear_flag(nc, OF_POPULATED);
2080                 }
2081         }
2082 }
2083 #else
2084 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2085 #endif
2086
2087 #ifdef CONFIG_ACPI
2088 struct acpi_spi_lookup {
2089         struct spi_controller   *ctlr;
2090         u32                     max_speed_hz;
2091         u32                     mode;
2092         int                     irq;
2093         u8                      bits_per_word;
2094         u8                      chip_select;
2095 };
2096
2097 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2098                                             struct acpi_spi_lookup *lookup)
2099 {
2100         const union acpi_object *obj;
2101
2102         if (!x86_apple_machine)
2103                 return;
2104
2105         if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2106             && obj->buffer.length >= 4)
2107                 lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2108
2109         if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2110             && obj->buffer.length == 8)
2111                 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2112
2113         if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2114             && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2115                 lookup->mode |= SPI_LSB_FIRST;
2116
2117         if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2118             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2119                 lookup->mode |= SPI_CPOL;
2120
2121         if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2122             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2123                 lookup->mode |= SPI_CPHA;
2124 }
2125
2126 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2127 {
2128         struct acpi_spi_lookup *lookup = data;
2129         struct spi_controller *ctlr = lookup->ctlr;
2130
2131         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2132                 struct acpi_resource_spi_serialbus *sb;
2133                 acpi_handle parent_handle;
2134                 acpi_status status;
2135
2136                 sb = &ares->data.spi_serial_bus;
2137                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2138
2139                         status = acpi_get_handle(NULL,
2140                                                  sb->resource_source.string_ptr,
2141                                                  &parent_handle);
2142
2143                         if (ACPI_FAILURE(status) ||
2144                             ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2145                                 return -ENODEV;
2146
2147                         /*
2148                          * ACPI DeviceSelection numbering is handled by the
2149                          * host controller driver in Windows and can vary
2150                          * from driver to driver. In Linux we always expect
2151                          * 0 .. max - 1 so we need to ask the driver to
2152                          * translate between the two schemes.
2153                          */
2154                         if (ctlr->fw_translate_cs) {
2155                                 int cs = ctlr->fw_translate_cs(ctlr,
2156                                                 sb->device_selection);
2157                                 if (cs < 0)
2158                                         return cs;
2159                                 lookup->chip_select = cs;
2160                         } else {
2161                                 lookup->chip_select = sb->device_selection;
2162                         }
2163
2164                         lookup->max_speed_hz = sb->connection_speed;
2165                         lookup->bits_per_word = sb->data_bit_length;
2166
2167                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2168                                 lookup->mode |= SPI_CPHA;
2169                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2170                                 lookup->mode |= SPI_CPOL;
2171                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2172                                 lookup->mode |= SPI_CS_HIGH;
2173                 }
2174         } else if (lookup->irq < 0) {
2175                 struct resource r;
2176
2177                 if (acpi_dev_resource_interrupt(ares, 0, &r))
2178                         lookup->irq = r.start;
2179         }
2180
2181         /* Always tell the ACPI core to skip this resource */
2182         return 1;
2183 }
2184
2185 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2186                                             struct acpi_device *adev)
2187 {
2188         acpi_handle parent_handle = NULL;
2189         struct list_head resource_list;
2190         struct acpi_spi_lookup lookup = {};
2191         struct spi_device *spi;
2192         int ret;
2193
2194         if (acpi_bus_get_status(adev) || !adev->status.present ||
2195             acpi_device_enumerated(adev))
2196                 return AE_OK;
2197
2198         lookup.ctlr             = ctlr;
2199         lookup.irq              = -1;
2200
2201         INIT_LIST_HEAD(&resource_list);
2202         ret = acpi_dev_get_resources(adev, &resource_list,
2203                                      acpi_spi_add_resource, &lookup);
2204         acpi_dev_free_resource_list(&resource_list);
2205
2206         if (ret < 0)
2207                 /* found SPI in _CRS but it points to another controller */
2208                 return AE_OK;
2209
2210         if (!lookup.max_speed_hz &&
2211             !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
2212             ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
2213                 /* Apple does not use _CRS but nested devices for SPI slaves */
2214                 acpi_spi_parse_apple_properties(adev, &lookup);
2215         }
2216
2217         if (!lookup.max_speed_hz)
2218                 return AE_OK;
2219
2220         spi = spi_alloc_device(ctlr);
2221         if (!spi) {
2222                 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2223                         dev_name(&adev->dev));
2224                 return AE_NO_MEMORY;
2225         }
2226
2228         ACPI_COMPANION_SET(&spi->dev, adev);
2229         spi->max_speed_hz       = lookup.max_speed_hz;
2230         spi->mode               |= lookup.mode;
2231         spi->irq                = lookup.irq;
2232         spi->bits_per_word      = lookup.bits_per_word;
2233         spi->chip_select        = lookup.chip_select;
2234
2235         acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2236                           sizeof(spi->modalias));
2237
2238         if (spi->irq < 0)
2239                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2240
2241         acpi_device_set_enumerated(adev);
2242
2243         adev->power.flags.ignore_parent = true;
2244         if (spi_add_device(spi)) {
2245                 adev->power.flags.ignore_parent = false;
2246                 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2247                         dev_name(&adev->dev));
2248                 spi_dev_put(spi);
2249         }
2250
2251         return AE_OK;
2252 }
2253
2254 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2255                                        void *data, void **return_value)
2256 {
2257         struct spi_controller *ctlr = data;
2258         struct acpi_device *adev;
2259
2260         if (acpi_bus_get_device(handle, &adev))
2261                 return AE_OK;
2262
2263         return acpi_register_spi_device(ctlr, adev);
2264 }
2265
2266 #define SPI_ACPI_ENUMERATE_MAX_DEPTH            32
2267
2268 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2269 {
2270         acpi_status status;
2271         acpi_handle handle;
2272
2273         handle = ACPI_HANDLE(ctlr->dev.parent);
2274         if (!handle)
2275                 return;
2276
2277         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2278                                      SPI_ACPI_ENUMERATE_MAX_DEPTH,
2279                                      acpi_spi_add_device, NULL, ctlr, NULL);
2280         if (ACPI_FAILURE(status))
2281                 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2282 }
2283 #else
2284 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2285 #endif /* CONFIG_ACPI */
2286
2287 static void spi_controller_release(struct device *dev)
2288 {
2289         struct spi_controller *ctlr;
2290
2291         ctlr = container_of(dev, struct spi_controller, dev);
2292         kfree(ctlr);
2293 }
2294
2295 static struct class spi_master_class = {
2296         .name           = "spi_master",
2297         .owner          = THIS_MODULE,
2298         .dev_release    = spi_controller_release,
2299         .dev_groups     = spi_master_groups,
2300 };
2301
2302 #ifdef CONFIG_SPI_SLAVE
2303 /**
2304  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2305  *                   controller
2306  * @spi: device used for the current transfer
2307  */
2308 int spi_slave_abort(struct spi_device *spi)
2309 {
2310         struct spi_controller *ctlr = spi->controller;
2311
2312         if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2313                 return ctlr->slave_abort(ctlr);
2314
2315         return -ENOTSUPP;
2316 }
2317 EXPORT_SYMBOL_GPL(spi_slave_abort);
2318
2319 static int match_true(struct device *dev, void *data)
2320 {
2321         return 1;
2322 }
2323
2324 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2325                           char *buf)
2326 {
2327         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2328                                                    dev);
2329         struct device *child;
2330
2331         child = device_find_child(&ctlr->dev, NULL, match_true);
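        /* %s renders a NULL modalias as "(null)" here, matching slave_store() */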
2332         return sprintf(buf, "%s\n",
2333                        child ? to_spi_device(child)->modalias : NULL);
2334 }
2335
2336 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2337                            const char *buf, size_t count)
2338 {
2339         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2340                                                    dev);
2341         struct spi_device *spi;
2342         struct device *child;
2343         char name[32];
2344         int rc;
2345
2346         rc = sscanf(buf, "%31s", name);
2347         if (rc != 1 || !name[0])
2348                 return -EINVAL;
2349
2350         child = device_find_child(&ctlr->dev, NULL, match_true);
2351         if (child) {
2352                 /* Remove registered slave */
2353                 device_unregister(child);
2354                 put_device(child);
2355         }
2356
2357         if (strcmp(name, "(null)")) {
2358                 /* Register new slave */
2359                 spi = spi_alloc_device(ctlr);
2360                 if (!spi)
2361                         return -ENOMEM;
2362
2363                 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2364
2365                 rc = spi_add_device(spi);
2366                 if (rc) {
2367                         spi_dev_put(spi);
2368                         return rc;
2369                 }
2370         }
2371
2372         return count;
2373 }
2374
2375 static DEVICE_ATTR_RW(slave);
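
/*
 * From userspace, a slave protocol handler can be bound or unbound through
 * this attribute; an illustrative shell session (device names may differ):
 *
 *	# echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	# echo "(null)" > /sys/class/spi_slave/spi0/slave
 */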
2376
2377 static struct attribute *spi_slave_attrs[] = {
2378         &dev_attr_slave.attr,
2379         NULL,
2380 };
2381
2382 static const struct attribute_group spi_slave_group = {
2383         .attrs = spi_slave_attrs,
2384 };
2385
2386 static const struct attribute_group *spi_slave_groups[] = {
2387         &spi_controller_statistics_group,
2388         &spi_slave_group,
2389         NULL,
2390 };
2391
2392 static struct class spi_slave_class = {
2393         .name           = "spi_slave",
2394         .owner          = THIS_MODULE,
2395         .dev_release    = spi_controller_release,
2396         .dev_groups     = spi_slave_groups,
2397 };
2398 #else
2399 extern struct class spi_slave_class;    /* dummy */
2400 #endif
2401
2402 /**
2403  * __spi_alloc_controller - allocate an SPI master or slave controller
2404  * @dev: the controller, possibly using the platform_bus
2405  * @size: how much zeroed driver-private data to allocate; the pointer to this
2406  *      memory is in the driver_data field of the returned device, accessible
2407  *      with spi_controller_get_devdata(); the memory is cacheline aligned;
2408  *      drivers granting DMA access to portions of their private data need to
2409  *      round up @size using ALIGN(size, dma_get_cache_alignment()).
2410  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2411  *      slave (true) controller
2412  * Context: can sleep
2413  *
2414  * This call is used only by SPI controller drivers, which are the
2415  * only ones directly touching chip registers.  It's how they allocate
2416  * an spi_controller structure, prior to calling spi_register_controller().
2417  *
2418  * This must be called from context that can sleep.
2419  *
2420  * The caller is responsible for assigning the bus number and initializing the
2421  * controller's methods before calling spi_register_controller(); and (after
2422  * errors adding the device) calling spi_controller_put() to prevent a memory
2423  * leak.
2424  *
2425  * Return: the SPI controller structure on success, else NULL.
2426  */
2427 struct spi_controller *__spi_alloc_controller(struct device *dev,
2428                                               unsigned int size, bool slave)
2429 {
2430         struct spi_controller   *ctlr;
2431         size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2432
2433         if (!dev)
2434                 return NULL;
2435
2436         ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2437         if (!ctlr)
2438                 return NULL;
2439
2440         device_initialize(&ctlr->dev);
2441         ctlr->bus_num = -1;
2442         ctlr->num_chipselect = 1;
2443         ctlr->slave = slave;
2444         if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2445                 ctlr->dev.class = &spi_slave_class;
2446         else
2447                 ctlr->dev.class = &spi_master_class;
2448         ctlr->dev.parent = dev;
2449         pm_suspend_ignore_children(&ctlr->dev, true);
2450         spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2451
2452         return ctlr;
2453 }
2454 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
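
/*
 * Sketch of the usual call sequence in a controller driver's probe()
 * (struct foo_priv and pdev are hypothetical):
 *
 *	struct spi_controller *ctlr;
 *	struct foo_priv *priv;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 */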
2455
2456 #ifdef CONFIG_OF
2457 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2458 {
2459         int nb, i, *cs;
2460         struct device_node *np = ctlr->dev.of_node;
2461
2462         if (!np)
2463                 return 0;
2464
2465         nb = of_gpio_named_count(np, "cs-gpios");
2466         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2467
2468         /* Return error only for an incorrectly formed cs-gpios property */
2469         if (nb == 0 || nb == -ENOENT)
2470                 return 0;
2471         else if (nb < 0)
2472                 return nb;
2473
2474         cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2475                           GFP_KERNEL);
2476         ctlr->cs_gpios = cs;
2477
2478         if (!ctlr->cs_gpios)
2479                 return -ENOMEM;
2480
2481         for (i = 0; i < ctlr->num_chipselect; i++)
2482                 cs[i] = -ENOENT;
2483
2484         for (i = 0; i < nb; i++)
2485                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2486
2487         return 0;
2488 }
2489 #else
2490 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2491 {
2492         return 0;
2493 }
2494 #endif
2495
2496 /**
2497  * spi_get_gpio_descs() - grab chip select GPIOs for the master
2498  * @ctlr: The SPI master to grab GPIO descriptors for
2499  */
2500 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2501 {
2502         int nb, i;
2503         struct gpio_desc **cs;
2504         struct device *dev = &ctlr->dev;
2505         unsigned long native_cs_mask = 0;
2506         unsigned int num_cs_gpios = 0;
2507
2508         nb = gpiod_count(dev, "cs");
2509         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2510
2511         /* No GPIOs at all is fine, else return the error */
2512         if (nb == 0 || nb == -ENOENT)
2513                 return 0;
2514         else if (nb < 0)
2515                 return nb;
2516
2517         cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2518                           GFP_KERNEL);
2519         if (!cs)
2520                 return -ENOMEM;
2521         ctlr->cs_gpiods = cs;
2522
2523         for (i = 0; i < nb; i++) {
2524                 /*
2525                  * Most chipselects are active low, the inverted
2526                  * semantics are handled by special quirks in gpiolib,
2527                  * so initializing them GPIOD_OUT_LOW here means
2528                  * "unasserted", in most cases this will drive the physical
2529                  * line high.
2530                  */
2531                 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2532                                                       GPIOD_OUT_LOW);
2533                 if (IS_ERR(cs[i]))
2534                         return PTR_ERR(cs[i]);
2535
2536                 if (cs[i]) {
2537                         /*
2538                          * If we find a CS GPIO, name it after the device and
2539                          * chip select line.
2540                          */
2541                         char *gpioname;
2542
2543                         gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2544                                                   dev_name(dev), i);
2545                         if (!gpioname)
2546                                 return -ENOMEM;
2547                         gpiod_set_consumer_name(cs[i], gpioname);
2548                         num_cs_gpios++;
2549                         continue;
2550                 }
2551
2552                 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2553                         dev_err(dev, "Invalid native chip select %d\n", i);
2554                         return -EINVAL;
2555                 }
2556                 native_cs_mask |= BIT(i);
2557         }
2558
2559         ctlr->unused_native_cs = ffz(native_cs_mask);
2560         if (num_cs_gpios && ctlr->max_native_cs &&
2561             ctlr->unused_native_cs >= ctlr->max_native_cs) {
2562                 dev_err(dev, "No unused native chip select available\n");
2563                 return -EINVAL;
2564         }
2565
2566         return 0;
2567 }
2568
2569 static int spi_controller_check_ops(struct spi_controller *ctlr)
2570 {
2571         /*
2572          * The controller may implement only the high-level SPI-memory-like
2573          * operations if it does not support regular SPI transfers, and this
2574          * is a valid use case.
2575          * If ->mem_ops is NULL, we request that at least one of the
2576          * ->transfer_xxx() methods be implemented.
2577          */
2578         if (ctlr->mem_ops) {
2579                 if (!ctlr->mem_ops->exec_op)
2580                         return -EINVAL;
2581         } else if (!ctlr->transfer && !ctlr->transfer_one &&
2582                    !ctlr->transfer_one_message) {
2583                 return -EINVAL;
2584         }
2585
2586         return 0;
2587 }
2588
2589 /**
2590  * spi_register_controller - register SPI master or slave controller
2591  * @ctlr: initialized master, originally from spi_alloc_master() or
2592  *      spi_alloc_slave()
2593  * Context: can sleep
2594  *
2595  * SPI controllers connect to their drivers using some non-SPI bus,
2596  * such as the platform bus.  The final stage of probe() in that code
2597  * includes calling spi_register_controller() to hook up to this SPI bus glue.
2598  *
2599  * SPI controllers use board specific (often SOC specific) bus numbers,
2600  * and board-specific addressing for SPI devices combines those numbers
2601  * with chip select numbers.  Since SPI does not directly support dynamic
2602  * device identification, boards need configuration tables telling which
2603  * chip is at which address.
2604  *
2605  * This must be called from context that can sleep.  It returns zero on
2606  * success, else a negative error code (dropping the controller's refcount).
2607  * After a successful return, the caller is responsible for calling
2608  * spi_unregister_controller().
2609  *
2610  * Return: zero on success, else a negative error code.
2611  */
2612 int spi_register_controller(struct spi_controller *ctlr)
2613 {
2614         struct device           *dev = ctlr->dev.parent;
2615         struct boardinfo        *bi;
2616         int                     status;
2617         int                     id, first_dynamic;
2618
2619         if (!dev)
2620                 return -ENODEV;
2621
2622         /*
2623          * Make sure all necessary hooks are implemented before registering
2624          * the SPI controller.
2625          */
2626         status = spi_controller_check_ops(ctlr);
2627         if (status)
2628                 return status;
2629
2630         if (ctlr->bus_num >= 0) {
2631                 /* devices with a fixed bus num must check in with that num */
2632                 mutex_lock(&board_lock);
2633                 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2634                         ctlr->bus_num + 1, GFP_KERNEL);
2635                 mutex_unlock(&board_lock);
2636                 if (WARN(id < 0, "couldn't get idr"))
2637                         return id == -ENOSPC ? -EBUSY : id;
2638                 ctlr->bus_num = id;
2639         } else if (ctlr->dev.of_node) {
2640                 /* allocate dynamic bus number using Linux idr */
2641                 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2642                 if (id >= 0) {
2643                         ctlr->bus_num = id;
2644                         mutex_lock(&board_lock);
2645                         id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2646                                        ctlr->bus_num + 1, GFP_KERNEL);
2647                         mutex_unlock(&board_lock);
2648                         if (WARN(id < 0, "couldn't get idr"))
2649                                 return id == -ENOSPC ? -EBUSY : id;
2650                 }
2651         }
2652         if (ctlr->bus_num < 0) {
2653                 first_dynamic = of_alias_get_highest_id("spi");
2654                 if (first_dynamic < 0)
2655                         first_dynamic = 0;
2656                 else
2657                         first_dynamic++;
2658
2659                 mutex_lock(&board_lock);
2660                 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2661                                0, GFP_KERNEL);
2662                 mutex_unlock(&board_lock);
2663                 if (WARN(id < 0, "couldn't get idr"))
2664                         return id;
2665                 ctlr->bus_num = id;
2666         }
2667         INIT_LIST_HEAD(&ctlr->queue);
2668         spin_lock_init(&ctlr->queue_lock);
2669         spin_lock_init(&ctlr->bus_lock_spinlock);
2670         mutex_init(&ctlr->bus_lock_mutex);
2671         mutex_init(&ctlr->io_mutex);
2672         ctlr->bus_lock_flag = 0;
2673         init_completion(&ctlr->xfer_completion);
2674         if (!ctlr->max_dma_len)
2675                 ctlr->max_dma_len = INT_MAX;
2676
2677         /* Register the device, then userspace will see it.
2678          * Registration fails if the bus ID is in use.
2679          */
2680         dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2681
2682         if (!spi_controller_is_slave(ctlr)) {
2683                 if (ctlr->use_gpio_descriptors) {
2684                         status = spi_get_gpio_descs(ctlr);
2685                         if (status)
2686                                 goto free_bus_id;
2687                         /*
2688                          * A controller using GPIO descriptors always
2689                          * supports SPI_CS_HIGH if need be.
2690                          */
2691                         ctlr->mode_bits |= SPI_CS_HIGH;
2692                 } else {
2693                         /* Legacy code path for GPIOs from DT */
2694                         status = of_spi_get_gpio_numbers(ctlr);
2695                         if (status)
2696                                 goto free_bus_id;
2697                 }
2698         }
2699
2700         /*
2701          * Even if it's just one always-selected device, there must
2702          * be at least one chipselect.
2703          */
2704         if (!ctlr->num_chipselect) {
2705                 status = -EINVAL;
2706                 goto free_bus_id;
2707         }
2708
2709         status = device_add(&ctlr->dev);
2710         if (status < 0)
2711                 goto free_bus_id;
2712         dev_dbg(dev, "registered %s %s\n",
2713                         spi_controller_is_slave(ctlr) ? "slave" : "master",
2714                         dev_name(&ctlr->dev));
2715
2716         /*
2717          * If we're using a queued driver, start the queue. Note that we don't
2718          * need the queueing logic if the driver is only supporting high-level
2719          * memory operations.
2720          */
2721         if (ctlr->transfer) {
2722                 dev_info(dev, "controller is unqueued, this is deprecated\n");
2723         } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2724                 status = spi_controller_initialize_queue(ctlr);
2725                 if (status) {
2726                         device_del(&ctlr->dev);
2727                         goto free_bus_id;
2728                 }
2729         }
2730         /* add statistics */
2731         spin_lock_init(&ctlr->statistics.lock);
2732
2733         mutex_lock(&board_lock);
2734         list_add_tail(&ctlr->list, &spi_controller_list);
2735         list_for_each_entry(bi, &board_list, list)
2736                 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2737         mutex_unlock(&board_lock);
2738
2739         /* Register devices from the device tree and ACPI */
2740         of_register_spi_devices(ctlr);
2741         acpi_register_spi_devices(ctlr);
2742         return status;
2743
2744 free_bus_id:
2745         mutex_lock(&board_lock);
2746         idr_remove(&spi_master_idr, ctlr->bus_num);
2747         mutex_unlock(&board_lock);
2748         return status;
2749 }
2750 EXPORT_SYMBOL_GPL(spi_register_controller);
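
/*
 * Sketch of a minimal registration from a hypothetical foo driver's probe();
 * most error handling is elided except the final reference drop:
 *
 *	ctlr->bus_num = -1;			// request a dynamic bus number
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->transfer_one = foo_transfer_one;	// hypothetical callback
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *
 *	status = spi_register_controller(ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);	// drop refcount on failure
 */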
2751
2752 static void devm_spi_unregister(struct device *dev, void *res)
2753 {
2754         spi_unregister_controller(*(struct spi_controller **)res);
2755 }
2756
2757 /**
2758  * devm_spi_register_controller - register managed SPI master or slave
2759  *      controller
2760  * @dev:    device managing SPI controller
2761  * @ctlr: initialized controller, originally from spi_alloc_master() or
2762  *      spi_alloc_slave()
2763  * Context: can sleep
2764  *
2765  * Register an SPI controller as with spi_register_controller(), which will
2766  * automatically be unregistered and freed when @dev is unbound.
2767  *
2768  * Return: zero on success, else a negative error code.
2769  */
2770 int devm_spi_register_controller(struct device *dev,
2771                                  struct spi_controller *ctlr)
2772 {
2773         struct spi_controller **ptr;
2774         int ret;
2775
2776         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2777         if (!ptr)
2778                 return -ENOMEM;
2779
2780         ret = spi_register_controller(ctlr);
2781         if (!ret) {
2782                 *ptr = ctlr;
2783                 devres_add(dev, ptr);
2784         } else {
2785                 devres_free(ptr);
2786         }
2787
2788         return ret;
2789 }
2790 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
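
/*
 * With the managed variant, a hypothetical foo driver's probe() can end with:
 *
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 *
 * and needs no matching unregister call in its remove() path.
 */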
2791
2792 static int __unregister(struct device *dev, void *null)
2793 {
2794         spi_unregister_device(to_spi_device(dev));
2795         return 0;
2796 }
2797
2798 /**
2799  * spi_unregister_controller - unregister SPI master or slave controller
2800  * @ctlr: the controller being unregistered
2801  * Context: can sleep
2802  *
2803  * This call is used only by SPI controller drivers, which are the
2804  * only ones directly touching chip registers.
2805  *
2806  * This must be called from context that can sleep.
2807  *
2808  * Note that this function also drops a reference to the controller.
2809  */
2810 void spi_unregister_controller(struct spi_controller *ctlr)
2811 {
2812         struct spi_controller *found;
2813         int id = ctlr->bus_num;
2814
2815         /* Prevent addition of new devices, unregister existing ones */
2816         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2817                 mutex_lock(&spi_add_lock);
2818
2819         device_for_each_child(&ctlr->dev, NULL, __unregister);
2820
2821         /* First make sure that this controller was ever added */
2822         mutex_lock(&board_lock);
2823         found = idr_find(&spi_master_idr, id);
2824         mutex_unlock(&board_lock);
2825         if (ctlr->queued) {
2826                 if (spi_destroy_queue(ctlr))
2827                         dev_err(&ctlr->dev, "queue remove failed\n");
2828         }
2829         mutex_lock(&board_lock);
2830         list_del(&ctlr->list);
2831         mutex_unlock(&board_lock);
2832
2833         device_unregister(&ctlr->dev);
2834         /* free bus id */
2835         mutex_lock(&board_lock);
2836         if (found == ctlr)
2837                 idr_remove(&spi_master_idr, id);
2838         mutex_unlock(&board_lock);
2839
2840         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2841                 mutex_unlock(&spi_add_lock);
2842 }
2843 EXPORT_SYMBOL_GPL(spi_unregister_controller);
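
/*
 * Sketch of the non-managed teardown in a hypothetical foo driver:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr = platform_get_drvdata(pdev);
 *
 *		spi_unregister_controller(ctlr);  // also drops a reference
 *		return 0;
 *	}
 */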
2844
2845 int spi_controller_suspend(struct spi_controller *ctlr)
2846 {
2847         int ret;
2848
2849         /* Basically no-ops for non-queued controllers */
2850         if (!ctlr->queued)
2851                 return 0;
2852
2853         ret = spi_stop_queue(ctlr);
2854         if (ret)
2855                 dev_err(&ctlr->dev, "queue stop failed\n");
2856
2857         return ret;
2858 }
2859 EXPORT_SYMBOL_GPL(spi_controller_suspend);
2860
2861 int spi_controller_resume(struct spi_controller *ctlr)
2862 {
2863         int ret;
2864
2865         if (!ctlr->queued)
2866                 return 0;
2867
2868         ret = spi_start_queue(ctlr);
2869         if (ret)
2870                 dev_err(&ctlr->dev, "queue restart failed\n");
2871
2872         return ret;
2873 }
2874 EXPORT_SYMBOL_GPL(spi_controller_resume);
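
/*
 * Sketch of how a queued driver's system-sleep callbacks typically wrap the
 * two helpers above (foo_* names hypothetical; clock handling elided):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return spi_controller_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return spi_controller_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */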
2875
2876 static int __spi_controller_match(struct device *dev, const void *data)
2877 {
2878         struct spi_controller *ctlr;
2879         const u16 *bus_num = data;
2880
2881         ctlr = container_of(dev, struct spi_controller, dev);
2882         return ctlr->bus_num == *bus_num;
2883 }
2884
2885 /**
2886  * spi_busnum_to_master - look up master associated with bus_num
2887  * @bus_num: the master's bus number
2888  * Context: can sleep
2889  *
2890  * This call may be used with devices that are registered after
2891  * arch init time.  It returns a refcounted pointer to the relevant
2892  * spi_controller (which the caller must release), or NULL if there is
2893  * no such master registered.
2894  *
2895  * Return: the SPI master structure on success, else NULL.
2896  */
2897 struct spi_controller *spi_busnum_to_master(u16 bus_num)
2898 {
2899         struct device           *dev;
2900         struct spi_controller   *ctlr = NULL;
2901
2902         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2903                                 __spi_controller_match);
2904         if (dev)
2905                 ctlr = container_of(dev, struct spi_controller, dev);
2906         /* reference got in class_find_device */
2907         return ctlr;
2908 }
2909 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
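/*
 * Illustrative sketch (not part of this file): a caller looking up a
 * master by bus number must drop the reference obtained here once done.
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		// ... use ctlr ...
 *		put_device(&ctlr->dev);
 *	}
 */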
2910
2911 /*-------------------------------------------------------------------------*/
2912
2913 /* Core methods for SPI resource management */
2914
2915 /**
2916  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2917  *                 during the processing of a spi_message while using
2918  *                 spi_transfer_one
2919  * @spi:     the spi device for which we allocate memory
2920  * @release: the release code to execute for this resource
2921  * @size:    size to alloc and return
2922  * @gfp:     GFP allocation flags
2923  *
2924  * Return: the pointer to the allocated data, or NULL on allocation failure
2925  *
2926  * This may get enhanced in the future to allocate from a memory pool
2927  * of the @spi_device or @spi_controller to avoid repeated allocations.
2928  */
2929 void *spi_res_alloc(struct spi_device *spi,
2930                     spi_res_release_t release,
2931                     size_t size, gfp_t gfp)
2932 {
2933         struct spi_res *sres;
2934
2935         sres = kzalloc(sizeof(*sres) + size, gfp);
2936         if (!sres)
2937                 return NULL;
2938
2939         INIT_LIST_HEAD(&sres->entry);
2940         sres->release = release;
2941
2942         return sres->data;
2943 }
2944 EXPORT_SYMBOL_GPL(spi_res_alloc);
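/*
 * Illustrative sketch (not part of this file): allocating a resource that
 * is released automatically when the message completes.  my_release() and
 * struct my_data are hypothetical.
 *
 *	static void my_release(struct spi_controller *ctlr,
 *			       struct spi_message *msg, void *res)
 *	{
 *		// undo whatever struct my_data tracks
 *	}
 *
 *	struct my_data *data = spi_res_alloc(spi, my_release,
 *					     sizeof(*data), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	spi_res_add(msg, data);		// freed later by spi_res_release()
 */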
2945
2946 /**
2947  * spi_res_free - free an spi resource
2948  * @res: pointer to the custom data of a resource
2949  */
2951 void spi_res_free(void *res)
2952 {
2953         struct spi_res *sres;
2954
2955         if (!res)
2956                 return;
2957         sres = container_of(res, struct spi_res, data);
2958         WARN_ON(!list_empty(&sres->entry));
2959         kfree(sres);
2960 }
2961 EXPORT_SYMBOL_GPL(spi_res_free);
2962
2963 /**
2964  * spi_res_add - add a spi_res to the spi_message
2965  * @message: the spi message
2966  * @res:     the spi_resource
2967  */
2968 void spi_res_add(struct spi_message *message, void *res)
2969 {
2970         struct spi_res *sres = container_of(res, struct spi_res, data);
2971
2972         WARN_ON(!list_empty(&sres->entry));
2973         list_add_tail(&sres->entry, &message->resources);
2974 }
2975 EXPORT_SYMBOL_GPL(spi_res_add);
2976
2977 /**
2978  * spi_res_release - release all spi resources for this message
2979  * @ctlr:  the @spi_controller
2980  * @message: the @spi_message
2981  */
2982 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2983 {
2984         struct spi_res *res, *tmp;
2985
2986         list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
2987                 if (res->release)
2988                         res->release(ctlr, message, res->data);
2989
2990                 list_del(&res->entry);
2991
2992                 kfree(res);
2993         }
2994 }
2995 EXPORT_SYMBOL_GPL(spi_res_release);
2996
2997 /*-------------------------------------------------------------------------*/
2998
2999 /* Core methods for spi_message alterations */
3000
3001 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3002                                             struct spi_message *msg,
3003                                             void *res)
3004 {
3005         struct spi_replaced_transfers *rxfer = res;
3006         size_t i;
3007
3008         /* call extra callback if requested */
3009         if (rxfer->release)
3010                 rxfer->release(ctlr, msg, res);
3011
3012         /* insert replaced transfers back into the message */
3013         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3014
3015         /* remove the formerly inserted entries */
3016         for (i = 0; i < rxfer->inserted; i++)
3017                 list_del(&rxfer->inserted_transfers[i].transfer_list);
3018 }
3019
3020 /**
3021  * spi_replace_transfers - replace transfers with several transfers
3022  *                         and register change with spi_message.resources
3023  * @msg:           the spi_message we work upon
3024  * @xfer_first:    the first spi_transfer we want to replace
3025  * @remove:        number of transfers to remove
3026  * @insert:        the number of transfers we want to insert instead
3027  * @release:       extra release code necessary in some circumstances
3028  * @extradatasize: extra data to allocate (with alignment guarantees
3029  *                 of struct @spi_transfer)
3030  * @gfp:           gfp flags
3031  *
3032  * Return: pointer to @spi_replaced_transfers,
3033  *          or ERR_PTR(...) in case of errors.
3034  */
3035 struct spi_replaced_transfers *spi_replace_transfers(
3036         struct spi_message *msg,
3037         struct spi_transfer *xfer_first,
3038         size_t remove,
3039         size_t insert,
3040         spi_replaced_release_t release,
3041         size_t extradatasize,
3042         gfp_t gfp)
3043 {
3044         struct spi_replaced_transfers *rxfer;
3045         struct spi_transfer *xfer;
3046         size_t i;
3047
3048         /* allocate the structure using spi_res */
3049         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3050                               struct_size(rxfer, inserted_transfers, insert)
3051                               + extradatasize,
3052                               gfp);
3053         if (!rxfer)
3054                 return ERR_PTR(-ENOMEM);
3055
3056         /* the release code to invoke before running the generic release */
3057         rxfer->release = release;
3058
3059         /* assign extradata */
3060         if (extradatasize)
3061                 rxfer->extradata =
3062                         &rxfer->inserted_transfers[insert];
3063
3064         /* init the replaced_transfers list */
3065         INIT_LIST_HEAD(&rxfer->replaced_transfers);
3066
3067         /* assign the list_entry after which we should reinsert
3068          * the @replaced_transfers - it may be &msg->transfers!
3069          */
3070         rxfer->replaced_after = xfer_first->transfer_list.prev;
3071
3072         /* remove the requested number of transfers */
3073         for (i = 0; i < remove; i++) {
3074                 /* if the entry after replaced_after is &msg->transfers
3075                  * then we have been requested to remove more transfers
3076                  * than are in the list
3077                  */
3078                 if (rxfer->replaced_after->next == &msg->transfers) {
3079                         dev_err(&msg->spi->dev,
3080                                 "requested to remove more spi_transfers than are available\n");
3081                         /* insert replaced transfers back into the message */
3082                         list_splice(&rxfer->replaced_transfers,
3083                                     rxfer->replaced_after);
3084
3085                         /* free the spi_replace_transfer structure */
3086                         spi_res_free(rxfer);
3087
3088                         /* and return with an error */
3089                         return ERR_PTR(-EINVAL);
3090                 }
3091
3092                 /* remove the entry after replaced_after from list of
3093                  * transfers and add it to list of replaced_transfers
3094                  */
3095                 list_move_tail(rxfer->replaced_after->next,
3096                                &rxfer->replaced_transfers);
3097         }
3098
3099         /* create copies of the given xfer with identical settings,
3100          * based on the first transfer to be removed
3101          */
3102         for (i = 0; i < insert; i++) {
3103                 /* we need to run in reverse order */
3104                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3105
3106                 /* copy all spi_transfer data */
3107                 memcpy(xfer, xfer_first, sizeof(*xfer));
3108
3109                 /* add to list */
3110                 list_add(&xfer->transfer_list, rxfer->replaced_after);
3111
3112                 /* clear cs_change and delay for all but the last */
3113                 if (i) {
3114                         xfer->cs_change = false;
3115                         xfer->delay_usecs = 0;
3116                         xfer->delay.value = 0;
3117                 }
3118         }
3119
3120         /* set up inserted */
3121         rxfer->inserted = insert;
3122
3123         /* and register it with spi_res/spi_message */
3124         spi_res_add(msg, rxfer);
3125
3126         return rxfer;
3127 }
3128 EXPORT_SYMBOL_GPL(spi_replace_transfers);
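/*
 * Illustrative sketch (not part of this file): replacing one oversized
 * transfer with two copies, much as __spi_split_transfer_maxsize() below
 * does for the general case.
 *
 *	struct spi_replaced_transfers *srt;
 *
 *	srt = spi_replace_transfers(msg, xfer, 1, 2, NULL, 0, GFP_KERNEL);
 *	if (IS_ERR(srt))
 *		return PTR_ERR(srt);
 *	// srt->inserted_transfers[0..1] start out as copies of *xfer;
 *	// fix up len/tx_buf/rx_buf before the message is executed
 */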
3129
3130 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3131                                         struct spi_message *msg,
3132                                         struct spi_transfer **xferp,
3133                                         size_t maxsize,
3134                                         gfp_t gfp)
3135 {
3136         struct spi_transfer *xfer = *xferp, *xfers;
3137         struct spi_replaced_transfers *srt;
3138         size_t offset;
3139         size_t count, i;
3140
3141         /* calculate how many we have to replace */
3142         count = DIV_ROUND_UP(xfer->len, maxsize);
3143
3144         /* create replacement */
3145         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3146         if (IS_ERR(srt))
3147                 return PTR_ERR(srt);
3148         xfers = srt->inserted_transfers;
3149
3150         /* now handle each of those newly inserted spi_transfers.
3151          * note that the replacement spi_transfers are all preset
3152          * to the same values as *xferp, so tx_buf, rx_buf and len
3153          * are all identical (as are most other fields),
3154          * so we just have to fix up len and the pointers.
3155          *
3156          * this also includes support for the deprecated
3157          * spi_message.is_dma_mapped interface
3158          */
3159
3160         /* the first transfer just needs the length modified, so we
3161          * run it outside the loop
3162          */
3163         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3164
3165         /* all the others need rx_buf/tx_buf also set */
3166         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3167                 /* update rx_buf, tx_buf and dma */
3168                 if (xfers[i].rx_buf)
3169                         xfers[i].rx_buf += offset;
3170                 if (xfers[i].rx_dma)
3171                         xfers[i].rx_dma += offset;
3172                 if (xfers[i].tx_buf)
3173                         xfers[i].tx_buf += offset;
3174                 if (xfers[i].tx_dma)
3175                         xfers[i].tx_dma += offset;
3176
3177                 /* update length */
3178                 xfers[i].len = min(maxsize, xfers[i].len - offset);
3179         }
3180
3181         /* we set up xferp to the last entry we have inserted,
3182          * so that we skip those already split transfers
3183          */
3184         *xferp = &xfers[count - 1];
3185
3186         /* increment statistics counters */
3187         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3188                                        transfers_split_maxsize);
3189         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3190                                        transfers_split_maxsize);
3191
3192         return 0;
3193 }
3194
3195 /**
3196  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3197  *                               when an individual transfer exceeds a
3198  *                               certain size
3199  * @ctlr:    the @spi_controller for this transfer
3200  * @msg:     the @spi_message to transform
3201  * @maxsize: the maximum size of a transfer; any larger transfer is split
3202  * @gfp: GFP allocation flags
3203  *
3204  * Return: status of transformation
3205  */
3206 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3207                                 struct spi_message *msg,
3208                                 size_t maxsize,
3209                                 gfp_t gfp)
3210 {
3211         struct spi_transfer *xfer;
3212         int ret;
3213
3214         /* iterate over the transfer_list,
3215          * but note that xfer is advanced to the last transfer inserted
3216          * to avoid checking sizes again unnecessarily (also, xfer may
3217          * potentially belong to a different list by the time the
3218          * replacement has happened)
3219          */
3220         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3221                 if (xfer->len > maxsize) {
3222                         ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3223                                                            maxsize, gfp);
3224                         if (ret)
3225                                 return ret;
3226                 }
3227         }
3228
3229         return 0;
3230 }
3231 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
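/*
 * Illustrative sketch (not part of this file): a controller driver whose
 * hardware FIFO is limited to, say, 64 bytes might split messages in its
 * prepare_message() callback.  foo_prepare_message() is hypothetical.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 64,
 *						   GFP_KERNEL);
 *	}
 */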
3232
3233 /*-------------------------------------------------------------------------*/
3234
3235 /* Core methods for SPI controller protocol drivers.  Some of the
3236  * other core methods are currently defined as inline functions.
3237  */
3238
3239 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3240                                         u8 bits_per_word)
3241 {
3242         if (ctlr->bits_per_word_mask) {
3243                 /* Only 32 bits fit in the mask */
3244                 if (bits_per_word > 32)
3245                         return -EINVAL;
3246                 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3247                         return -EINVAL;
3248         }
3249
3250         return 0;
3251 }
3252
3253 /**
3254  * spi_setup - setup SPI mode and clock rate
3255  * @spi: the device whose settings are being modified
3256  * Context: can sleep, and no requests are queued to the device
3257  *
3258  * SPI protocol drivers may need to update the transfer mode if the
3259  * device doesn't work with its default.  They may likewise need
3260  * to update clock rates or word sizes from initial values.  This function
3261  * changes those settings, and must be called from a context that can sleep.
3262  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3263  * effect the next time the device is selected and data is transferred to
3264  * or from it.  When this function returns, the spi device is deselected.
3265  *
3266  * Note that this call will fail if the protocol driver specifies an option
3267  * that the underlying controller or its driver does not support.  For
3268  * example, not all hardware supports wire transfers using nine bit words,
3269  * LSB-first wire encoding, or active-high chipselects.
3270  *
3271  * Return: zero on success, else a negative error code.
3272  */
3273 int spi_setup(struct spi_device *spi)
3274 {
3275         unsigned        bad_bits, ugly_bits;
3276         int             status;
3277
3278         /* check mode to prevent DUAL and QUAD from being set at the same time
3279          */
3280         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
3281                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
3282                 dev_err(&spi->dev,
3283                 "setup: cannot select dual and quad at the same time\n");
3284                 return -EINVAL;
3285         }
3286         /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
3287          */
3288         if ((spi->mode & SPI_3WIRE) && (spi->mode &
3289                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3290                  SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3291                 return -EINVAL;
3292         /* help drivers fail *cleanly* when they need options
3293          * that aren't supported with their current controller.
3294          * SPI_CS_WORD has a fallback software implementation,
3295          * so it is ignored here.
3296          */
3297         bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
3298         /* nothing prevents us from working with an active-high CS
3299          * if it is driven by a GPIO.
3300          */
3301         if (gpio_is_valid(spi->cs_gpio))
3302                 bad_bits &= ~SPI_CS_HIGH;
3303         ugly_bits = bad_bits &
3304                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3305                      SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3306         if (ugly_bits) {
3307                 dev_warn(&spi->dev,
3308                          "setup: ignoring unsupported mode bits %x\n",
3309                          ugly_bits);
3310                 spi->mode &= ~ugly_bits;
3311                 bad_bits &= ~ugly_bits;
3312         }
3313         if (bad_bits) {
3314                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3315                         bad_bits);
3316                 return -EINVAL;
3317         }
3318
3319         if (!spi->bits_per_word)
3320                 spi->bits_per_word = 8;
3321
3322         status = __spi_validate_bits_per_word(spi->controller,
3323                                               spi->bits_per_word);
3324         if (status)
3325                 return status;
3326
3327         if (!spi->max_speed_hz)
3328                 spi->max_speed_hz = spi->controller->max_speed_hz;
3329
3330         if (spi->controller->setup)
3331                 status = spi->controller->setup(spi);
3332
3333         if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3334                 status = pm_runtime_get_sync(spi->controller->dev.parent);
3335                 if (status < 0) {
3336                         pm_runtime_put_noidle(spi->controller->dev.parent);
3337                         dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3338                                 status);
3339                         return status;
3340                 }
3341
3342                 /*
3343                  * We do not want to return positive value from pm_runtime_get,
3344                  * there are many instances of devices calling spi_setup() and
3345                  * checking for a non-zero return value instead of a negative
3346                  * return value.
3347                  */
3348                 status = 0;
3349
3350                 spi_set_cs(spi, false);
3351                 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3352                 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3353         } else {
3354                 spi_set_cs(spi, false);
3355         }
3356
3357         if (spi->rt && !spi->controller->rt) {
3358                 spi->controller->rt = true;
3359                 spi_set_thread_rt(spi->controller);
3360         }
3361
3362         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3363                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
3364                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3365                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3366                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3367                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
3368                         spi->bits_per_word, spi->max_speed_hz,
3369                         status);
3370
3371         return status;
3372 }
3373 EXPORT_SYMBOL_GPL(spi_setup);
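/*
 * Illustrative sketch (not part of this file): a protocol driver probe()
 * adjusting device settings before the first transfer.  The values shown
 * are arbitrary.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */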
3374
3375 /**
3376  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3377  * @spi: the device that requires specific CS timing configuration
3378  * @setup: CS setup time specified via @spi_delay
3379  * @hold: CS hold time specified via @spi_delay
3380  * @inactive: CS inactive delay between transfers specified via @spi_delay
3381  *
3382  * Return: zero on success, else a negative error code.
3383  */
3384 int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
3385                       struct spi_delay *hold, struct spi_delay *inactive)
3386 {
3387         size_t len;
3388
3389         if (spi->controller->set_cs_timing)
3390                 return spi->controller->set_cs_timing(spi, setup, hold,
3391                                                       inactive);
3392
3393         if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
3394             (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
3395             (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
3396                 dev_err(&spi->dev,
3397                         "Clock-cycle delays for CS not supported in SW mode\n");
3398                 return -ENOTSUPP;
3399         }
3400
3401         len = sizeof(struct spi_delay);
3402
3403         /* copy delays to controller */
3404         if (setup)
3405                 memcpy(&spi->controller->cs_setup, setup, len);
3406         else
3407                 memset(&spi->controller->cs_setup, 0, len);
3408
3409         if (hold)
3410                 memcpy(&spi->controller->cs_hold, hold, len);
3411         else
3412                 memset(&spi->controller->cs_hold, 0, len);
3413
3414         if (inactive)
3415                 memcpy(&spi->controller->cs_inactive, inactive, len);
3416         else
3417                 memset(&spi->controller->cs_inactive, 0, len);
3418
3419         return 0;
3420 }
3421 EXPORT_SYMBOL_GPL(spi_set_cs_timing);
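/*
 * Illustrative sketch (not part of this file): requesting 50ns CS setup
 * and hold times; the values and units are arbitrary.
 *
 *	struct spi_delay setup = { .value = 50, .unit = SPI_DELAY_UNIT_NSECS };
 *	struct spi_delay hold  = { .value = 50, .unit = SPI_DELAY_UNIT_NSECS };
 *
 *	ret = spi_set_cs_timing(spi, &setup, &hold, NULL);
 */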
3422
3423 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3424                                        struct spi_device *spi)
3425 {
3426         int delay1, delay2;
3427
3428         delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3429         if (delay1 < 0)
3430                 return delay1;
3431
3432         delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3433         if (delay2 < 0)
3434                 return delay2;
3435
3436         if (delay1 < delay2)
3437                 memcpy(&xfer->word_delay, &spi->word_delay,
3438                        sizeof(xfer->word_delay));
3439
3440         return 0;
3441 }
3442
3443 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3444 {
3445         struct spi_controller *ctlr = spi->controller;
3446         struct spi_transfer *xfer;
3447         int w_size;
3448
3449         if (list_empty(&message->transfers))
3450                 return -EINVAL;
3451
3452         /* If an SPI controller does not support toggling the CS line on each
3453          * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3454          * for the CS line, we can emulate the CS-per-word hardware function by
3455          * splitting transfers into one-word transfers and ensuring that
3456          * cs_change is set for each transfer.
3457          */
3458         if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3459                                           spi->cs_gpiod ||
3460                                           gpio_is_valid(spi->cs_gpio))) {
3461                 size_t maxsize;
3462                 int ret;
3463
3464                 maxsize = (spi->bits_per_word + 7) / 8;
3465
3466                 /* spi_split_transfers_maxsize() requires message->spi */
3467                 message->spi = spi;
3468
3469                 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3470                                                   GFP_KERNEL);
3471                 if (ret)
3472                         return ret;
3473
3474                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3475                         /* don't change cs_change on the last entry in the list */
3476                         if (list_is_last(&xfer->transfer_list, &message->transfers))
3477                                 break;
3478                         xfer->cs_change = 1;
3479                 }
3480         }
3481
3482         /* Half-duplex links include the original MicroWire, and ones with
3483          * only one data pin like SPI_3WIRE (switches direction) or where
3484          * either MOSI or MISO is missing.  They can also be caused by
3485          * software limitations.
3486          */
3487         if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3488             (spi->mode & SPI_3WIRE)) {
3489                 unsigned flags = ctlr->flags;
3490
3491                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3492                         if (xfer->rx_buf && xfer->tx_buf)
3493                                 return -EINVAL;
3494                         if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3495                                 return -EINVAL;
3496                         if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3497                                 return -EINVAL;
3498                 }
3499         }
3500
3501         /*
3502          * Set transfer bits_per_word and max speed as spi device default if
3503          * it is not set for this transfer.
3504          * Set transfer tx_nbits and rx_nbits as single transfer default
3505          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3506          * Ensure transfer word_delay is at least as long as that required by
3507          * device itself.
3508          */
3509         message->frame_length = 0;
3510         list_for_each_entry(xfer, &message->transfers, transfer_list) {
3511                 xfer->effective_speed_hz = 0;
3512                 message->frame_length += xfer->len;
3513                 if (!xfer->bits_per_word)
3514                         xfer->bits_per_word = spi->bits_per_word;
3515
3516                 if (!xfer->speed_hz)
3517                         xfer->speed_hz = spi->max_speed_hz;
3518
3519                 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3520                         xfer->speed_hz = ctlr->max_speed_hz;
3521
3522                 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3523                         return -EINVAL;
3524
3525                 /*
3526                  * SPI transfer length should be a multiple of the SPI word size,
3527                  * where the SPI word size is rounded up to a power-of-two number of bytes
3528                  */
3529                 if (xfer->bits_per_word <= 8)
3530                         w_size = 1;
3531                 else if (xfer->bits_per_word <= 16)
3532                         w_size = 2;
3533                 else
3534                         w_size = 4;
3535
3536                 /* No partial transfers accepted */
3537                 if (xfer->len % w_size)
3538                         return -EINVAL;
3539
3540                 if (xfer->speed_hz && ctlr->min_speed_hz &&
3541                     xfer->speed_hz < ctlr->min_speed_hz)
3542                         return -EINVAL;
3543
3544                 if (xfer->tx_buf && !xfer->tx_nbits)
3545                         xfer->tx_nbits = SPI_NBITS_SINGLE;
3546                 if (xfer->rx_buf && !xfer->rx_nbits)
3547                         xfer->rx_nbits = SPI_NBITS_SINGLE;
3548                 /* check transfer tx/rx_nbits:
3549                  * 1. check the value matches one of single, dual and quad
3550                  * 2. check tx/rx_nbits match the mode in spi_device
3551                  */
3552                 if (xfer->tx_buf) {
3553                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3554                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
3555                                 xfer->tx_nbits != SPI_NBITS_QUAD)
3556                                 return -EINVAL;
3557                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3558                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3559                                 return -EINVAL;
3560                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3561                                 !(spi->mode & SPI_TX_QUAD))
3562                                 return -EINVAL;
3563                 }
3564                 /* check transfer rx_nbits */
3565                 if (xfer->rx_buf) {
3566                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3567                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
3568                                 xfer->rx_nbits != SPI_NBITS_QUAD)
3569                                 return -EINVAL;
3570                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3571                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3572                                 return -EINVAL;
3573                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3574                                 !(spi->mode & SPI_RX_QUAD))
3575                                 return -EINVAL;
3576                 }
3577
3578                 if (_spi_xfer_word_delay_update(xfer, spi))
3579                         return -EINVAL;
3580         }
3581
3582         message->status = -EINPROGRESS;
3583
3584         return 0;
3585 }
3586
3587 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3588 {
3589         struct spi_controller *ctlr = spi->controller;
3590         struct spi_transfer *xfer;
3591
3592         /*
3593          * Some controllers do not support doing regular SPI transfers. Return
3594          * ENOTSUPP when this is the case.
3595          */
3596         if (!ctlr->transfer)
3597                 return -ENOTSUPP;
3598
3599         message->spi = spi;
3600
3601         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3602         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3603
3604         trace_spi_message_submit(message);
3605
3606         if (!ctlr->ptp_sts_supported) {
3607                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3608                         xfer->ptp_sts_word_pre = 0;
3609                         ptp_read_system_prets(xfer->ptp_sts);
3610                 }
3611         }
3612
3613         return ctlr->transfer(spi, message);
3614 }
3615
3616 /**
3617  * spi_async - asynchronous SPI transfer
3618  * @spi: device with which data will be exchanged
3619  * @message: describes the data transfers, including completion callback
3620  * Context: any (irqs may be blocked, etc)
3621  *
3622  * This call may be used in_irq and other contexts which can't sleep,
3623  * as well as from task contexts which can sleep.
3624  *
3625  * The completion callback is invoked in a context which can't sleep.
3626  * Before that invocation, the value of message->status is undefined.
3627  * When the callback is issued, message->status holds either zero (to
3628  * indicate complete success) or a negative error code.  After that
3629  * callback returns, the driver which issued the transfer request may
3630  * deallocate the associated memory; it's no longer in use by any SPI
3631  * core or controller driver code.
3632  *
3633  * Note that although all messages to a spi_device are handled in
3634  * FIFO order, messages may go to different devices in other orders.
3635  * Some device might be higher priority, or have various "hard" access
3636  * time requirements, for example.
3637  *
3638  * On detection of any fault during the transfer, processing of
3639  * the entire message is aborted, and the device is deselected.
3640  * Until returning from the associated message completion callback,
3641  * no other spi_message queued to that device will be processed.
3642  * (This rule applies equally to all the synchronous transfer calls,
3643  * which are wrappers around this core asynchronous primitive.)
3644  *
3645  * Return: zero on success, else a negative error code.
3646  */
3647 int spi_async(struct spi_device *spi, struct spi_message *message)
3648 {
3649         struct spi_controller *ctlr = spi->controller;
3650         int ret;
3651         unsigned long flags;
3652
3653         ret = __spi_validate(spi, message);
3654         if (ret != 0)
3655                 return ret;
3656
3657         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3658
3659         if (ctlr->bus_lock_flag)
3660                 ret = -EBUSY;
3661         else
3662                 ret = __spi_async(spi, message);
3663
3664         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3665
3666         return ret;
3667 }
3668 EXPORT_SYMBOL_GPL(spi_async);
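/*
 * Illustrative sketch (not part of this file): submitting a message from a
 * context that cannot sleep; my_complete() is a hypothetical callback that
 * runs once the transfer finishes.
 *
 *	static void my_complete(void *context)
 *	{
 *		// the spi_message's status now holds 0 or a negative errno
 *	}
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = my_complete;
 *	msg.context = &my_ctx;		// my_ctx is hypothetical
 *	ret = spi_async(spi, &msg);
 */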
3669
3670 /**
3671  * spi_async_locked - version of spi_async with exclusive bus usage
3672  * @spi: device with which data will be exchanged
3673  * @message: describes the data transfers, including completion callback
3674  * Context: any (irqs may be blocked, etc)
3675  *
3676  * This call may be used in_irq and other contexts which can't sleep,
3677  * as well as from task contexts which can sleep.
3678  *
3679  * The completion callback is invoked in a context which can't sleep.
3680  * Before that invocation, the value of message->status is undefined.
3681  * When the callback is issued, message->status holds either zero (to
3682  * indicate complete success) or a negative error code.  After that
3683  * callback returns, the driver which issued the transfer request may
3684  * deallocate the associated memory; it's no longer in use by any SPI
3685  * core or controller driver code.
3686  *
3687  * Note that although all messages to a spi_device are handled in
3688  * FIFO order, messages may go to different devices in other orders.
3689  * Some device might be higher priority, or have various "hard" access
3690  * time requirements, for example.
3691  *
3692  * On detection of any fault during the transfer, processing of
3693  * the entire message is aborted, and the device is deselected.
3694  * Until returning from the associated message completion callback,
3695  * no other spi_message queued to that device will be processed.
3696  * (This rule applies equally to all the synchronous transfer calls,
3697  * which are wrappers around this core asynchronous primitive.)
3698  *
3699  * Return: zero on success, else a negative error code.
3700  */
3701 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3702 {
3703         struct spi_controller *ctlr = spi->controller;
3704         int ret;
3705         unsigned long flags;
3706
3707         ret = __spi_validate(spi, message);
3708         if (ret != 0)
3709                 return ret;
3710
3711         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3712
3713         ret = __spi_async(spi, message);
3714
3715         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3716
3717         return ret;
3719 }
3720 EXPORT_SYMBOL_GPL(spi_async_locked);
3721
3722 /*-------------------------------------------------------------------------*/
3723
3724 /* Utility methods for SPI protocol drivers, layered on
3725  * top of the core.  Some other utility methods are defined as
3726  * inline functions.
3727  */
3728
3729 static void spi_complete(void *arg)
3730 {
3731         complete(arg);
3732 }
3733
3734 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3735 {
3736         DECLARE_COMPLETION_ONSTACK(done);
3737         int status;
3738         struct spi_controller *ctlr = spi->controller;
3739         unsigned long flags;
3740
3741         status = __spi_validate(spi, message);
3742         if (status != 0)
3743                 return status;
3744
3745         message->complete = spi_complete;
3746         message->context = &done;
3747         message->spi = spi;
3748
3749         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3750         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3751
3752         /* If we're not using the legacy transfer method then we will
3753          * try to transfer in the calling context, so special-case that.
3754          * This code would be less tricky if we could remove the
3755          * support for driver-implemented message queues.
3756          */
3757         if (ctlr->transfer == spi_queued_transfer) {
3758                 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3759
3760                 trace_spi_message_submit(message);
3761
3762                 status = __spi_queued_transfer(spi, message, false);
3763
3764                 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3765         } else {
3766                 status = spi_async_locked(spi, message);
3767         }
3768
3769         if (status == 0) {
3770                 /* Push out the messages in the calling context if we
3771                  * can.
3772                  */
3773                 if (ctlr->transfer == spi_queued_transfer) {
3774                         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3775                                                        spi_sync_immediate);
3776                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3777                                                        spi_sync_immediate);
3778                         __spi_pump_messages(ctlr, false);
3779                 }
3780
3781                 wait_for_completion(&done);
3782                 status = message->status;
3783         }
3784         message->context = NULL;
3785         return status;
3786 }
3787
3788 /**
3789  * spi_sync - blocking/synchronous SPI data transfers
3790  * @spi: device with which data will be exchanged
3791  * @message: describes the data transfers
3792  * Context: can sleep
3793  *
3794  * This call may only be used from a context that may sleep.  The sleep
3795  * is non-interruptible, and has no timeout.  Low-overhead controller
3796  * drivers may DMA directly into and out of the message buffers.
3797  *
3798  * Note that the SPI device's chip select is active during the message,
3799  * and then is normally disabled between messages.  Drivers for some
3800  * frequently-used devices may want to minimize costs of selecting a chip,
3801  * by leaving it selected in anticipation that the next message will go
3802  * to the same chip.  (That may increase power usage.)
3803  *
3804  * Also, the caller is guaranteeing that the memory associated with the
3805  * message will not be freed before this call returns.
3806  *
3807  * Return: zero on success, else a negative error code.
3808  */
3809 int spi_sync(struct spi_device *spi, struct spi_message *message)
3810 {
3811         int ret;
3812
3813         mutex_lock(&spi->controller->bus_lock_mutex);
3814         ret = __spi_sync(spi, message);
3815         mutex_unlock(&spi->controller->bus_lock_mutex);
3816
3817         return ret;
3818 }
3819 EXPORT_SYMBOL_GPL(spi_sync);
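/*
 * Illustrative sketch (not part of this file): a simple one-transfer
 * synchronous message; buf/len are caller-provided.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = buf,
 *		.len = len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 */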
3820
3821 /**
3822  * spi_sync_locked - version of spi_sync with exclusive bus usage
3823  * @spi: device with which data will be exchanged
3824  * @message: describes the data transfers
3825  * Context: can sleep
3826  *
3827  * This call may only be used from a context that may sleep.  The sleep
3828  * is non-interruptible, and has no timeout.  Low-overhead controller
3829  * drivers may DMA directly into and out of the message buffers.
3830  *
3831  * This call should be used by drivers that require exclusive access to the
3832  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3833  * be released by a spi_bus_unlock call when the exclusive access is over.
3834  *
3835  * Return: zero on success, else a negative error code.
3836  */
3837 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3838 {
3839         return __spi_sync(spi, message);
3840 }
3841 EXPORT_SYMBOL_GPL(spi_sync_locked);
3842
3843 /**
3844  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3845  * @ctlr: SPI bus master that should be locked for exclusive bus access
3846  * Context: can sleep
3847  *
3848  * This call may only be used from a context that may sleep.  The sleep
3849  * is non-interruptible, and has no timeout.
3850  *
3851  * This call should be used by drivers that require exclusive access to the
3852  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3853  * exclusive access is over. Data transfer must be done by spi_sync_locked
3854  * and spi_async_locked calls when the SPI bus lock is held.
3855  *
3856  * Return: always zero.
3857  */
3858 int spi_bus_lock(struct spi_controller *ctlr)
3859 {
3860         unsigned long flags;
3861
3862         mutex_lock(&ctlr->bus_lock_mutex);
3863
3864         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3865         ctlr->bus_lock_flag = 1;
3866         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3867
3868         /* mutex remains locked until spi_bus_unlock is called */
3869
3870         return 0;
3871 }
3872 EXPORT_SYMBOL_GPL(spi_bus_lock);
3873
3874 /**
3875  * spi_bus_unlock - release the lock for exclusive SPI bus usage
3876  * @ctlr: SPI bus master that was locked for exclusive bus access
3877  * Context: can sleep
3878  *
3879  * This call may only be used from a context that may sleep.  The sleep
3880  * is non-interruptible, and has no timeout.
3881  *
3882  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3883  * call.
3884  *
3885  * Return: always zero.
3886  */
3887 int spi_bus_unlock(struct spi_controller *ctlr)
3888 {
3889         ctlr->bus_lock_flag = 0;
3890
3891         mutex_unlock(&ctlr->bus_lock_mutex);
3892
3893         return 0;
3894 }
3895 EXPORT_SYMBOL_GPL(spi_bus_unlock);
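/*
 * Illustrative sketch (not part of this file): a driver performing two
 * messages that must not be interleaved with traffic from other devices
 * on the same bus.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */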
3896
3897 /* portable code must never pass more than 32 bytes */
3898 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
3899
3900 static u8       *buf;
3901
3902 /**
3903  * spi_write_then_read - SPI synchronous write followed by read
3904  * @spi: device with which data will be exchanged
3905  * @txbuf: data to be written (need not be dma-safe)
3906  * @n_tx: size of txbuf, in bytes
3907  * @rxbuf: buffer into which data will be read (need not be dma-safe)
3908  * @n_rx: size of rxbuf, in bytes
3909  * Context: can sleep
3910  *
3911  * This performs a half duplex MicroWire style transaction with the
3912  * device, sending txbuf and then reading rxbuf.  The return value
3913  * is zero for success, else a negative errno status code.
3914  * This call may only be used from a context that may sleep.
3915  *
3916  * Parameters to this routine are always copied using a small buffer.
3917  * Performance-sensitive or bulk transfer code should instead use
3918  * spi_{async,sync}() calls with dma-safe buffers.
3919  *
3920  * Return: zero on success, else a negative error code.
3921  */
3922 int spi_write_then_read(struct spi_device *spi,
3923                 const void *txbuf, unsigned n_tx,
3924                 void *rxbuf, unsigned n_rx)
3925 {
3926         static DEFINE_MUTEX(lock);
3927
3928         int                     status;
3929         struct spi_message      message;
3930         struct spi_transfer     x[2];
3931         u8                      *local_buf;
3932
3933         /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
3934          * copying here (it's purely a convenience thing), but we can
3935          * keep heap costs out of the hot path unless someone else is
3936          * using the pre-allocated buffer or the transfer is too large.
3937          */
3938         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3939                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3940                                     GFP_KERNEL | GFP_DMA);
3941                 if (!local_buf)
3942                         return -ENOMEM;
3943         } else {
3944                 local_buf = buf;
3945         }
3946
3947         spi_message_init(&message);
3948         memset(x, 0, sizeof(x));
3949         if (n_tx) {
3950                 x[0].len = n_tx;
3951                 spi_message_add_tail(&x[0], &message);
3952         }
3953         if (n_rx) {
3954                 x[1].len = n_rx;
3955                 spi_message_add_tail(&x[1], &message);
3956         }
3957
3958         memcpy(local_buf, txbuf, n_tx);
3959         x[0].tx_buf = local_buf;
3960         x[1].rx_buf = local_buf + n_tx;
3961
3962         /* do the i/o */
3963         status = spi_sync(spi, &message);
3964         if (status == 0)
3965                 memcpy(rxbuf, x[1].rx_buf, n_rx);
3966
3967         if (x[0].tx_buf == buf)
3968                 mutex_unlock(&lock);
3969         else
3970                 kfree(local_buf);
3971
3972         return status;
3973 }
3974 EXPORT_SYMBOL_GPL(spi_write_then_read);
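/*
 * Illustrative sketch (not part of this file): reading a two-byte value
 * after writing a one-byte command; cmd is a hypothetical opcode.
 *
 *	u8 cmd = 0x9f;
 *	u8 id[2];
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */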
3975
3976 /*-------------------------------------------------------------------------*/
3977
3978 #if IS_ENABLED(CONFIG_OF)
3979 /* must call put_device() when done with returned spi_device device */
3980 struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3981 {
3982         struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
3983
3984         return dev ? to_spi_device(dev) : NULL;
3985 }
3986 EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
3987 #endif /* IS_ENABLED(CONFIG_OF) */
3988
3989 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
3990 /* the spi controllers are not on the spi_bus, so we find them another way */
3991 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3992 {
3993         struct device *dev;
3994
3995         dev = class_find_device_by_of_node(&spi_master_class, node);
3996         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3997                 dev = class_find_device_by_of_node(&spi_slave_class, node);
3998         if (!dev)
3999                 return NULL;
4000
4001         /* reference was obtained in class_find_device */
4002         return container_of(dev, struct spi_controller, dev);
4003 }
4004
4005 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4006                          void *arg)
4007 {
4008         struct of_reconfig_data *rd = arg;
4009         struct spi_controller *ctlr;
4010         struct spi_device *spi;
4011
4012         switch (of_reconfig_get_state_change(action, arg)) {
4013         case OF_RECONFIG_CHANGE_ADD:
4014                 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4015                 if (ctlr == NULL)
4016                         return NOTIFY_OK;       /* not for us */
4017
4018                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4019                         put_device(&ctlr->dev);
4020                         return NOTIFY_OK;
4021                 }
4022
4023                 spi = of_register_spi_device(ctlr, rd->dn);
4024                 put_device(&ctlr->dev);
4025
4026                 if (IS_ERR(spi)) {
4027                         pr_err("%s: failed to create for '%pOF'\n",
4028                                         __func__, rd->dn);
4029                         of_node_clear_flag(rd->dn, OF_POPULATED);
4030                         return notifier_from_errno(PTR_ERR(spi));
4031                 }
4032                 break;
4033
4034         case OF_RECONFIG_CHANGE_REMOVE:
4035                 /* already depopulated? */
4036                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4037                         return NOTIFY_OK;
4038
4039                 /* find our device by node */
4040                 spi = of_find_spi_device_by_node(rd->dn);
4041                 if (spi == NULL)
4042                         return NOTIFY_OK;       /* no? not meant for us */
4043
4044                 /* unregister takes one ref away */
4045                 spi_unregister_device(spi);
4046
4047                 /* and put the reference of the find */
4048                 put_device(&spi->dev);
4049                 break;
4050         }
4051
4052         return NOTIFY_OK;
4053 }
4054
4055 static struct notifier_block spi_of_notifier = {
4056         .notifier_call = of_spi_notify,
4057 };
4058 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4059 extern struct notifier_block spi_of_notifier;
4060 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4061
4062 #if IS_ENABLED(CONFIG_ACPI)
4063 static int spi_acpi_controller_match(struct device *dev, const void *data)
4064 {
4065         return ACPI_COMPANION(dev->parent) == data;
4066 }
4067
4068 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4069 {
4070         struct device *dev;
4071
4072         dev = class_find_device(&spi_master_class, NULL, adev,
4073                                 spi_acpi_controller_match);
4074         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4075                 dev = class_find_device(&spi_slave_class, NULL, adev,
4076                                         spi_acpi_controller_match);
4077         if (!dev)
4078                 return NULL;
4079
4080         return container_of(dev, struct spi_controller, dev);
4081 }
4082
4083 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4084 {
4085         struct device *dev;
4086
4087         dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4088         return to_spi_device(dev);
4089 }
4090
4091 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4092                            void *arg)
4093 {
4094         struct acpi_device *adev = arg;
4095         struct spi_controller *ctlr;
4096         struct spi_device *spi;
4097
4098         switch (value) {
4099         case ACPI_RECONFIG_DEVICE_ADD:
4100                 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4101                 if (!ctlr)
4102                         break;
4103
4104                 acpi_register_spi_device(ctlr, adev);
4105                 put_device(&ctlr->dev);
4106                 break;
4107         case ACPI_RECONFIG_DEVICE_REMOVE:
4108                 if (!acpi_device_enumerated(adev))
4109                         break;
4110
4111                 spi = acpi_spi_find_device_by_adev(adev);
4112                 if (!spi)
4113                         break;
4114
4115                 spi_unregister_device(spi);
4116                 put_device(&spi->dev);
4117                 break;
4118         }
4119
4120         return NOTIFY_OK;
4121 }
4122
4123 static struct notifier_block spi_acpi_notifier = {
4124         .notifier_call = acpi_spi_notify,
4125 };
4126 #else
4127 extern struct notifier_block spi_acpi_notifier;
4128 #endif
4129
4130 static int __init spi_init(void)
4131 {
4132         int     status;
4133
4134         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4135         if (!buf) {
4136                 status = -ENOMEM;
4137                 goto err0;
4138         }
4139
4140         status = bus_register(&spi_bus_type);
4141         if (status < 0)
4142                 goto err1;
4143
4144         status = class_register(&spi_master_class);
4145         if (status < 0)
4146                 goto err2;
4147
4148         if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4149                 status = class_register(&spi_slave_class);
4150                 if (status < 0)
4151                         goto err3;
4152         }
4153
4154         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4155                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4156         if (IS_ENABLED(CONFIG_ACPI))
4157                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4158
4159         return 0;
4160
4161 err3:
4162         class_unregister(&spi_master_class);
4163 err2:
4164         bus_unregister(&spi_bus_type);
4165 err1:
4166         kfree(buf);
4167         buf = NULL;
4168 err0:
4169         return status;
4170 }
4171
4172 /* board_info is normally registered in arch_initcall(),
4173  * but even essential drivers wait till later.
4174  *
4175  * REVISIT only boardinfo really needs static linking.  The rest (device and
4176  * driver registration) _could_ be dynamically linked (modular) ... costs
4177  * include needing to have boardinfo data structures be much more public.
4178  */
4179 postcore_initcall(spi_init);