// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        spi_controller_put(spi->controller);
        kfree(spi->driver_override);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *a,
                                     const char *buf, size_t count)
{
        struct spi_device *spi = to_spi_device(dev);
        const char *end = memchr(buf, '\n', count);
        const size_t len = end ? end - buf : count;
        const char *driver_override, *old;

        /* We need to keep extra room for a newline when displaying value */
        if (len >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, len, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        device_lock(dev);
        old = spi->driver_override;
        if (len) {
                spi->driver_override = driver_override;
        } else {
                /* Empty string, disable driver override */
                spi->driver_override = NULL;
                kfree(driver_override);
        }
        device_unlock(dev);
        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        ssize_t len;

        device_lock(dev);
        len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);
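
/*
 * Usage note (illustrative, not part of this file): driver_override can
 * be written from userspace to force a specific driver to bind to a
 * device; the store handler above strips the trailing newline. The
 * device name "spi0.0" below is hypothetical:
 *
 *	echo spidev > /sys/bus/spi/devices/spi0.0/driver_override
 *	echo spi0.0 > /sys/bus/spi/drivers/spidev/bind
 *
 * Writing an empty string clears the override again.
 */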

#define SPI_STATISTICS_ATTRS(field, file)                               \
static ssize_t spi_controller_##field##_show(struct device *dev,        \
                                             struct device_attribute *attr, \
                                             char *buf)                 \
{                                                                       \
        struct spi_controller *ctlr = container_of(dev,                 \
                                         struct spi_controller, dev);   \
        return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
}                                                                       \
static struct device_attribute dev_attr_spi_controller_##field = {      \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_controller_##field##_show,                          \
};                                                                      \
static ssize_t spi_device_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                        char *buf)                      \
{                                                                       \
        struct spi_device *spi = to_spi_device(dev);                    \
        return spi_statistics_##field##_show(&spi->statistics, buf);    \
}                                                                       \
static struct device_attribute dev_attr_spi_device_##field = {          \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_device_##field##_show,                              \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf)                  \
{                                                                       \
        unsigned long flags;                                            \
        ssize_t len;                                                    \
        spin_lock_irqsave(&stat->lock, flags);                          \
        len = sysfs_emit(buf, format_string "\n", stat->field);         \
        spin_unlock_irqrestore(&stat->lock, flags);                     \
        return len;                                                     \
}                                                                       \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)                       \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
                                 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
                                 "transfer_bytes_histo_" number,        \
                                 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
        &dev_attr_spi_controller_messages.attr,
        &dev_attr_spi_controller_transfers.attr,
        &dev_attr_spi_controller_errors.attr,
        &dev_attr_spi_controller_timedout.attr,
        &dev_attr_spi_controller_spi_sync.attr,
        &dev_attr_spi_controller_spi_sync_immediate.attr,
        &dev_attr_spi_controller_spi_async.attr,
        &dev_attr_spi_controller_bytes.attr,
        &dev_attr_spi_controller_bytes_rx.attr,
        &dev_attr_spi_controller_bytes_tx.attr,
        &dev_attr_spi_controller_transfer_bytes_histo0.attr,
        &dev_attr_spi_controller_transfer_bytes_histo1.attr,
        &dev_attr_spi_controller_transfer_bytes_histo2.attr,
        &dev_attr_spi_controller_transfer_bytes_histo3.attr,
        &dev_attr_spi_controller_transfer_bytes_histo4.attr,
        &dev_attr_spi_controller_transfer_bytes_histo5.attr,
        &dev_attr_spi_controller_transfer_bytes_histo6.attr,
        &dev_attr_spi_controller_transfer_bytes_histo7.attr,
        &dev_attr_spi_controller_transfer_bytes_histo8.attr,
        &dev_attr_spi_controller_transfer_bytes_histo9.attr,
        &dev_attr_spi_controller_transfer_bytes_histo10.attr,
        &dev_attr_spi_controller_transfer_bytes_histo11.attr,
        &dev_attr_spi_controller_transfer_bytes_histo12.attr,
        &dev_attr_spi_controller_transfer_bytes_histo13.attr,
        &dev_attr_spi_controller_transfer_bytes_histo14.attr,
        &dev_attr_spi_controller_transfer_bytes_histo15.attr,
        &dev_attr_spi_controller_transfer_bytes_histo16.attr,
        &dev_attr_spi_controller_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_controller_statistics_group,
        NULL,
};
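
/*
 * Usage note (illustrative, not part of this file): the attribute groups
 * above surface per-controller and per-device counters as a "statistics"
 * directory in sysfs, e.g. (example paths, actual names depend on the
 * system):
 *
 *	/sys/class/spi_master/spi0/statistics/bytes
 *	/sys/bus/spi/devices/spi0.0/statistics/transfer_bytes_histo_0-1
 *
 * Each file prints a single counter maintained by
 * spi_statistics_add_transfer_stats() below.
 */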

static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                              struct spi_transfer *xfer,
                                              struct spi_controller *ctlr)
{
        unsigned long flags;
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != ctlr->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != ctlr->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
        while (id->name[0]) {
                if (!strcmp(name, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Check override first, and if set, only use the named driver */
        if (spi->driver_override)
                return strcmp(spi->driver_override, drv->name) == 0;

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi->modalias);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        struct spi_device               *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret)
                return ret;

        if (sdrv->probe) {
                ret = sdrv->probe(spi);
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static void spi_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        if (sdrv->remove)
                sdrv->remove(to_spi_device(dev));

        dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
        if (dev->driver) {
                const struct spi_driver *sdrv = to_spi_driver(dev->driver);

                if (sdrv->shutdown)
                        sdrv->shutdown(to_spi_device(dev));
        }
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
        .probe          = spi_probe,
        .remove         = spi_remove,
        .shutdown       = spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;

        /*
         * For Really Good Reasons we use spi: modaliases rather than of:
         * modaliases for DT, so module autoloading won't work unless the
         * driver provides a spi_device_id in addition to a compatible
         * string.
         */
        if (sdrv->driver.of_match_table) {
                const struct of_device_id *of_id;

                for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
                     of_id++) {
                        const char *of_name;

                        /* Strip off any vendor prefix */
                        of_name = strnchr(of_id->compatible,
                                          sizeof(of_id->compatible), ',');
                        if (of_name)
                                of_name++;
                        else
                                of_name = of_id->compatible;

                        if (sdrv->id_table) {
                                const struct spi_device_id *spi_id;

                                spi_id = spi_match_id(sdrv->id_table, of_name);
                                if (spi_id)
                                        continue;
                        } else {
                                if (strcmp(sdrv->driver.name, of_name) == 0)
                                        continue;
                        }

                        pr_warn("SPI driver %s has no spi_device_id for %s\n",
                                sdrv->driver.name, of_id->compatible);
                }
        }

        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
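
/*
 * Example (illustrative sketch, not part of this file): a client driver
 * registering with the SPI core. Names such as my_driver and my_probe
 * are hypothetical; module_spi_driver() expands to module init/exit
 * hooks that reach __spi_register_driver() via spi_register_driver().
 *
 *	static const struct spi_device_id my_ids[] = {
 *		{ "mychip", 0 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, my_ids);
 *
 *	static struct spi_driver my_driver = {
 *		.driver = {
 *			.name = "mychip",
 *		},
 *		.id_table = my_ids,
 *		.probe    = my_probe,
 *		.remove   = my_remove,
 *	};
 *	module_spi_driver(my_driver);
 */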

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into a board-specific file such as
 * arch/.../mach.../board-YYY.c, along with other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, as well as their matching process; also used
 * to protect the struct idr object.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
        struct spi_device       *spi;

        if (!spi_controller_get(ctlr))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_controller_put(ctlr);
                return NULL;
        }

        spi->master = spi->controller = ctlr;
        spi->dev.parent = &ctlr->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->mode = ctlr->buswidth_override_bits;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
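
/*
 * Example (illustrative sketch, not part of this file): the two-step
 * allocate-then-add flow described in the kernel-doc above. The modalias
 * string and settings are hypothetical.
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "mychip", sizeof(spi->modalias));
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard without registering
 *		return -ENODEV;
 *	}
 */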

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->controller == new_spi->controller &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
        if (spi->controller->cleanup)
                spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;

        /*
         * We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.
         */
        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                return status;
        }

        /* Controller may unregister concurrently */
        if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
            !device_is_registered(&ctlr->dev)) {
                return -ENODEV;
        }

        if (ctlr->cs_gpiods)
                spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];

        /*
         * Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                return status;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0) {
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
                spi_cleanup(spi);
        } else {
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
        }

        return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= ctlr->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
                        ctlr->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        mutex_lock(&ctlr->add_lock);
        status = __spi_add_device(spi);
        mutex_unlock(&ctlr->add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static int spi_add_device_locked(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= ctlr->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
                        ctlr->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        WARN_ON(!mutex_is_locked(&ctlr->add_lock));
        return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /*
         * NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(ctlr);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        if (chip->swnode) {
                status = device_add_software_node(&proxy->dev, chip->swnode);
                if (status) {
                        dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
                                chip->modalias, status);
                        goto err_dev_put;
                }
        }

        status = spi_add_device(proxy);
        if (status < 0)
                goto err_dev_put;

        return proxy;

err_dev_put:
        device_remove_software_node(&proxy->dev);
        spi_dev_put(proxy);
        return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
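
/*
 * Example (illustrative sketch, not part of this file): an adapter
 * driver instantiating a device it learned about out-of-band, as the
 * kernel-doc above suggests. The chip description is hypothetical.
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "mychip",
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 0,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 *	if (!dev)
 *		dev_err(&ctlr->dev, "cannot add mychip\n");
 */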

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node) {
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
                of_node_put(spi->dev.of_node);
        }
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_remove_software_node(&spi->dev);
        device_del(&spi->dev);
        spi_cleanup(spi);
        put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
                                              struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (ctlr->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(ctlr, bi);
        if (!dev)
                dev_err(ctlr->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return 0;

        bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_controller *ctlr;

                memcpy(&bi->board_info, info, sizeof(*info));

                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(ctlr, &spi_controller_list, list)
                        spi_match_controller_to_boardinfo(ctlr,
                                                          &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
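
/*
 * Example (illustrative sketch, not part of this file): board init code
 * declaring hard-wired devices, typically run from an arch_initcall.
 * Bus number and chip parameters are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "myflash",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */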

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
                           size_t size, gfp_t gfp)
{
        struct spi_res *sres;

        sres = kzalloc(sizeof(*sres) + size, gfp);
        if (!sres)
                return NULL;

        INIT_LIST_HEAD(&sres->entry);
        sres->release = release;

        return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
        struct spi_res *sres = container_of(res, struct spi_res, data);

        if (!res)
                return;

        WARN_ON(!list_empty(&sres->entry));
        kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
        struct spi_res *sres = container_of(res, struct spi_res, data);

        WARN_ON(!list_empty(&sres->entry));
        list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
        struct spi_res *res, *tmp;

        list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
                if (res->release)
                        res->release(ctlr, message, res->data);

                list_del(&res->entry);

                kfree(res);
        }
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
        bool activate = enable;

        /*
         * Avoid calling into the driver (or doing delays) if the chip select
         * isn't actually changing from the last time this was called.
         */
        if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
                                (!enable && spi->controller->last_cs != spi->chip_select)) &&
            (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
                return;

        trace_spi_set_cs(spi, activate);

        spi->controller->last_cs = enable ? spi->chip_select : -1;
        spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

        if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
                spi_delay_exec(&spi->cs_hold, NULL);
        }

        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (spi->cs_gpiod) {
                if (!(spi->mode & SPI_NO_CS)) {
                        /*
                         * Historically ACPI has no means of expressing GPIO
                         * polarity, so the SPISerialBus() resource defines it
                         * on a per-chip basis. In order to avoid a chain of
                         * negations, the GPIO polarity is considered Active
                         * High. Even when _DSD() is involved (in updated
                         * versions of ACPI), the GPIO CS polarity must be
                         * defined Active High to avoid ambiguity. That's why
                         * we use enable, which takes SPI_CS_HIGH into account.
                         */
                        if (has_acpi_companion(&spi->dev))
                                gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
                        else
                                /* Polarity handled by GPIO library */
                                gpiod_set_value_cansleep(spi->cs_gpiod, activate);
                }
                /* Some SPI masters need both GPIO CS & slave_select */
                if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
                    spi->controller->set_cs)
                        spi->controller->set_cs(spi, !enable);
        } else if (spi->controller->set_cs) {
                spi->controller->set_cs(spi, !enable);
        }

        if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
                if (activate)
                        spi_delay_exec(&spi->cs_setup, NULL);
                else
                        spi_delay_exec(&spi->cs_inactive, NULL);
        }
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
                struct sg_table *sgt, void *buf, size_t len,
                enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
                                (unsigned long)buf < (PKMAP_BASE +
                                        (LAST_PKMAP * PAGE_SIZE)));
#else
        const bool kmap_buf = false;
#endif
        int desc_len;
        int sgs;
        struct page *vm_page;
        struct scatterlist *sg;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf || kmap_buf) {
                desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
                desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        sg = &sgt->sgl[0];
        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf || kmap_buf) {
                        /*
                         * Next scatterlist entry size is the minimum between
                         * the desc_len and the remaining buffer length that
                         * fits in a page.
                         */
                        min = min_t(size_t, desc_len,
                                    min_t(size_t, len,
                                          PAGE_SIZE - offset_in_page(buf)));
                        if (vmalloced_buf)
                                vm_page = vmalloc_to_page(buf);
                        else
                                vm_page = kmap_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(sg, vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(sg, sg_buf, min);
                }

                buf += min;
                len -= min;
                sg = sg_next(sg);
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
                   struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else if (ctlr->dma_map_dev)
                tx_dev = ctlr->dma_map_dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else if (ctlr->dma_map_dev)
                rx_dev = ctlr->dma_map_dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        ctlr->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else if (ctlr->dma_map_dev)
                tx_dev = ctlr->dma_map_dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else if (ctlr->dma_map_dev)
                rx_dev = ctlr->dma_map_dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        ctlr->cur_msg_mapped = false;

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore tx_buf and rx_buf to NULL if they originally were:
                 * spi_map_msg() may have pointed them at the dummy buffers.
                 */
                if (xfer->tx_buf == ctlr->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == ctlr->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
                && !(msg->spi->mode & SPI_3WIRE)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(ctlr->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_tx = tmp;
                }

                if (max_rx) {
                        tmp = krealloc(ctlr->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->len)
                                        continue;
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = ctlr->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = ctlr->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
                             struct spi_message *msg,
                             struct spi_transfer *xfer)
{
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
        u32 speed_hz = xfer->speed_hz;
        unsigned long long ms;

        if (spi_controller_is_slave(ctlr)) {
                if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
                        dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
                        return -EINTR;
                }
        } else {
                if (!speed_hz)
                        speed_hz = 100000;

                /*
                 * For each byte we wait for 8 cycles of the SPI clock.
                 * Since speed is defined in Hz and we want milliseconds,
                 * apply the MSEC_PER_SEC multiplier before the division,
                 * otherwise we may get 0 for short transfers.
                 */
                ms = 8LL * MSEC_PER_SEC * xfer->len;
                do_div(ms, speed_hz);

                /*
                 * Double the computed time and add 200 ms of tolerance;
                 * use the predefined maximum in case of overflow.
                 */
                ms += ms + 200;
                if (ms > UINT_MAX)
                        ms = UINT_MAX;

                ms = wait_for_completion_timeout(&ctlr->xfer_completion,
                                                 msecs_to_jiffies(ms));

                if (ms == 0) {
                        SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
                        SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
                        dev_err(&msg->spi->dev,
                                "SPI transfer timed out\n");
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
        if (!ns)
                return;
        if (ns <= NSEC_PER_USEC) {
                ndelay(ns);
        } else {
                u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

                if (us <= 10)
                        udelay(us);
                else
                        usleep_range(us, us + DIV_ROUND_UP(us, 10));
        }
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        u32 delay = _delay->value;
        u32 unit = _delay->unit;
        u32 hz;

        if (!delay)
                return 0;

        switch (unit) {
        case SPI_DELAY_UNIT_USECS:
                delay *= NSEC_PER_USEC;
                break;
        case SPI_DELAY_UNIT_NSECS:
                /* Nothing to do here */
                break;
        case SPI_DELAY_UNIT_SCK:
                /* clock cycles need to be obtained from spi_transfer */
                if (!xfer)
                        return -EINVAL;
                /*
                 * If the effective speed is unknown, approximate it by
                 * underestimating: use half of the requested speed.
                 */
                hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
                if (!hz)
                        return -EINVAL;

                /* Convert delay to nanoseconds */
                delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
                break;
        default:
                return -EINVAL;
        }

        return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        int delay;

        might_sleep();

        if (!_delay)
                return -EINVAL;

        delay = spi_delay_to_ns(_delay, xfer);
        if (delay < 0)
                return delay;

        _spi_transfer_delay_ns(delay);

        return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
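
/*
 * Example (illustrative, not part of this file): a delay of 3 SCK cycles
 * on a transfer whose effective_speed_hz is 1 MHz converts to
 * 3 * DIV_ROUND_UP(NSEC_PER_SEC, 1000000) = 3000 ns; _spi_transfer_delay_ns()
 * then busy-waits via ndelay()/udelay() for short delays and sleeps via
 * usleep_range() for longer ones. The xfer variable below is assumed to
 * be a prepared transfer supplying the clock rate:
 *
 *	struct spi_delay d = {
 *		.value = 3,
 *		.unit = SPI_DELAY_UNIT_SCK,
 *	};
 *	spi_delay_exec(&d, xfer);
 */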
1365
1366 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1367                                           struct spi_transfer *xfer)
1368 {
1369         u32 default_delay_ns = 10 * NSEC_PER_USEC;
1370         u32 delay = xfer->cs_change_delay.value;
1371         u32 unit = xfer->cs_change_delay.unit;
1372         int ret;
1373
1374         /* return early on "fast" mode - for everything but USECS */
1375         if (!delay) {
1376                 if (unit == SPI_DELAY_UNIT_USECS)
1377                         _spi_transfer_delay_ns(default_delay_ns);
1378                 return;
1379         }
1380
1381         ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1382         if (ret) {
1383                 dev_err_once(&msg->spi->dev,
1384                              "Use of unsupported delay unit %i, using default of %luus\n",
1385                              unit, default_delay_ns / NSEC_PER_USEC);
1386                 _spi_transfer_delay_ns(default_delay_ns);
1387         }
1388 }
1389
1390 /*
1391  * spi_transfer_one_message - Default implementation of transfer_one_message()
1392  *
1393  * This is a standard implementation of transfer_one_message() for
1394  * drivers which implement a transfer_one() operation.  It provides
1395  * standard handling of delays and chip select management.
1396  */
1397 static int spi_transfer_one_message(struct spi_controller *ctlr,
1398                                     struct spi_message *msg)
1399 {
1400         struct spi_transfer *xfer;
1401         bool keep_cs = false;
1402         int ret = 0;
1403         struct spi_statistics *statm = &ctlr->statistics;
1404         struct spi_statistics *stats = &msg->spi->statistics;
1405
1406         spi_set_cs(msg->spi, true, false);
1407
1408         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1409         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1410
1411         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1412                 trace_spi_transfer_start(msg, xfer);
1413
1414                 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1415                 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1416
1417                 if (!ctlr->ptp_sts_supported) {
1418                         xfer->ptp_sts_word_pre = 0;
1419                         ptp_read_system_prets(xfer->ptp_sts);
1420                 }
1421
1422                 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1423                         reinit_completion(&ctlr->xfer_completion);
1424
1425 fallback_pio:
1426                         ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1427                         if (ret < 0) {
1428                                 if (ctlr->cur_msg_mapped &&
1429                                    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1430                                         __spi_unmap_msg(ctlr, msg);
1431                                         ctlr->fallback = true;
1432                                         xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1433                                         goto fallback_pio;
1434                                 }
1435
1436                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
1437                                                                errors);
1438                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
1439                                                                errors);
1440                                 dev_err(&msg->spi->dev,
1441                                         "SPI transfer failed: %d\n", ret);
1442                                 goto out;
1443                         }
1444
1445                         if (ret > 0) {
1446                                 ret = spi_transfer_wait(ctlr, msg, xfer);
1447                                 if (ret < 0)
1448                                         msg->status = ret;
1449                         }
1450                 } else {
1451                         if (xfer->len)
1452                                 dev_err(&msg->spi->dev,
1453                                         "Bufferless transfer has length %u\n",
1454                                         xfer->len);
1455                 }
1456
1457                 if (!ctlr->ptp_sts_supported) {
1458                         ptp_read_system_postts(xfer->ptp_sts);
1459                         xfer->ptp_sts_word_post = xfer->len;
1460                 }
1461
1462                 trace_spi_transfer_stop(msg, xfer);
1463
1464                 if (msg->status != -EINPROGRESS)
1465                         goto out;
1466
1467                 spi_transfer_delay_exec(xfer);
1468
1469                 if (xfer->cs_change) {
1470                         if (list_is_last(&xfer->transfer_list,
1471                                          &msg->transfers)) {
1472                                 keep_cs = true;
1473                         } else {
1474                                 spi_set_cs(msg->spi, false, false);
1475                                 _spi_transfer_cs_change_delay(msg, xfer);
1476                                 spi_set_cs(msg->spi, true, false);
1477                         }
1478                 }
1479
1480                 msg->actual_length += xfer->len;
1481         }
1482
1483 out:
1484         if (ret != 0 || !keep_cs)
1485                 spi_set_cs(msg->spi, false, false);
1486
1487         if (msg->status == -EINPROGRESS)
1488                 msg->status = ret;
1489
1490         if (msg->status && ctlr->handle_err)
1491                 ctlr->handle_err(ctlr, msg);
1492
1493         spi_finalize_current_message(ctlr);
1494
1495         return ret;
1496 }
1497
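/*
 * Illustrative sketch (not part of this file): the transfer_one()
 * contract that spi_transfer_one_message() relies on.  Returning 0
 * means the transfer finished synchronously; returning a positive
 * value means the transfer was started and the core must wait until
 * the driver calls spi_finalize_current_transfer(), e.g. from its
 * completion interrupt.  All "foo_*" names below are hypothetical.
 *
 *	static int foo_spi_transfer_one(struct spi_controller *ctlr,
 *					struct spi_device *spi,
 *					struct spi_transfer *xfer)
 *	{
 *		struct foo_spi *fs = spi_controller_get_devdata(ctlr);
 *
 *		foo_spi_program_xfer(fs, xfer);
 *		foo_spi_start_and_enable_irq(fs);
 *
 *		return 1;
 *	}
 */
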
1498 /**
1499  * spi_finalize_current_transfer - report completion of a transfer
1500  * @ctlr: the controller reporting completion
1501  *
1502  * Called by SPI drivers using the core transfer_one_message()
1503  * implementation to notify it that the current interrupt driven
1504  * transfer has finished and the next one may be scheduled.
1505  */
1506 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1507 {
1508         complete(&ctlr->xfer_completion);
1509 }
1510 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1511
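/*
 * Illustrative sketch: a completion interrupt in a driver whose
 * transfer_one() returned a positive value.  The handler drains the
 * hardware and then lets the core schedule the next transfer.  The
 * "foo_*" names are hypothetical.
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *		struct foo_spi *fs = spi_controller_get_devdata(ctlr);
 *
 *		foo_spi_drain_rx_fifo(fs);
 *		spi_finalize_current_transfer(ctlr);
 *
 *		return IRQ_HANDLED;
 *	}
 */
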
1512 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1513 {
1514         if (ctlr->auto_runtime_pm) {
1515                 pm_runtime_mark_last_busy(ctlr->dev.parent);
1516                 pm_runtime_put_autosuspend(ctlr->dev.parent);
1517         }
1518 }
1519
1520 /**
1521  * __spi_pump_messages - function which processes spi message queue
1522  * @ctlr: controller to process queue for
1523  * @in_kthread: true if we are in the context of the message pump thread
1524  *
1525  * This function checks if there is any spi message in the queue that
1526  * needs processing and if so calls out to the driver to initialize hardware
1527  * and transfer each message.
1528  *
1529  * Note that it is called both from the kthread itself and also from
1530  * inside spi_sync(); the queue extraction handling at the top of the
1531  * function should deal with this safely.
1532  */
1533 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1534 {
1535         struct spi_transfer *xfer;
1536         struct spi_message *msg;
1537         bool was_busy = false;
1538         unsigned long flags;
1539         int ret;
1540
1541         /* Lock queue */
1542         spin_lock_irqsave(&ctlr->queue_lock, flags);
1543
1544         /* Make sure we are not already running a message */
1545         if (ctlr->cur_msg) {
1546                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1547                 return;
1548         }
1549
1550         /* If another context is idling the device then defer */
1551         if (ctlr->idling) {
1552                 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1553                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1554                 return;
1555         }
1556
1557         /* Check if the queue is idle */
1558         if (list_empty(&ctlr->queue) || !ctlr->running) {
1559                 if (!ctlr->busy) {
1560                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1561                         return;
1562                 }
1563
1564                 /* Defer any non-atomic teardown to the thread */
1565                 if (!in_kthread) {
1566                         if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1567                             !ctlr->unprepare_transfer_hardware) {
1568                                 spi_idle_runtime_pm(ctlr);
1569                                 ctlr->busy = false;
1570                                 trace_spi_controller_idle(ctlr);
1571                         } else {
1572                                 kthread_queue_work(ctlr->kworker,
1573                                                    &ctlr->pump_messages);
1574                         }
1575                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1576                         return;
1577                 }
1578
1579                 ctlr->busy = false;
1580                 ctlr->idling = true;
1581                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1582
1583                 kfree(ctlr->dummy_rx);
1584                 ctlr->dummy_rx = NULL;
1585                 kfree(ctlr->dummy_tx);
1586                 ctlr->dummy_tx = NULL;
1587                 if (ctlr->unprepare_transfer_hardware &&
1588                     ctlr->unprepare_transfer_hardware(ctlr))
1589                         dev_err(&ctlr->dev,
1590                                 "failed to unprepare transfer hardware\n");
1591                 spi_idle_runtime_pm(ctlr);
1592                 trace_spi_controller_idle(ctlr);
1593
1594                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1595                 ctlr->idling = false;
1596                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1597                 return;
1598         }
1599
1600         /* Extract head of queue */
1601         msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1602         ctlr->cur_msg = msg;
1603
1604         list_del_init(&msg->queue);
1605         if (ctlr->busy)
1606                 was_busy = true;
1607         else
1608                 ctlr->busy = true;
1609         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1610
1611         mutex_lock(&ctlr->io_mutex);
1612
1613         if (!was_busy && ctlr->auto_runtime_pm) {
1614                 ret = pm_runtime_resume_and_get(ctlr->dev.parent);
1615                 if (ret < 0) {
1616                         dev_err(&ctlr->dev, "Failed to power device: %d\n",
1617                                 ret);
1618                         mutex_unlock(&ctlr->io_mutex);
1619                         return;
1620                 }
1621         }
1622
1623         if (!was_busy)
1624                 trace_spi_controller_busy(ctlr);
1625
1626         if (!was_busy && ctlr->prepare_transfer_hardware) {
1627                 ret = ctlr->prepare_transfer_hardware(ctlr);
1628                 if (ret) {
1629                         dev_err(&ctlr->dev,
1630                                 "failed to prepare transfer hardware: %d\n",
1631                                 ret);
1632
1633                         if (ctlr->auto_runtime_pm)
1634                                 pm_runtime_put(ctlr->dev.parent);
1635
1636                         msg->status = ret;
1637                         spi_finalize_current_message(ctlr);
1638
1639                         mutex_unlock(&ctlr->io_mutex);
1640                         return;
1641                 }
1642         }
1643
1644         trace_spi_message_start(msg);
1645
1646         if (ctlr->prepare_message) {
1647                 ret = ctlr->prepare_message(ctlr, msg);
1648                 if (ret) {
1649                         dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1650                                 ret);
1651                         msg->status = ret;
1652                         spi_finalize_current_message(ctlr);
1653                         goto out;
1654                 }
1655                 ctlr->cur_msg_prepared = true;
1656         }
1657
1658         ret = spi_map_msg(ctlr, msg);
1659         if (ret) {
1660                 msg->status = ret;
1661                 spi_finalize_current_message(ctlr);
1662                 goto out;
1663         }
1664
1665         if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1666                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1667                         xfer->ptp_sts_word_pre = 0;
1668                         ptp_read_system_prets(xfer->ptp_sts);
1669                 }
1670         }
1671
1672         ret = ctlr->transfer_one_message(ctlr, msg);
1673         if (ret) {
1674                 dev_err(&ctlr->dev,
1675                         "failed to transfer one message from queue\n");
1676                 goto out;
1677         }
1678
1679 out:
1680         mutex_unlock(&ctlr->io_mutex);
1681
1682         /* Prod the scheduler in case transfer_one() was busy waiting */
1683         if (!ret)
1684                 cond_resched();
1685 }
1686
1687 /**
1688  * spi_pump_messages - kthread work function which processes spi message queue
1689  * @work: pointer to kthread work struct contained in the controller struct
1690  */
1691 static void spi_pump_messages(struct kthread_work *work)
1692 {
1693         struct spi_controller *ctlr =
1694                 container_of(work, struct spi_controller, pump_messages);
1695
1696         __spi_pump_messages(ctlr, true);
1697 }
1698
1699 /**
1700  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1701  * @ctlr: Pointer to the spi_controller structure of the driver
1702  * @xfer: Pointer to the transfer being timestamped
1703  * @progress: How many words (not bytes) have been transferred so far
1704  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1705  *            transfer, for less jitter in time measurement. Only compatible
1706  *            with PIO drivers. If true, must follow up with
1707  *            spi_take_timestamp_post, otherwise the system will crash.
1708  *            WARNING: for fully predictable results, the CPU frequency must
1709  *            also be under control (governor).
1710  *
1711  * This is a helper for drivers to collect the beginning of the TX timestamp
1712  * for the requested byte from the SPI transfer. The frequency with which this
1713  * function must be called (once per word, once for the whole transfer, once
1714  * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1715  * greater than or equal to the requested byte at the time of the call. The
1716  * timestamp is only taken once, at the first such call. It is assumed that
1717  * the driver advances its @tx buffer pointer monotonically.
1718  */
1719 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1720                             struct spi_transfer *xfer,
1721                             size_t progress, bool irqs_off)
1722 {
1723         if (!xfer->ptp_sts)
1724                 return;
1725
1726         if (xfer->timestamped)
1727                 return;
1728
1729         if (progress > xfer->ptp_sts_word_pre)
1730                 return;
1731
1732         /* Capture the resolution of the timestamp */
1733         xfer->ptp_sts_word_pre = progress;
1734
1735         if (irqs_off) {
1736                 local_irq_save(ctlr->irq_flags);
1737                 preempt_disable();
1738         }
1739
1740         ptp_read_system_prets(xfer->ptp_sts);
1741 }
1742 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1743
1744 /**
1745  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1746  * @ctlr: Pointer to the spi_controller structure of the driver
1747  * @xfer: Pointer to the transfer being timestamped
1748  * @progress: How many words (not bytes) have been transferred so far
1749  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1750  *
1751  * This is a helper for drivers to collect the end of the TX timestamp for
1752  * the requested byte from the SPI transfer. Can be called with an arbitrary
1753  * frequency: only the first call where @tx exceeds or is equal to the
1754  * requested word will be timestamped.
1755  */
1756 void spi_take_timestamp_post(struct spi_controller *ctlr,
1757                              struct spi_transfer *xfer,
1758                              size_t progress, bool irqs_off)
1759 {
1760         if (!xfer->ptp_sts)
1761                 return;
1762
1763         if (xfer->timestamped)
1764                 return;
1765
1766         if (progress < xfer->ptp_sts_word_post)
1767                 return;
1768
1769         ptp_read_system_postts(xfer->ptp_sts);
1770
1771         if (irqs_off) {
1772                 local_irq_restore(ctlr->irq_flags);
1773                 preempt_enable();
1774         }
1775
1776         /* Capture the resolution of the timestamp */
1777         xfer->ptp_sts_word_post = progress;
1778
1779         xfer->timestamped = true;
1780 }
1781 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
1782
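/*
 * Illustrative sketch: how a PIO driver brackets its TX loop with the
 * two timestamping helpers above.  Both calls return early outside the
 * requested snapshot window, so calling them once per word is cheap.
 * The "foo_*" names and the surrounding variables are hypothetical.
 *
 *	for (i = 0; i < xfer->len / fs->bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
 *		foo_spi_push_word(fs, i);
 *		spi_take_timestamp_post(ctlr, xfer, i, irqs_off);
 *	}
 */
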
1783 /**
1784  * spi_set_thread_rt - set the controller to pump at realtime priority
1785  * @ctlr: controller to boost priority of
1786  *
1787  * This can be called because the controller requested realtime priority
1788  * (by setting the ->rt value before calling spi_register_controller()) or
1789  * because a device on the bus said that its transfers needed realtime
1790  * priority.
1791  *
1792  * NOTE: at the moment if any device on a bus says it needs realtime then
1793  * the thread will be at realtime priority for all transfers on that
1794  * controller.  If this eventually becomes a problem we may see if we can
1795  * find a way to boost the priority only temporarily during relevant
1796  * transfers.
1797  */
1798 static void spi_set_thread_rt(struct spi_controller *ctlr)
1799 {
1800         dev_info(&ctlr->dev,
1801                 "will run message pump with realtime priority\n");
1802         sched_set_fifo(ctlr->kworker->task);
1803 }
1804
1805 static int spi_init_queue(struct spi_controller *ctlr)
1806 {
1807         ctlr->running = false;
1808         ctlr->busy = false;
1809
1810         ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1811         if (IS_ERR(ctlr->kworker)) {
1812                 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1813                 return PTR_ERR(ctlr->kworker);
1814         }
1815
1816         kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1817
1818         /*
1819          * Controller config will indicate if this controller should run the
1820          * message pump with high (realtime) priority to reduce the transfer
1821          * latency on the bus by minimising the delay between a transfer
1822          * request and the scheduling of the message pump thread. Without this
1823          * setting the message pump thread will remain at default priority.
1824          */
1825         if (ctlr->rt)
1826                 spi_set_thread_rt(ctlr);
1827
1828         return 0;
1829 }
1830
1831 /**
1832  * spi_get_next_queued_message() - called by driver to check for queued
1833  * messages
1834  * @ctlr: the controller to check for queued messages
1835  *
1836  * If there are more messages in the queue, the next message is returned from
1837  * this call.
1838  *
1839  * Return: the next message in the queue, else NULL if the queue is empty.
1840  */
1841 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1842 {
1843         struct spi_message *next;
1844         unsigned long flags;
1845
1846         /* get a pointer to the next message, if any */
1847         spin_lock_irqsave(&ctlr->queue_lock, flags);
1848         next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1849                                         queue);
1850         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1851
1852         return next;
1853 }
1854 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1855
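/*
 * Illustrative sketch: a driver may peek at the queue to decide whether
 * it is worth keeping the hardware primed between messages, e.g. at the
 * end of its transfer_one_message() ("foo_*" names hypothetical):
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_spi_relax_hardware(fs);
 */
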
1856 /**
1857  * spi_finalize_current_message() - the current message is complete
1858  * @ctlr: the controller to return the message to
1859  *
1860  * Called by the driver to notify the core that the message in the front of the
1861  * queue is complete and can be removed from the queue.
1862  */
1863 void spi_finalize_current_message(struct spi_controller *ctlr)
1864 {
1865         struct spi_transfer *xfer;
1866         struct spi_message *mesg;
1867         unsigned long flags;
1868         int ret;
1869
1870         spin_lock_irqsave(&ctlr->queue_lock, flags);
1871         mesg = ctlr->cur_msg;
1872         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1873
1874         if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1875                 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1876                         ptp_read_system_postts(xfer->ptp_sts);
1877                         xfer->ptp_sts_word_post = xfer->len;
1878                 }
1879         }
1880
1881         if (unlikely(ctlr->ptp_sts_supported))
1882                 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
1883                         WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1884
1885         spi_unmap_msg(ctlr, mesg);
1886
1887         /*
1888  * In the prepare_message() callback the SPI bus has the opportunity
1889  * to split a transfer into smaller chunks.
1890          *
1891          * Release the split transfers here since spi_map_msg() is done on
1892          * the split transfers.
1893          */
1894         spi_res_release(ctlr, mesg);
1895
1896         if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1897                 ret = ctlr->unprepare_message(ctlr, mesg);
1898                 if (ret) {
1899                         dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1900                                 ret);
1901                 }
1902         }
1903
1904         spin_lock_irqsave(&ctlr->queue_lock, flags);
1905         ctlr->cur_msg = NULL;
1906         ctlr->cur_msg_prepared = false;
1907         ctlr->fallback = false;
1908         kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1909         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1910
1911         trace_spi_message_done(mesg);
1912
1913         mesg->state = NULL;
1914         if (mesg->complete)
1915                 mesg->complete(mesg->context);
1916 }
1917 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1918
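/*
 * Illustrative sketch: drivers providing their own
 * transfer_one_message() must call spi_finalize_current_message()
 * exactly once per message, whether it succeeded or not ("foo_*"
 * names hypothetical):
 *
 *	static int foo_spi_transfer_one_message(struct spi_controller *ctlr,
 *						struct spi_message *msg)
 *	{
 *		int ret = foo_spi_do_one_message(ctlr, msg);
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *
 *		return ret;
 *	}
 */
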
1919 static int spi_start_queue(struct spi_controller *ctlr)
1920 {
1921         unsigned long flags;
1922
1923         spin_lock_irqsave(&ctlr->queue_lock, flags);
1924
1925         if (ctlr->running || ctlr->busy) {
1926                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1927                 return -EBUSY;
1928         }
1929
1930         ctlr->running = true;
1931         ctlr->cur_msg = NULL;
1932         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1933
1934         kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1935
1936         return 0;
1937 }
1938
1939 static int spi_stop_queue(struct spi_controller *ctlr)
1940 {
1941         unsigned long flags;
1942         unsigned limit = 500;
1943         int ret = 0;
1944
1945         spin_lock_irqsave(&ctlr->queue_lock, flags);
1946
1947         /*
1948          * This is a bit lame, but is optimized for the common execution path.
1949          * A wait_queue on the ctlr->busy could be used, but then the common
1950          * execution path (pump_messages) would be required to call wake_up or
1951          * friends on every SPI message. Do this instead.
1952          */
1953         while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1954                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1955                 usleep_range(10000, 11000);
1956                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1957         }
1958
1959         if (!list_empty(&ctlr->queue) || ctlr->busy)
1960                 ret = -EBUSY;
1961         else
1962                 ctlr->running = false;
1963
1964         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1965
1966         if (ret)
1967                 dev_warn(&ctlr->dev, "could not stop message queue\n");
1968
1969         return ret;
1971 }
1972
1973 static int spi_destroy_queue(struct spi_controller *ctlr)
1974 {
1975         int ret;
1976
1977         ret = spi_stop_queue(ctlr);
1978
1979         /*
1980          * kthread_flush_worker will block until all work is done.
1981          * If the reason that stop_queue timed out is that the work will never
1982          * finish, then it does no good to call flush/stop thread, so
1983          * return anyway.
1984          */
1985         if (ret) {
1986                 dev_err(&ctlr->dev, "problem destroying queue\n");
1987                 return ret;
1988         }
1989
1990         kthread_destroy_worker(ctlr->kworker);
1991
1992         return 0;
1993 }
1994
1995 static int __spi_queued_transfer(struct spi_device *spi,
1996                                  struct spi_message *msg,
1997                                  bool need_pump)
1998 {
1999         struct spi_controller *ctlr = spi->controller;
2000         unsigned long flags;
2001
2002         spin_lock_irqsave(&ctlr->queue_lock, flags);
2003
2004         if (!ctlr->running) {
2005                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2006                 return -ESHUTDOWN;
2007         }
2008         msg->actual_length = 0;
2009         msg->status = -EINPROGRESS;
2010
2011         list_add_tail(&msg->queue, &ctlr->queue);
2012         if (!ctlr->busy && need_pump)
2013                 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2014
2015         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2016         return 0;
2017 }
2018
2019 /**
2020  * spi_queued_transfer - transfer function for queued transfers
2021  * @spi: spi device which is requesting transfer
2022  * @msg: spi message which is to be handled and queued to the driver queue
2023  *
2024  * Return: zero on success, else a negative error code.
2025  */
2026 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2027 {
2028         return __spi_queued_transfer(spi, msg, true);
2029 }
2030
2031 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2032 {
2033         int ret;
2034
2035         ctlr->transfer = spi_queued_transfer;
2036         if (!ctlr->transfer_one_message)
2037                 ctlr->transfer_one_message = spi_transfer_one_message;
2038
2039         /* Initialize and start queue */
2040         ret = spi_init_queue(ctlr);
2041         if (ret) {
2042                 dev_err(&ctlr->dev, "problem initializing queue\n");
2043                 goto err_init_queue;
2044         }
2045         ctlr->queued = true;
2046         ret = spi_start_queue(ctlr);
2047         if (ret) {
2048                 dev_err(&ctlr->dev, "problem starting queue\n");
2049                 goto err_start_queue;
2050         }
2051
2052         return 0;
2053
2054 err_start_queue:
2055         spi_destroy_queue(ctlr);
2056 err_init_queue:
2057         return ret;
2058 }
2059
2060 /**
2061  * spi_flush_queue - Send all pending messages in the queue from the caller's
2062  *                   context
2063  * @ctlr: controller to process queue for
2064  *
2065  * This should be used when one wants to ensure all pending messages have been
2066  * sent before doing something. It is used by the spi-mem code to make sure SPI
2067  * memory operations do not preempt regular SPI transfers that have been queued
2068  * before the spi-mem operation.
2069  */
2070 void spi_flush_queue(struct spi_controller *ctlr)
2071 {
2072         if (ctlr->transfer == spi_queued_transfer)
2073                 __spi_pump_messages(ctlr, false);
2074 }
2075
2076 /*-------------------------------------------------------------------------*/
2077
2078 #if defined(CONFIG_OF)
2079 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2080                            struct device_node *nc)
2081 {
2082         u32 value;
2083         int rc;
2084
2085         /* Mode (clock phase/polarity/etc.) */
2086         if (of_property_read_bool(nc, "spi-cpha"))
2087                 spi->mode |= SPI_CPHA;
2088         if (of_property_read_bool(nc, "spi-cpol"))
2089                 spi->mode |= SPI_CPOL;
2090         if (of_property_read_bool(nc, "spi-3wire"))
2091                 spi->mode |= SPI_3WIRE;
2092         if (of_property_read_bool(nc, "spi-lsb-first"))
2093                 spi->mode |= SPI_LSB_FIRST;
2094         if (of_property_read_bool(nc, "spi-cs-high"))
2095                 spi->mode |= SPI_CS_HIGH;
2096
2097         /* Device DUAL/QUAD mode */
2098         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2099                 switch (value) {
2100                 case 0:
2101                         spi->mode |= SPI_NO_TX;
2102                         break;
2103                 case 1:
2104                         break;
2105                 case 2:
2106                         spi->mode |= SPI_TX_DUAL;
2107                         break;
2108                 case 4:
2109                         spi->mode |= SPI_TX_QUAD;
2110                         break;
2111                 case 8:
2112                         spi->mode |= SPI_TX_OCTAL;
2113                         break;
2114                 default:
2115                         dev_warn(&ctlr->dev,
2116                                 "spi-tx-bus-width %d not supported\n",
2117                                 value);
2118                         break;
2119                 }
2120         }
2121
2122         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2123                 switch (value) {
2124                 case 0:
2125                         spi->mode |= SPI_NO_RX;
2126                         break;
2127                 case 1:
2128                         break;
2129                 case 2:
2130                         spi->mode |= SPI_RX_DUAL;
2131                         break;
2132                 case 4:
2133                         spi->mode |= SPI_RX_QUAD;
2134                         break;
2135                 case 8:
2136                         spi->mode |= SPI_RX_OCTAL;
2137                         break;
2138                 default:
2139                         dev_warn(&ctlr->dev,
2140                                 "spi-rx-bus-width %d not supported\n",
2141                                 value);
2142                         break;
2143                 }
2144         }
2145
2146         if (spi_controller_is_slave(ctlr)) {
2147                 if (!of_node_name_eq(nc, "slave")) {
2148                         dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2149                                 nc);
2150                         return -EINVAL;
2151                 }
2152                 return 0;
2153         }
2154
2155         /* Device address */
2156         rc = of_property_read_u32(nc, "reg", &value);
2157         if (rc) {
2158                 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2159                         nc, rc);
2160                 return rc;
2161         }
2162         spi->chip_select = value;
2163
2164         /* Device speed */
2165         if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2166                 spi->max_speed_hz = value;
2167
2168         return 0;
2169 }
2170
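/*
 * Illustrative device tree fragment showing the per-slave properties
 * parsed above (node name, controller label and values are examples
 * only):
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <40000000>;
 *			spi-tx-bus-width = <4>;
 *			spi-rx-bus-width = <4>;
 *		};
 *	};
 */
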
2171 static struct spi_device *
2172 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2173 {
2174         struct spi_device *spi;
2175         int rc;
2176
2177         /* Alloc an spi_device */
2178         spi = spi_alloc_device(ctlr);
2179         if (!spi) {
2180                 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2181                 rc = -ENOMEM;
2182                 goto err_out;
2183         }
2184
2185         /* Select device driver */
2186         rc = of_modalias_node(nc, spi->modalias,
2187                                 sizeof(spi->modalias));
2188         if (rc < 0) {
2189                 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2190                 goto err_out;
2191         }
2192
2193         rc = of_spi_parse_dt(ctlr, spi, nc);
2194         if (rc)
2195                 goto err_out;
2196
2197         /* Store a pointer to the node in the device structure */
2198         of_node_get(nc);
2199         spi->dev.of_node = nc;
2200         spi->dev.fwnode = of_fwnode_handle(nc);
2201
2202         /* Register the new device */
2203         rc = spi_add_device(spi);
2204         if (rc) {
2205                 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2206                 goto err_of_node_put;
2207         }
2208
2209         return spi;
2210
2211 err_of_node_put:
2212         of_node_put(nc);
2213 err_out:
2214         spi_dev_put(spi);
2215         return ERR_PTR(rc);
2216 }
2217
2218 /**
2219  * of_register_spi_devices() - Register child devices onto the SPI bus
2220  * @ctlr:       Pointer to spi_controller device
2221  *
2222  * Registers an spi_device for each child node of the controller node which
2223  * represents a valid SPI slave.
2224  */
2225 static void of_register_spi_devices(struct spi_controller *ctlr)
2226 {
2227         struct spi_device *spi;
2228         struct device_node *nc;
2229
2230         if (!ctlr->dev.of_node)
2231                 return;
2232
2233         for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2234                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2235                         continue;
2236                 spi = of_register_spi_device(ctlr, nc);
2237                 if (IS_ERR(spi)) {
2238                         dev_warn(&ctlr->dev,
2239                                  "Failed to create SPI device for %pOF\n", nc);
2240                         of_node_clear_flag(nc, OF_POPULATED);
2241                 }
2242         }
2243 }
2244 #else
2245 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2246 #endif
2247
2248 /**
2249  * spi_new_ancillary_device() - Register ancillary SPI device
2250  * @spi:         Pointer to the main SPI device registering the ancillary device
2251  * @chip_select: Chip Select of the ancillary device
2252  *
2253  * Register an ancillary SPI device; for example some chips have a chip-select
2254  * for normal device usage and another one for setup/firmware upload.
2255  *
2256  * This may only be called from the main SPI device's probe routine.
2257  *
2258  * Return: 0 on success; negative errno on failure
2259  */
2260 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2261                                              u8 chip_select)
2262 {
2263         struct spi_device *ancillary;
2264         int rc = 0;
2265
2266         /* Alloc an spi_device */
2267         ancillary = spi_alloc_device(spi->controller);
2268         if (!ancillary) {
2269                 rc = -ENOMEM;
2270                 goto err_out;
2271         }
2272
2273         strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2274
2275         /* Use provided chip-select for ancillary device */
2276         ancillary->chip_select = chip_select;
2277
2278         /* Take over SPI mode/speed from SPI main device */
2279         ancillary->max_speed_hz = spi->max_speed_hz;
2280         ancillary->mode = spi->mode;
2281
2282         /* Register the new device */
2283         rc = spi_add_device_locked(ancillary);
2284         if (rc) {
2285                 dev_err(&spi->dev, "failed to register ancillary device\n");
2286                 goto err_out;
2287         }
2288
2289         return ancillary;
2290
2291 err_out:
2292         spi_dev_put(ancillary);
2293         return ERR_PTR(rc);
2294 }
2295 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
2296
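/*
 * Illustrative sketch: registering an ancillary chip select from the
 * main device's probe() ("foo_probe" and the chip-select value are
 * hypothetical):
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *
 *		return 0;
 *	}
 */
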
2297 #ifdef CONFIG_ACPI
2298 struct acpi_spi_lookup {
2299         struct spi_controller   *ctlr;
2300         u32                     max_speed_hz;
2301         u32                     mode;
2302         int                     irq;
2303         u8                      bits_per_word;
2304         u8                      chip_select;
2305         int                     n;
2306         int                     index;
2307 };
2308
2309 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2310 {
2311         struct acpi_resource_spi_serialbus *sb;
2312         int *count = data;
2313
2314         if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2315                 return 1;
2316
2317         sb = &ares->data.spi_serial_bus;
2318         if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2319                 return 1;
2320
2321         *count = *count + 1;
2322
2323         return 1;
2324 }
2325
2326 /**
2327  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2328  * @adev:       ACPI device
2329  *
2330  * Returns the number of SpiSerialBus resources in the ACPI device's
2331  * resource list, or a negative error code.
2332  */
2333 int acpi_spi_count_resources(struct acpi_device *adev)
2334 {
2335         LIST_HEAD(r);
2336         int count = 0;
2337         int ret;
2338
2339         ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2340         if (ret < 0)
2341                 return ret;
2342
2343         acpi_dev_free_resource_list(&r);
2344
2345         return count;
2346 }
2347 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2348
2349 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2350                                             struct acpi_spi_lookup *lookup)
2351 {
2352         const union acpi_object *obj;
2353
2354         if (!x86_apple_machine)
2355                 return;
2356
2357         if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2358             && obj->buffer.length >= 4)
2359                 lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2360
2361         if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2362             && obj->buffer.length == 8)
2363                 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2364
2365         if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2366             && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2367                 lookup->mode |= SPI_LSB_FIRST;
2368
2369         if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2370             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2371                 lookup->mode |= SPI_CPOL;
2372
2373         if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2374             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2375                 lookup->mode |= SPI_CPHA;
2376 }
2377
2378 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2379
2380 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2381 {
2382         struct acpi_spi_lookup *lookup = data;
2383         struct spi_controller *ctlr = lookup->ctlr;
2384
2385         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2386                 struct acpi_resource_spi_serialbus *sb;
2387                 acpi_handle parent_handle;
2388                 acpi_status status;
2389
2390                 sb = &ares->data.spi_serial_bus;
2391                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2392
2393                         if (lookup->index != -1 && lookup->n++ != lookup->index)
2394                                 return 1;
2395
2396                         if (lookup->index == -1 && !ctlr)
2397                                 return -ENODEV;
2398
2399                         status = acpi_get_handle(NULL,
2400                                                  sb->resource_source.string_ptr,
2401                                                  &parent_handle);
2402
2403                         if (ACPI_FAILURE(status))
2404                                 return -ENODEV;
2405
2406                         if (ctlr) {
2407                                 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2408                                         return -ENODEV;
2409                         } else {
2410                                 struct acpi_device *adev;
2411
2412                                 adev = acpi_fetch_acpi_dev(parent_handle);
2413                                 if (!adev)
2414                                         return -ENODEV;
2415
2416                                 ctlr = acpi_spi_find_controller_by_adev(adev);
2417                                 if (!ctlr)
2418                                         return -ENODEV;
2419
2420                                 lookup->ctlr = ctlr;
2421                         }
2422
2423                         /*
2424                          * ACPI DeviceSelection numbering is handled by the
2425                          * host controller driver in Windows and can vary
2426                          * from driver to driver. In Linux we always expect
2427                          * 0 .. max - 1 so we need to ask the driver to
2428                          * translate between the two schemes.
2429                          */
2430                         if (ctlr->fw_translate_cs) {
2431                                 int cs = ctlr->fw_translate_cs(ctlr,
2432                                                 sb->device_selection);
2433                                 if (cs < 0)
2434                                         return cs;
2435                                 lookup->chip_select = cs;
2436                         } else {
2437                                 lookup->chip_select = sb->device_selection;
2438                         }
2439
2440                         lookup->max_speed_hz = sb->connection_speed;
2441                         lookup->bits_per_word = sb->data_bit_length;
2442
2443                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2444                                 lookup->mode |= SPI_CPHA;
2445                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2446                                 lookup->mode |= SPI_CPOL;
2447                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2448                                 lookup->mode |= SPI_CS_HIGH;
2449                 }
2450         } else if (lookup->irq < 0) {
2451                 struct resource r;
2452
2453                 if (acpi_dev_resource_interrupt(ares, 0, &r))
2454                         lookup->irq = r.start;
2455         }
2456
2457         /* Always tell the ACPI core to skip this resource */
2458         return 1;
2459 }
2460
2461 /**
2462  * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2463  * @ctlr: controller to which the spi device belongs
2464  * @adev: ACPI Device for the spi device
2465  * @index: Index of the spi resource inside the ACPI Node
2466  *
2467  * This should be used to allocate a new spi device from an ACPI Node.
2468  * The caller is responsible for calling spi_add_device to register the spi device.
2469  *
2470  * If ctlr is set to NULL, the controller for the spi device will be looked up
2471  * using the resource.
2472  * If index is set to -1, index is not used.
2473  * Note: If index is -1, ctlr must be set.
2474  *
2475  * Return: a pointer to the new device, or ERR_PTR on error.
2476  */
2477 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2478                                          struct acpi_device *adev,
2479                                          int index)
2480 {
2481         acpi_handle parent_handle = NULL;
2482         struct list_head resource_list;
2483         struct acpi_spi_lookup lookup = {};
2484         struct spi_device *spi;
2485         int ret;
2486
2487         if (!ctlr && index == -1)
2488                 return ERR_PTR(-EINVAL);
2489
2490         lookup.ctlr             = ctlr;
2491         lookup.irq              = -1;
2492         lookup.index            = index;
2493         lookup.n                = 0;
2494
2495         INIT_LIST_HEAD(&resource_list);
2496         ret = acpi_dev_get_resources(adev, &resource_list,
2497                                      acpi_spi_add_resource, &lookup);
2498         acpi_dev_free_resource_list(&resource_list);
2499
2500         if (ret < 0)
2501                 /* found SPI in _CRS but it points to another controller */
2502                 return ERR_PTR(-ENODEV);
2503
2504         if (!lookup.max_speed_hz &&
2505             ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2506             ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2507                 /* Apple does not use _CRS but nested devices for SPI slaves */
2508                 acpi_spi_parse_apple_properties(adev, &lookup);
2509         }
2510
2511         if (!lookup.max_speed_hz)
2512                 return ERR_PTR(-ENODEV);
2513
2514         spi = spi_alloc_device(lookup.ctlr);
2515         if (!spi) {
2516                 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2517                         dev_name(&adev->dev));
2518                 return ERR_PTR(-ENOMEM);
2519         }
2520
2521         ACPI_COMPANION_SET(&spi->dev, adev);
2522         spi->max_speed_hz       = lookup.max_speed_hz;
2523         spi->mode               |= lookup.mode;
2524         spi->irq                = lookup.irq;
2525         spi->bits_per_word      = lookup.bits_per_word;
2526         spi->chip_select        = lookup.chip_select;
2527
2528         return spi;
2529 }
2530 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
2531
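/*
 * Illustrative sketch: enumerating every SpiSerialBus resource of an
 * ACPI node by combining the two helpers above (error handling
 * abbreviated, surrounding variables hypothetical):
 *
 *	int i, n = acpi_spi_count_resources(adev);
 *
 *	for (i = 0; i < n; i++) {
 *		struct spi_device *spi;
 *
 *		spi = acpi_spi_device_alloc(NULL, adev, i);
 *		if (IS_ERR(spi))
 *			return PTR_ERR(spi);
 *		if (spi_add_device(spi))
 *			spi_dev_put(spi);
 *	}
 */
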
2532 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2533                                             struct acpi_device *adev)
2534 {
2535         struct spi_device *spi;
2536
2537         if (acpi_bus_get_status(adev) || !adev->status.present ||
2538             acpi_device_enumerated(adev))
2539                 return AE_OK;
2540
2541         spi = acpi_spi_device_alloc(ctlr, adev, -1);
2542         if (IS_ERR(spi)) {
2543                 if (PTR_ERR(spi) == -ENOMEM)
2544                         return AE_NO_MEMORY;
2545                 else
2546                         return AE_OK;
2547         }
2548
2549         acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2550                           sizeof(spi->modalias));
2551
2552         if (spi->irq < 0)
2553                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2554
2555         acpi_device_set_enumerated(adev);
2556
2557         adev->power.flags.ignore_parent = true;
2558         if (spi_add_device(spi)) {
2559                 adev->power.flags.ignore_parent = false;
2560                 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2561                         dev_name(&adev->dev));
2562                 spi_dev_put(spi);
2563         }
2564
2565         return AE_OK;
2566 }
2567
2568 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2569                                        void *data, void **return_value)
2570 {
2571         struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2572         struct spi_controller *ctlr = data;
2573
2574         if (!adev)
2575                 return AE_OK;
2576
2577         return acpi_register_spi_device(ctlr, adev);
2578 }
2579
2580 #define SPI_ACPI_ENUMERATE_MAX_DEPTH            32
2581
2582 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2583 {
2584         acpi_status status;
2585         acpi_handle handle;
2586
2587         handle = ACPI_HANDLE(ctlr->dev.parent);
2588         if (!handle)
2589                 return;
2590
2591         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2592                                      SPI_ACPI_ENUMERATE_MAX_DEPTH,
2593                                      acpi_spi_add_device, NULL, ctlr, NULL);
2594         if (ACPI_FAILURE(status))
2595                 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2596 }
2597 #else
2598 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2599 #endif /* CONFIG_ACPI */
2600
2601 static void spi_controller_release(struct device *dev)
2602 {
2603         struct spi_controller *ctlr;
2604
2605         ctlr = container_of(dev, struct spi_controller, dev);
2606         kfree(ctlr);
2607 }
2608
2609 static struct class spi_master_class = {
2610         .name           = "spi_master",
2611         .owner          = THIS_MODULE,
2612         .dev_release    = spi_controller_release,
2613         .dev_groups     = spi_master_groups,
2614 };
2615
2616 #ifdef CONFIG_SPI_SLAVE
2617 /**
2618  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2619  *                   controller
2620  * @spi: device used for the current transfer
2621  */
2622 int spi_slave_abort(struct spi_device *spi)
2623 {
2624         struct spi_controller *ctlr = spi->controller;
2625
2626         if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2627                 return ctlr->slave_abort(ctlr);
2628
2629         return -ENOTSUPP;
2630 }
2631 EXPORT_SYMBOL_GPL(spi_slave_abort);
2632
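/*
 * Illustrative sketch: a slave protocol driver typically calls this
 * from its remove() path to cancel a submitted-but-unserved request
 * (compare drivers/spi/spi-slave-time.c; "foo_*"/"priv" hypothetical):
 *
 *	static void foo_slave_remove(struct spi_device *spi)
 *	{
 *		spi_slave_abort(spi);
 *		wait_for_completion(&priv->finished);
 *	}
 */
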
2633 static int match_true(struct device *dev, void *data)
2634 {
2635         return 1;
2636 }
2637
2638 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2639                           char *buf)
2640 {
2641         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2642                                                    dev);
2643         struct device *child;
2644
2645         child = device_find_child(&ctlr->dev, NULL, match_true);
2646         return sprintf(buf, "%s\n",
2647                        child ? to_spi_device(child)->modalias : NULL);
2648 }
2649
2650 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2651                            const char *buf, size_t count)
2652 {
2653         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2654                                                    dev);
2655         struct spi_device *spi;
2656         struct device *child;
2657         char name[32];
2658         int rc;
2659
2660         rc = sscanf(buf, "%31s", name);
2661         if (rc != 1 || !name[0])
2662                 return -EINVAL;
2663
2664         child = device_find_child(&ctlr->dev, NULL, match_true);
2665         if (child) {
2666                 /* Remove registered slave */
2667                 device_unregister(child);
2668                 put_device(child);
2669         }
2670
2671         if (strcmp(name, "(null)")) {
2672                 /* Register new slave */
2673                 spi = spi_alloc_device(ctlr);
2674                 if (!spi)
2675                         return -ENOMEM;
2676
2677                 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2678
2679                 rc = spi_add_device(spi);
2680                 if (rc) {
2681                         spi_dev_put(spi);
2682                         return rc;
2683                 }
2684         }
2685
2686         return count;
2687 }
2688
2689 static DEVICE_ATTR_RW(slave);
2690
2691 static struct attribute *spi_slave_attrs[] = {
2692         &dev_attr_slave.attr,
2693         NULL,
2694 };
2695
2696 static const struct attribute_group spi_slave_group = {
2697         .attrs = spi_slave_attrs,
2698 };
2699
2700 static const struct attribute_group *spi_slave_groups[] = {
2701         &spi_controller_statistics_group,
2702         &spi_slave_group,
2703         NULL,
2704 };
2705
2706 static struct class spi_slave_class = {
2707         .name           = "spi_slave",
2708         .owner          = THIS_MODULE,
2709         .dev_release    = spi_controller_release,
2710         .dev_groups     = spi_slave_groups,
2711 };
2712 #else
2713 extern struct class spi_slave_class;    /* dummy */
2714 #endif
2715
2716 /**
2717  * __spi_alloc_controller - allocate an SPI master or slave controller
2718  * @dev: the controller, possibly using the platform_bus
2719  * @size: how much zeroed driver-private data to allocate; the pointer to this
2720  *      memory is in the driver_data field of the returned device, accessible
2721  *      with spi_controller_get_devdata(); the memory is cacheline aligned;
2722  *      drivers granting DMA access to portions of their private data need to
2723  *      round up @size using ALIGN(size, dma_get_cache_alignment()).
2724  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2725  *      slave (true) controller
2726  * Context: can sleep
2727  *
2728  * This call is used only by SPI controller drivers, which are the
2729  * only ones directly touching chip registers.  It's how they allocate
2730  * an spi_controller structure, prior to calling spi_register_controller().
2731  *
2732  * This must be called from context that can sleep.
2733  *
2734  * The caller is responsible for assigning the bus number and initializing the
2735  * controller's methods before calling spi_register_controller(); and (after
2736  * errors adding the device) calling spi_controller_put() to prevent a memory
2737  * leak.
2738  *
2739  * Return: the SPI controller structure on success, else NULL.
2740  */
2741 struct spi_controller *__spi_alloc_controller(struct device *dev,
2742                                               unsigned int size, bool slave)
2743 {
2744         struct spi_controller   *ctlr;
2745         size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2746
2747         if (!dev)
2748                 return NULL;
2749
2750         ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2751         if (!ctlr)
2752                 return NULL;
2753
2754         device_initialize(&ctlr->dev);
2755         INIT_LIST_HEAD(&ctlr->queue);
2756         spin_lock_init(&ctlr->queue_lock);
2757         spin_lock_init(&ctlr->bus_lock_spinlock);
2758         mutex_init(&ctlr->bus_lock_mutex);
2759         mutex_init(&ctlr->io_mutex);
2760         mutex_init(&ctlr->add_lock);
2761         ctlr->bus_num = -1;
2762         ctlr->num_chipselect = 1;
2763         ctlr->slave = slave;
2764         if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2765                 ctlr->dev.class = &spi_slave_class;
2766         else
2767                 ctlr->dev.class = &spi_master_class;
2768         ctlr->dev.parent = dev;
2769         pm_suspend_ignore_children(&ctlr->dev, true);
2770         spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2771
2772         return ctlr;
2773 }
2774 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
2775
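/*
 * Illustrative sketch: the usual life cycle in a controller driver's
 * probe() ("foo_*" names hypothetical).  spi_alloc_master() and
 * spi_alloc_slave() are convenience wrappers around this helper:
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *
 *	fs = spi_controller_get_devdata(ctlr);
 *	ctlr->bus_num = -1;
 *	ctlr->num_chipselect = 4;
 *	ctlr->transfer_one = foo_spi_transfer_one;
 *
 *	ret = spi_register_controller(ctlr);
 *	if (ret)
 *		spi_controller_put(ctlr);
 */
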
2776 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2777 {
2778         spi_controller_put(*(struct spi_controller **)ctlr);
2779 }
2780
2781 /**
2782  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2783  * @dev: physical device of SPI controller
2784  * @size: how much zeroed driver-private data to allocate
2785  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2786  * Context: can sleep
2787  *
2788  * Allocate an SPI controller and automatically release a reference on it
2789  * when @dev is unbound from its driver.  Drivers are thus relieved from
2790  * having to call spi_controller_put().
2791  *
2792  * The arguments to this function are identical to __spi_alloc_controller().
2793  *
2794  * Return: the SPI controller structure on success, else NULL.
2795  */
2796 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2797                                                    unsigned int size,
2798                                                    bool slave)
2799 {
2800         struct spi_controller **ptr, *ctlr;
2801
2802         ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2803                            GFP_KERNEL);
2804         if (!ptr)
2805                 return NULL;
2806
2807         ctlr = __spi_alloc_controller(dev, size, slave);
2808         if (ctlr) {
2809                 ctlr->devm_allocated = true;
2810                 *ptr = ctlr;
2811                 devres_add(dev, ptr);
2812         } else {
2813                 devres_free(ptr);
2814         }
2815
2816         return ctlr;
2817 }
2818 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2819
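/*
 * Illustrative sketch: with the managed variant the error path needs no
 * explicit spi_controller_put(); pairing it with
 * devm_spi_register_controller() removes the unwind code entirely
 * ("foo_spi" hypothetical):
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */
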
2820 /**
2821  * spi_get_gpio_descs() - grab chip select GPIOs for the master
2822  * @ctlr: The SPI master to grab GPIO descriptors for
2823  */
2824 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2825 {
2826         int nb, i;
2827         struct gpio_desc **cs;
2828         struct device *dev = &ctlr->dev;
2829         unsigned long native_cs_mask = 0;
2830         unsigned int num_cs_gpios = 0;
2831
2832         nb = gpiod_count(dev, "cs");
2833         if (nb < 0) {
2834                 /* No GPIOs at all is fine, else return the error */
2835                 if (nb == -ENOENT)
2836                         return 0;
2837                 return nb;
2838         }
2839
2840         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2841
2842         cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2843                           GFP_KERNEL);
2844         if (!cs)
2845                 return -ENOMEM;
2846         ctlr->cs_gpiods = cs;
2847
2848         for (i = 0; i < nb; i++) {
2849                 /*
2850                  * Most chipselects are active low, the inverted
2851                  * semantics are handled by special quirks in gpiolib,
2852                  * so initializing them to GPIOD_OUT_LOW here means
2853                  * "unasserted", in most cases this will drive the physical
2854                  * line high.
2855                  */
2856                 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2857                                                       GPIOD_OUT_LOW);
2858                 if (IS_ERR(cs[i]))
2859                         return PTR_ERR(cs[i]);
2860
2861                 if (cs[i]) {
2862                         /*
2863                          * If we find a CS GPIO, name it after the device and
2864                          * chip select line.
2865                          */
2866                         char *gpioname;
2867
2868                         gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2869                                                   dev_name(dev), i);
2870                         if (!gpioname)
2871                                 return -ENOMEM;
2872                         gpiod_set_consumer_name(cs[i], gpioname);
2873                         num_cs_gpios++;
2874                         continue;
2875                 }
2876
2877                 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2878                         dev_err(dev, "Invalid native chip select %d\n", i);
2879                         return -EINVAL;
2880                 }
2881                 native_cs_mask |= BIT(i);
2882         }
2883
2884         ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2885
2886         if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2887             ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2888                 dev_err(dev, "No unused native chip select available\n");
2889                 return -EINVAL;
2890         }
2891
2892         return 0;
2893 }
2894
2895 static int spi_controller_check_ops(struct spi_controller *ctlr)
2896 {
2897         /*
2898          * The controller may implement only the high-level SPI-memory-like
2899          * operations if it does not support regular SPI transfers, and this is a
2900          * valid use case.
2901          * If ->mem_ops is NULL, we request that at least one of the
2902          * ->transfer_xxx() methods be implemented.
2903          */
2904         if (ctlr->mem_ops) {
2905                 if (!ctlr->mem_ops->exec_op)
2906                         return -EINVAL;
2907         } else if (!ctlr->transfer && !ctlr->transfer_one &&
2908                    !ctlr->transfer_one_message) {
2909                 return -EINVAL;
2910         }
2911
2912         return 0;
2913 }
2914
2915 /**
2916  * spi_register_controller - register SPI master or slave controller
2917  * @ctlr: initialized master, originally from spi_alloc_master() or
2918  *      spi_alloc_slave()
2919  * Context: can sleep
2920  *
2921  * SPI controllers connect to their drivers using some non-SPI bus,
2922  * such as the platform bus.  The final stage of probe() in that code
2923  * includes calling spi_register_controller() to hook up to this SPI bus glue.
2924  *
2925  * SPI controllers use board specific (often SOC specific) bus numbers,
2926  * and board-specific addressing for SPI devices combines those numbers
2927  * with chip select numbers.  Since SPI does not directly support dynamic
2928  * device identification, boards need configuration tables telling which
2929  * chip is at which address.
2930  *
2931  * This must be called from context that can sleep.  It returns zero on
2932  * success, else a negative error code (dropping the controller's refcount).
2933  * After a successful return, the caller is responsible for calling
2934  * spi_unregister_controller().
2935  *
2936  * Return: zero on success, else a negative error code.
2937  */
2938 int spi_register_controller(struct spi_controller *ctlr)
2939 {
2940         struct device           *dev = ctlr->dev.parent;
2941         struct boardinfo        *bi;
2942         int                     status;
2943         int                     id, first_dynamic;
2944
2945         if (!dev)
2946                 return -ENODEV;
2947
2948         /*
2949          * Make sure all necessary hooks are implemented before registering
2950          * the SPI controller.
2951          */
2952         status = spi_controller_check_ops(ctlr);
2953         if (status)
2954                 return status;
2955
2956         if (ctlr->bus_num >= 0) {
2957                 /* Devices with a fixed bus number must register with that number */
2958                 mutex_lock(&board_lock);
2959                 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2960                         ctlr->bus_num + 1, GFP_KERNEL);
2961                 mutex_unlock(&board_lock);
2962                 if (WARN(id < 0, "couldn't get idr"))
2963                         return id == -ENOSPC ? -EBUSY : id;
2964                 ctlr->bus_num = id;
2965         } else if (ctlr->dev.of_node) {
2966                 /* allocate dynamic bus number using Linux idr */
2967                 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2968                 if (id >= 0) {
2969                         ctlr->bus_num = id;
2970                         mutex_lock(&board_lock);
2971                         id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2972                                        ctlr->bus_num + 1, GFP_KERNEL);
2973                         mutex_unlock(&board_lock);
2974                         if (WARN(id < 0, "couldn't get idr"))
2975                                 return id == -ENOSPC ? -EBUSY : id;
2976                 }
2977         }
2978         if (ctlr->bus_num < 0) {
2979                 first_dynamic = of_alias_get_highest_id("spi");
2980                 if (first_dynamic < 0)
2981                         first_dynamic = 0;
2982                 else
2983                         first_dynamic++;
2984
2985                 mutex_lock(&board_lock);
2986                 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2987                                0, GFP_KERNEL);
2988                 mutex_unlock(&board_lock);
2989                 if (WARN(id < 0, "couldn't get idr"))
2990                         return id;
2991                 ctlr->bus_num = id;
2992         }
2993         ctlr->bus_lock_flag = 0;
2994         init_completion(&ctlr->xfer_completion);
2995         if (!ctlr->max_dma_len)
2996                 ctlr->max_dma_len = INT_MAX;
2997
2998         /*
2999          * Register the device, then userspace will see it.
3000          * Registration fails if the bus ID is in use.
3001          */
3002         dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3003
3004         if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3005                 status = spi_get_gpio_descs(ctlr);
3006                 if (status)
3007                         goto free_bus_id;
3008                 /*
3009                  * A controller using GPIO descriptors always
3010                  * supports SPI_CS_HIGH if need be.
3011                  */
3012                 ctlr->mode_bits |= SPI_CS_HIGH;
3013         }
3014
3015         /*
3016          * Even if it's just one always-selected device, there must
3017          * be at least one chipselect.
3018          */
3019         if (!ctlr->num_chipselect) {
3020                 status = -EINVAL;
3021                 goto free_bus_id;
3022         }
3023
3024         /* setting last_cs to -1 means no chip selected */
3025         ctlr->last_cs = -1;
3026
3027         status = device_add(&ctlr->dev);
3028         if (status < 0)
3029                 goto free_bus_id;
3030         dev_dbg(dev, "registered %s %s\n",
3031                         spi_controller_is_slave(ctlr) ? "slave" : "master",
3032                         dev_name(&ctlr->dev));
3033
3034         /*
3035          * If we're using a queued driver, start the queue. Note that we don't
3036          * need the queueing logic if the driver is only supporting high-level
3037          * memory operations.
3038          */
3039         if (ctlr->transfer) {
3040                 dev_info(dev, "controller is unqueued, this is deprecated\n");
3041         } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3042                 status = spi_controller_initialize_queue(ctlr);
3043                 if (status) {
3044                         device_del(&ctlr->dev);
3045                         goto free_bus_id;
3046                 }
3047         }
3048         /* add statistics */
3049         spin_lock_init(&ctlr->statistics.lock);
3050
3051         mutex_lock(&board_lock);
3052         list_add_tail(&ctlr->list, &spi_controller_list);
3053         list_for_each_entry(bi, &board_list, list)
3054                 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3055         mutex_unlock(&board_lock);
3056
3057         /* Register devices from the device tree and ACPI */
3058         of_register_spi_devices(ctlr);
3059         acpi_register_spi_devices(ctlr);
3060         return status;
3061
3062 free_bus_id:
3063         mutex_lock(&board_lock);
3064         idr_remove(&spi_master_idr, ctlr->bus_num);
3065         mutex_unlock(&board_lock);
3066         return status;
3067 }
3068 EXPORT_SYMBOL_GPL(spi_register_controller);
3069
3070 static void devm_spi_unregister(void *ctlr)
3071 {
3072         spi_unregister_controller(ctlr);
3073 }
3074
3075 /**
3076  * devm_spi_register_controller - register managed SPI master or slave
3077  *      controller
3078  * @dev:    device managing SPI controller
3079  * @ctlr: initialized controller, originally from spi_alloc_master() or
3080  *      spi_alloc_slave()
3081  * Context: can sleep
3082  *
3083  * Register an SPI controller as with spi_register_controller(); it will
3084  * automatically be unregistered and freed when @dev is unbound.
3085  *
3086  * Return: zero on success, else a negative error code.
3087  */
3088 int devm_spi_register_controller(struct device *dev,
3089                                  struct spi_controller *ctlr)
3090 {
3091         int ret;
3092
3093         ret = spi_register_controller(ctlr);
3094         if (ret)
3095                 return ret;
3096
3097         return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr);
3098 }
3099 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
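/*
 * Illustrative sketch (not from this file): how a hypothetical controller
 * driver might combine the managed allocation and registration helpers in
 * its probe(). "foo_spi" and foo_spi_transfer_one() are made-up names used
 * only for illustration.
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->bus_num = -1;		// request a dynamic bus number
 *		ctlr->num_chipselect = 4;
 *		ctlr->transfer_one = foo_spi_transfer_one;
 *
 *		// unregistration and freeing are tied to pdev->dev teardown
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */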
3100
3101 static int __unregister(struct device *dev, void *null)
3102 {
3103         spi_unregister_device(to_spi_device(dev));
3104         return 0;
3105 }
3106
3107 /**
3108  * spi_unregister_controller - unregister SPI master or slave controller
3109  * @ctlr: the controller being unregistered
3110  * Context: can sleep
3111  *
3112  * This call is used only by SPI controller drivers, which are the
3113  * only ones directly touching chip registers.
3114  *
3115  * This must be called from context that can sleep.
3116  *
3117  * Note that this function also drops a reference to the controller.
3118  */
3119 void spi_unregister_controller(struct spi_controller *ctlr)
3120 {
3121         struct spi_controller *found;
3122         int id = ctlr->bus_num;
3123
3124         /* Prevent addition of new devices, unregister existing ones */
3125         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3126                 mutex_lock(&ctlr->add_lock);
3127
3128         device_for_each_child(&ctlr->dev, NULL, __unregister);
3129
3130         /* First make sure that this controller was ever added */
3131         mutex_lock(&board_lock);
3132         found = idr_find(&spi_master_idr, id);
3133         mutex_unlock(&board_lock);
3134         if (ctlr->queued) {
3135                 if (spi_destroy_queue(ctlr))
3136                         dev_err(&ctlr->dev, "queue remove failed\n");
3137         }
3138         mutex_lock(&board_lock);
3139         list_del(&ctlr->list);
3140         mutex_unlock(&board_lock);
3141
3142         device_del(&ctlr->dev);
3143
3144         /* free bus id */
3145         mutex_lock(&board_lock);
3146         if (found == ctlr)
3147                 idr_remove(&spi_master_idr, id);
3148         mutex_unlock(&board_lock);
3149
3150         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3151                 mutex_unlock(&ctlr->add_lock);
3152
3153         /* Release the last reference on the controller if its driver
3154          * has not yet been converted to devm_spi_alloc_master/slave().
3155          */
3156         if (!ctlr->devm_allocated)
3157                 put_device(&ctlr->dev);
3158 }
3159 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3160
3161 int spi_controller_suspend(struct spi_controller *ctlr)
3162 {
3163         int ret;
3164
3165         /* Basically no-ops for non-queued controllers */
3166         if (!ctlr->queued)
3167                 return 0;
3168
3169         ret = spi_stop_queue(ctlr);
3170         if (ret)
3171                 dev_err(&ctlr->dev, "queue stop failed\n");
3172
3173         return ret;
3174 }
3175 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3176
3177 int spi_controller_resume(struct spi_controller *ctlr)
3178 {
3179         int ret;
3180
3181         if (!ctlr->queued)
3182                 return 0;
3183
3184         ret = spi_start_queue(ctlr);
3185         if (ret)
3186                 dev_err(&ctlr->dev, "queue restart failed\n");
3187
3188         return ret;
3189 }
3190 EXPORT_SYMBOL_GPL(spi_controller_resume);
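/*
 * Illustrative sketch (not from this file): a hypothetical controller
 * driver wiring the two helpers above into its dev_pm_ops. All "foo_*"
 * names are made up, and the drvdata layout is an assumption.
 *
 *	static int __maybe_unused foo_spi_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);	// stops the queue
 *	}
 *
 *	static int __maybe_unused foo_spi_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);	// restarts the queue
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);
 */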
3191
3192 /*-------------------------------------------------------------------------*/
3193
3194 /* Core methods for spi_message alterations */
3195
3196 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3197                                             struct spi_message *msg,
3198                                             void *res)
3199 {
3200         struct spi_replaced_transfers *rxfer = res;
3201         size_t i;
3202
3203         /* call extra callback if requested */
3204         if (rxfer->release)
3205                 rxfer->release(ctlr, msg, res);
3206
3207         /* insert replaced transfers back into the message */
3208         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3209
3210         /* remove the formerly inserted entries */
3211         for (i = 0; i < rxfer->inserted; i++)
3212                 list_del(&rxfer->inserted_transfers[i].transfer_list);
3213 }
3214
3215 /**
3216  * spi_replace_transfers - replace transfers with several transfers
3217  *                         and register change with spi_message.resources
3218  * @msg:           the spi_message we work upon
3219  * @xfer_first:    the first spi_transfer we want to replace
3220  * @remove:        number of transfers to remove
3221  * @insert:        the number of transfers we want to insert instead
3222  * @release:       extra release code necessary in some circumstances
3223  * @extradatasize: extra data to allocate (with alignment guarantees
3224  *                 of struct spi_transfer)
3225  * @gfp:           gfp flags
3226  *
3227  * Return: pointer to the new struct spi_replaced_transfers,
3228  *         or ERR_PTR(...) in case of errors.
3229  */
3230 static struct spi_replaced_transfers *spi_replace_transfers(
3231         struct spi_message *msg,
3232         struct spi_transfer *xfer_first,
3233         size_t remove,
3234         size_t insert,
3235         spi_replaced_release_t release,
3236         size_t extradatasize,
3237         gfp_t gfp)
3238 {
3239         struct spi_replaced_transfers *rxfer;
3240         struct spi_transfer *xfer;
3241         size_t i;
3242
3243         /* allocate the structure using spi_res */
3244         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3245                               struct_size(rxfer, inserted_transfers, insert)
3246                               + extradatasize,
3247                               gfp);
3248         if (!rxfer)
3249                 return ERR_PTR(-ENOMEM);
3250
3251         /* the release code to invoke before running the generic release */
3252         rxfer->release = release;
3253
3254         /* assign extradata */
3255         if (extradatasize)
3256                 rxfer->extradata =
3257                         &rxfer->inserted_transfers[insert];
3258
3259         /* init the replaced_transfers list */
3260         INIT_LIST_HEAD(&rxfer->replaced_transfers);
3261
3262         /*
3263          * Assign the list_entry after which we should reinsert
3264          * the @replaced_transfers - it may be spi_message.messages!
3265          */
3266         rxfer->replaced_after = xfer_first->transfer_list.prev;
3267
3268         /* remove the requested number of transfers */
3269         for (i = 0; i < remove; i++) {
3270                 /*
3271                  * If the entry after replaced_after is msg->transfers,
3272                  * then we have been requested to remove more transfers
3273                  * than are in the list.
3274                  */
3275                 if (rxfer->replaced_after->next == &msg->transfers) {
3276                         dev_err(&msg->spi->dev,
3277                                 "requested to remove more spi_transfers than are available\n");
3278                         /* insert replaced transfers back into the message */
3279                         list_splice(&rxfer->replaced_transfers,
3280                                     rxfer->replaced_after);
3281
3282                         /* free the spi_replace_transfer structure */
3283                         spi_res_free(rxfer);
3284
3285                         /* and return with an error */
3286                         return ERR_PTR(-EINVAL);
3287                 }
3288
3289                 /*
3290                  * Remove the entry after replaced_after from list of
3291                  * transfers and add it to list of replaced_transfers.
3292                  */
3293                 list_move_tail(rxfer->replaced_after->next,
3294                                &rxfer->replaced_transfers);
3295         }
3296
3297         /*
3298          * Create copies of the given xfer with identical settings
3299          * based on the first transfer to get removed.
3300          */
3301         for (i = 0; i < insert; i++) {
3302                 /* we need to run in reverse order */
3303                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3304
3305                 /* copy all spi_transfer data */
3306                 memcpy(xfer, xfer_first, sizeof(*xfer));
3307
3308                 /* add to list */
3309                 list_add(&xfer->transfer_list, rxfer->replaced_after);
3310
3311                 /* clear cs_change and delay for all but the last */
3312                 if (i) {
3313                         xfer->cs_change = false;
3314                         xfer->delay.value = 0;
3315                 }
3316         }
3317
3318         /* set up inserted */
3319         rxfer->inserted = insert;
3320
3321         /* and register it with spi_res/spi_message */
3322         spi_res_add(msg, rxfer);
3323
3324         return rxfer;
3325 }
3326
3327 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3328                                         struct spi_message *msg,
3329                                         struct spi_transfer **xferp,
3330                                         size_t maxsize,
3331                                         gfp_t gfp)
3332 {
3333         struct spi_transfer *xfer = *xferp, *xfers;
3334         struct spi_replaced_transfers *srt;
3335         size_t offset;
3336         size_t count, i;
3337
3338         /* calculate how many we have to replace */
3339         count = DIV_ROUND_UP(xfer->len, maxsize);
3340
3341         /* create replacement */
3342         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3343         if (IS_ERR(srt))
3344                 return PTR_ERR(srt);
3345         xfers = srt->inserted_transfers;
3346
3347         /*
3348          * Now handle each of those newly inserted spi_transfers.
3349          * Note that the replacement spi_transfers are all preset
3350          * to the same values as *xferp, so tx_buf, rx_buf and len
3351          * are all identical (as well as most others),
3352          * so we just have to fix up len and the pointers.
3353          *
3354          * This also includes support for the deprecated
3355          * spi_message.is_dma_mapped interface.
3356          */
3357
3358         /*
3359          * The first transfer just needs the length modified, so we
3360          * run it outside the loop.
3361          */
3362         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3363
3364         /* all the others need rx_buf/tx_buf also set */
3365         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3366                 /* update rx_buf, tx_buf and dma */
3367                 if (xfers[i].rx_buf)
3368                         xfers[i].rx_buf += offset;
3369                 if (xfers[i].rx_dma)
3370                         xfers[i].rx_dma += offset;
3371                 if (xfers[i].tx_buf)
3372                         xfers[i].tx_buf += offset;
3373                 if (xfers[i].tx_dma)
3374                         xfers[i].tx_dma += offset;
3375
3376                 /* update length */
3377                 xfers[i].len = min(maxsize, xfers[i].len - offset);
3378         }
3379
3380         /*
3381          * We set up xferp to the last entry we have inserted,
3382          * so that we skip those already split transfers.
3383          */
3384         *xferp = &xfers[count - 1];
3385
3386         /* increment statistics counters */
3387         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3388                                        transfers_split_maxsize);
3389         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3390                                        transfers_split_maxsize);
3391
3392         return 0;
3393 }
3394
3395 /**
3396  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3397  *                               when an individual transfer exceeds a
3398  *                               certain size
3399  * @ctlr:    the @spi_controller for this transfer
3400  * @msg:   the @spi_message to transform
3401  * @maxsize:  the maximum length a transfer may have before it is split
3402  * @gfp: GFP allocation flags
3403  *
3404  * Return: zero on success, else a negative error code
3405  */
3406 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3407                                 struct spi_message *msg,
3408                                 size_t maxsize,
3409                                 gfp_t gfp)
3410 {
3411         struct spi_transfer *xfer;
3412         int ret;
3413
3414         /*
3415          * Iterate over the transfer_list,
3416          * but note that xfer is advanced to the last transfer inserted
3417          * to avoid checking sizes again unnecessarily (also, by the
3418          * time the replacement has happened, xfer may well belong to
3419          * a different list).
3420          */
3421         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3422                 if (xfer->len > maxsize) {
3423                         ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3424                                                            maxsize, gfp);
3425                         if (ret)
3426                                 return ret;
3427                 }
3428         }
3429
3430         return 0;
3431 }
3432 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
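/*
 * Illustrative sketch (not from this file): a controller driver with a
 * hypothetical FIFO limit could enforce it from its prepare_message()
 * hook. "FOO_SPI_MAX_XFER" is a made-up constant.
 *
 *	static int foo_spi_prepare_message(struct spi_controller *ctlr,
 *					   struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, FOO_SPI_MAX_XFER,
 *						   GFP_KERNEL);
 *	}
 */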
3433
3434 /*-------------------------------------------------------------------------*/
3435
3436 /* Core methods for SPI controller protocol drivers.  Some of the
3437  * other core methods are currently defined as inline functions.
3438  */
3439
3440 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3441                                         u8 bits_per_word)
3442 {
3443         if (ctlr->bits_per_word_mask) {
3444                 /* Only 32 bits fit in the mask */
3445                 if (bits_per_word > 32)
3446                         return -EINVAL;
3447                 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3448                         return -EINVAL;
3449         }
3450
3451         return 0;
3452 }
3453
3454 /**
3455  * spi_setup - setup SPI mode and clock rate
3456  * @spi: the device whose settings are being modified
3457  * Context: can sleep, and no requests are queued to the device
3458  *
3459  * SPI protocol drivers may need to update the transfer mode if the
3460  * device doesn't work with its default.  They may likewise need
3461  * to update clock rates or word sizes from initial values.  This function
3462  * changes those settings, and must be called from a context that can sleep.
3463  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3464  * effect the next time the device is selected and data is transferred to
3465  * or from it.  When this function returns, the spi device is deselected.
3466  *
3467  * Note that this call will fail if the protocol driver specifies an option
3468  * that the underlying controller or its driver does not support.  For
3469  * example, not all hardware supports wire transfers using nine bit words,
3470  * LSB-first wire encoding, or active-high chipselects.
3471  *
3472  * Return: zero on success, else a negative error code.
3473  */
3474 int spi_setup(struct spi_device *spi)
3475 {
3476         unsigned        bad_bits, ugly_bits;
3477         int             status = 0;
3478
3479         /*
3480          * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3481          * from being set at the same time.
3482          */
3483         if ((hweight_long(spi->mode &
3484                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3485             (hweight_long(spi->mode &
3486                 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3487                 dev_err(&spi->dev,
3488                 "setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3489                 return -EINVAL;
3490         }
3491         /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3492         if ((spi->mode & SPI_3WIRE) && (spi->mode &
3493                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3494                  SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3495                 return -EINVAL;
3496         /*
3497          * Help drivers fail *cleanly* when they need options
3498          * that aren't supported with their current controller.
3499          * SPI_CS_WORD has a fallback software implementation,
3500          * so it is ignored here.
3501          */
3502         bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3503                                  SPI_NO_TX | SPI_NO_RX);
3504         ugly_bits = bad_bits &
3505                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3506                      SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3507         if (ugly_bits) {
3508                 dev_warn(&spi->dev,
3509                          "setup: ignoring unsupported mode bits %x\n",
3510                          ugly_bits);
3511                 spi->mode &= ~ugly_bits;
3512                 bad_bits &= ~ugly_bits;
3513         }
3514         if (bad_bits) {
3515                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3516                         bad_bits);
3517                 return -EINVAL;
3518         }
3519
3520         if (!spi->bits_per_word) {
3521                 spi->bits_per_word = 8;
3522         } else {
3523                 /*
3524                  * Some controllers may not support the default 8 bits-per-word
3525                  * so only perform the check when this is explicitly provided.
3526                  */
3527                 status = __spi_validate_bits_per_word(spi->controller,
3528                                                       spi->bits_per_word);
3529                 if (status)
3530                         return status;
3531         }
3532
3533         if (spi->controller->max_speed_hz &&
3534             (!spi->max_speed_hz ||
3535              spi->max_speed_hz > spi->controller->max_speed_hz))
3536                 spi->max_speed_hz = spi->controller->max_speed_hz;
3537
3538         mutex_lock(&spi->controller->io_mutex);
3539
3540         if (spi->controller->setup) {
3541                 status = spi->controller->setup(spi);
3542                 if (status) {
3543                         mutex_unlock(&spi->controller->io_mutex);
3544                         dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3545                                 status);
3546                         return status;
3547                 }
3548         }
3549
3550         if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3551                 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3552                 if (status < 0) {
3553                         mutex_unlock(&spi->controller->io_mutex);
3554                         dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3555                                 status);
3556                         return status;
3557                 }
3558
3559                 /*
3560                  * We do not want to return a positive value from
3561                  * pm_runtime_get(); there are many instances of drivers
3562                  * calling spi_setup() and checking for a non-zero return
3563                  * value instead of a negative return value.
3564                  */
3565                 status = 0;
3566
3567                 spi_set_cs(spi, false, true);
3568                 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3569                 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3570         } else {
3571                 spi_set_cs(spi, false, true);
3572         }
3573
3574         mutex_unlock(&spi->controller->io_mutex);
3575
3576         if (spi->rt && !spi->controller->rt) {
3577                 spi->controller->rt = true;
3578                 spi_set_thread_rt(spi->controller);
3579         }
3580
3581         trace_spi_setup(spi, status);
3582
3583         dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3584                         spi->mode & SPI_MODE_X_MASK,
3585                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3586                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3587                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3588                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
3589                         spi->bits_per_word, spi->max_speed_hz,
3590                         status);
3591
3592         return status;
3593 }
3594 EXPORT_SYMBOL_GPL(spi_setup);
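/*
 * Illustrative sketch (not from this file): a protocol driver overriding
 * the defaults from its own probe() before the first transfer. The mode
 * and numbers are arbitrary examples, not requirements.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;	// 1 MHz ceiling for this device
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */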
3595
3596 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3597                                        struct spi_device *spi)
3598 {
3599         int delay1, delay2;
3600
3601         delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3602         if (delay1 < 0)
3603                 return delay1;
3604
3605         delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3606         if (delay2 < 0)
3607                 return delay2;
3608
3609         if (delay1 < delay2)
3610                 memcpy(&xfer->word_delay, &spi->word_delay,
3611                        sizeof(xfer->word_delay));
3612
3613         return 0;
3614 }
3615
3616 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3617 {
3618         struct spi_controller *ctlr = spi->controller;
3619         struct spi_transfer *xfer;
3620         int w_size;
3621
3622         if (list_empty(&message->transfers))
3623                 return -EINVAL;
3624
3625         /*
3626          * If an SPI controller does not support toggling the CS line on each
3627          * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3628          * for the CS line, we can emulate the CS-per-word hardware function by
3629          * splitting transfers into one-word transfers and ensuring that
3630          * cs_change is set for each transfer.
3631          */
3632         if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3633                                           spi->cs_gpiod)) {
3634                 size_t maxsize;
3635                 int ret;
3636
3637                 maxsize = (spi->bits_per_word + 7) / 8;
3638
3639                 /* spi_split_transfers_maxsize() requires message->spi */
3640                 message->spi = spi;
3641
3642                 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3643                                                   GFP_KERNEL);
3644                 if (ret)
3645                         return ret;
3646
3647                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3648                         /* don't change cs_change on the last entry in the list */
3649                         if (list_is_last(&xfer->transfer_list, &message->transfers))
3650                                 break;
3651                         xfer->cs_change = 1;
3652                 }
3653         }
3654
3655         /*
3656          * Half-duplex links include the original MicroWire, and ones with
3657          * only one data pin like SPI_3WIRE (switches direction) or where
3658          * either MOSI or MISO is missing.  They can also be caused by
3659          * software limitations.
3660          */
3661         if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3662             (spi->mode & SPI_3WIRE)) {
3663                 unsigned flags = ctlr->flags;
3664
3665                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3666                         if (xfer->rx_buf && xfer->tx_buf)
3667                                 return -EINVAL;
3668                         if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3669                                 return -EINVAL;
3670                         if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3671                                 return -EINVAL;
3672                 }
3673         }
3674
3675         /*
3676          * Set transfer bits_per_word and max speed as spi device default if
3677          * it is not set for this transfer.
3678          * Set transfer tx_nbits and rx_nbits as single transfer default
3679          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3680          * Ensure transfer word_delay is at least as long as that required by
3681          * device itself.
3682          */
3683         message->frame_length = 0;
3684         list_for_each_entry(xfer, &message->transfers, transfer_list) {
3685                 xfer->effective_speed_hz = 0;
3686                 message->frame_length += xfer->len;
3687                 if (!xfer->bits_per_word)
3688                         xfer->bits_per_word = spi->bits_per_word;
3689
3690                 if (!xfer->speed_hz)
3691                         xfer->speed_hz = spi->max_speed_hz;
3692
3693                 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3694                         xfer->speed_hz = ctlr->max_speed_hz;
3695
3696                 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3697                         return -EINVAL;
3698
3699                 /*
3700                  * SPI transfer length should be a multiple of the SPI word
3701                  * size, where the word size is a power-of-two number of bytes.
3702                  */
3703                 if (xfer->bits_per_word <= 8)
3704                         w_size = 1;
3705                 else if (xfer->bits_per_word <= 16)
3706                         w_size = 2;
3707                 else
3708                         w_size = 4;
3709
3710                 /* No partial transfers accepted */
3711                 if (xfer->len % w_size)
3712                         return -EINVAL;
3713
3714                 if (xfer->speed_hz && ctlr->min_speed_hz &&
3715                     xfer->speed_hz < ctlr->min_speed_hz)
3716                         return -EINVAL;
3717
3718                 if (xfer->tx_buf && !xfer->tx_nbits)
3719                         xfer->tx_nbits = SPI_NBITS_SINGLE;
3720                 if (xfer->rx_buf && !xfer->rx_nbits)
3721                         xfer->rx_nbits = SPI_NBITS_SINGLE;
3722                 /*
3723                  * Check transfer tx/rx_nbits:
3724                  * 1. check the value matches one of single, dual and quad
3725                  * 2. check tx/rx_nbits match the mode in spi_device
3726                  */
3727                 if (xfer->tx_buf) {
3728                         if (spi->mode & SPI_NO_TX)
3729                                 return -EINVAL;
3730                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3731                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
3732                                 xfer->tx_nbits != SPI_NBITS_QUAD)
3733                                 return -EINVAL;
3734                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3735                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3736                                 return -EINVAL;
3737                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3738                                 !(spi->mode & SPI_TX_QUAD))
3739                                 return -EINVAL;
3740                 }
3741                 /* check transfer rx_nbits */
3742                 if (xfer->rx_buf) {
3743                         if (spi->mode & SPI_NO_RX)
3744                                 return -EINVAL;
3745                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3746                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
3747                                 xfer->rx_nbits != SPI_NBITS_QUAD)
3748                                 return -EINVAL;
3749                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3750                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3751                                 return -EINVAL;
3752                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3753                                 !(spi->mode & SPI_RX_QUAD))
3754                                 return -EINVAL;
3755                 }
3756
3757                 if (_spi_xfer_word_delay_update(xfer, spi))
3758                         return -EINVAL;
3759         }
3760
3761         message->status = -EINPROGRESS;
3762
3763         return 0;
3764 }
3765
3766 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3767 {
3768         struct spi_controller *ctlr = spi->controller;
3769         struct spi_transfer *xfer;
3770
3771         /*
3772          * Some controllers do not support doing regular SPI transfers. Return
3773          * ENOTSUPP when this is the case.
3774          */
3775         if (!ctlr->transfer)
3776                 return -ENOTSUPP;
3777
3778         message->spi = spi;
3779
3780         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3781         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3782
3783         trace_spi_message_submit(message);
3784
3785         if (!ctlr->ptp_sts_supported) {
3786                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3787                         xfer->ptp_sts_word_pre = 0;
3788                         ptp_read_system_prets(xfer->ptp_sts);
3789                 }
3790         }
3791
3792         return ctlr->transfer(spi, message);
3793 }
3794
3795 /**
3796  * spi_async - asynchronous SPI transfer
3797  * @spi: device with which data will be exchanged
3798  * @message: describes the data transfers, including completion callback
3799  * Context: any (irqs may be blocked, etc)
3800  *
3801  * This call may be used in_irq and other contexts which can't sleep,
3802  * as well as from task contexts which can sleep.
3803  *
3804  * The completion callback is invoked in a context which can't sleep.
3805  * Before that invocation, the value of message->status is undefined.
3806  * When the callback is issued, message->status holds either zero (to
3807  * indicate complete success) or a negative error code.  After that
3808  * callback returns, the driver which issued the transfer request may
3809  * deallocate the associated memory; it's no longer in use by any SPI
3810  * core or controller driver code.
3811  *
3812  * Note that although all messages to a spi_device are handled in
3813  * FIFO order, messages may go to different devices in other orders.
3814  * Some device might be higher priority, or have various "hard" access
3815  * time requirements, for example.
3816  *
3817  * On detection of any fault during the transfer, processing of
3818  * the entire message is aborted, and the device is deselected.
3819  * Until returning from the associated message completion callback,
3820  * no other spi_message queued to that device will be processed.
3821  * (This rule applies equally to all the synchronous transfer calls,
3822  * which are wrappers around this core asynchronous primitive.)
3823  *
3824  * Return: zero on success, else a negative error code.
3825  */
3826 int spi_async(struct spi_device *spi, struct spi_message *message)
3827 {
3828         struct spi_controller *ctlr = spi->controller;
3829         int ret;
3830         unsigned long flags;
3831
3832         ret = __spi_validate(spi, message);
3833         if (ret != 0)
3834                 return ret;
3835
3836         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3837
3838         if (ctlr->bus_lock_flag)
3839                 ret = -EBUSY;
3840         else
3841                 ret = __spi_async(spi, message);
3842
3843         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3844
3845         return ret;
3846 }
3847 EXPORT_SYMBOL_GPL(spi_async);
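/*
 * Illustrative sketch (not from this file): submitting a message
 * asynchronously with a completion callback. All "foo_*" names are made
 * up; the message and its buffers must stay allocated until the callback
 * has run.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_request *req = context;
 *
 *		// req->msg.status now holds zero or a negative error code
 *		complete(&req->done);
 *	}
 *
 *	spi_message_init_with_transfers(&req->msg, &req->xfer, 1);
 *	req->msg.complete = foo_complete;
 *	req->msg.context = req;
 *	ret = spi_async(spi, &req->msg);
 */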
3848
3849 /**
3850  * spi_async_locked - version of spi_async with exclusive bus usage
3851  * @spi: device with which data will be exchanged
3852  * @message: describes the data transfers, including completion callback
3853  * Context: any (irqs may be blocked, etc)
3854  *
3855  * This call may be used in_irq and other contexts which can't sleep,
3856  * as well as from task contexts which can sleep.
3857  *
3858  * The completion callback is invoked in a context which can't sleep.
3859  * Before that invocation, the value of message->status is undefined.
3860  * When the callback is issued, message->status holds either zero (to
3861  * indicate complete success) or a negative error code.  After that
3862  * callback returns, the driver which issued the transfer request may
3863  * deallocate the associated memory; it's no longer in use by any SPI
3864  * core or controller driver code.
3865  *
3866  * Note that although all messages to a spi_device are handled in
3867  * FIFO order, messages may go to different devices in other orders.
3868  * Some device might be higher priority, or have various "hard" access
3869  * time requirements, for example.
3870  *
3871  * On detection of any fault during the transfer, processing of
3872  * the entire message is aborted, and the device is deselected.
3873  * Until returning from the associated message completion callback,
3874  * no other spi_message queued to that device will be processed.
3875  * (This rule applies equally to all the synchronous transfer calls,
3876  * which are wrappers around this core asynchronous primitive.)
3877  *
3878  * Return: zero on success, else a negative error code.
3879  */
3880 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3881 {
3882         struct spi_controller *ctlr = spi->controller;
3883         int ret;
3884         unsigned long flags;
3885
3886         ret = __spi_validate(spi, message);
3887         if (ret != 0)
3888                 return ret;
3889
3890         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3891
3892         ret = __spi_async(spi, message);
3893
3894         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3895
3896         return ret;
3898 }
3899
3900 /*-------------------------------------------------------------------------*/
3901
3902 /*
3903  * Utility methods for SPI protocol drivers, layered on
3904  * top of the core.  Some other utility methods are defined as
3905  * inline functions.
3906  */
3907
3908 static void spi_complete(void *arg)
3909 {
3910         complete(arg);
3911 }
3912
3913 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3914 {
3915         DECLARE_COMPLETION_ONSTACK(done);
3916         int status;
3917         struct spi_controller *ctlr = spi->controller;
3918         unsigned long flags;
3919
3920         status = __spi_validate(spi, message);
3921         if (status != 0)
3922                 return status;
3923
3924         message->complete = spi_complete;
3925         message->context = &done;
3926         message->spi = spi;
3927
3928         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3929         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3930
3931         /*
3932          * If we're not using the legacy transfer method then we will
3933          * try to transfer in the calling context, so special-case that.
3934          * This code would be less tricky if we could remove the
3935          * support for driver-implemented message queues.
3936          */
3937         if (ctlr->transfer == spi_queued_transfer) {
3938                 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3939
3940                 trace_spi_message_submit(message);
3941
3942                 status = __spi_queued_transfer(spi, message, false);
3943
3944                 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3945         } else {
3946                 status = spi_async_locked(spi, message);
3947         }
3948
3949         if (status == 0) {
3950                 /* Push out the messages in the calling context if we can */
3951                 if (ctlr->transfer == spi_queued_transfer) {
3952                         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3953                                                        spi_sync_immediate);
3954                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3955                                                        spi_sync_immediate);
3956                         __spi_pump_messages(ctlr, false);
3957                 }
3958
3959                 wait_for_completion(&done);
3960                 status = message->status;
3961         }
3962         message->context = NULL;
3963         return status;
3964 }
3965
3966 /**
3967  * spi_sync - blocking/synchronous SPI data transfers
3968  * @spi: device with which data will be exchanged
3969  * @message: describes the data transfers
3970  * Context: can sleep
3971  *
3972  * This call may only be used from a context that may sleep.  The sleep
3973  * is non-interruptible, and has no timeout.  Low-overhead controller
3974  * drivers may DMA directly into and out of the message buffers.
3975  *
3976  * Note that the SPI device's chip select is active during the message,
3977  * and then is normally disabled between messages.  Drivers for some
3978  * frequently-used devices may want to minimize costs of selecting a chip,
3979  * by leaving it selected in anticipation that the next message will go
3980  * to the same chip.  (That may increase power usage.)
3981  *
3982  * Also, the caller is guaranteeing that the memory associated with the
3983  * message will not be freed before this call returns.
3984  *
3985  * Return: zero on success, else a negative error code.
3986  */
3987 int spi_sync(struct spi_device *spi, struct spi_message *message)
3988 {
3989         int ret;
3990
3991         mutex_lock(&spi->controller->bus_lock_mutex);
3992         ret = __spi_sync(spi, message);
3993         mutex_unlock(&spi->controller->bus_lock_mutex);
3994
3995         return ret;
3996 }
3997 EXPORT_SYMBOL_GPL(spi_sync);
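/*
 * Illustrative sketch (not from this file): a synchronous command/response
 * exchange built from two transfers. The buffers are assumed to be
 * DMA-safe (e.g. kmalloc'd), and all names are made up for illustration.
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd,  .len = cmd_len },
 *		{ .rx_buf = resp, .len = resp_len },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);
 */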
3998
3999 /**
4000  * spi_sync_locked - version of spi_sync with exclusive bus usage
4001  * @spi: device with which data will be exchanged
4002  * @message: describes the data transfers
4003  * Context: can sleep
4004  *
4005  * This call may only be used from a context that may sleep.  The sleep
4006  * is non-interruptible, and has no timeout.  Low-overhead controller
4007  * drivers may DMA directly into and out of the message buffers.
4008  *
4009  * This call should be used by drivers that require exclusive access to the
4010  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4011  * be released by a spi_bus_unlock call when the exclusive access is over.
4012  *
4013  * Return: zero on success, else a negative error code.
4014  */
4015 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4016 {
4017         return __spi_sync(spi, message);
4018 }
4019 EXPORT_SYMBOL_GPL(spi_sync_locked);
4020
4021 /**
4022  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4023  * @ctlr: SPI bus master that should be locked for exclusive bus access
4024  * Context: can sleep
4025  *
4026  * This call may only be used from a context that may sleep.  The sleep
4027  * is non-interruptible, and has no timeout.
4028  *
4029  * This call should be used by drivers that require exclusive access to the
4030  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4031  * exclusive access is over. Data transfer must be done by spi_sync_locked
4032  * and spi_async_locked calls when the SPI bus lock is held.
4033  *
4034  * Return: always zero.
4035  */
4036 int spi_bus_lock(struct spi_controller *ctlr)
4037 {
4038         unsigned long flags;
4039
4040         mutex_lock(&ctlr->bus_lock_mutex);
4041
4042         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4043         ctlr->bus_lock_flag = 1;
4044         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4045
4046         /* mutex remains locked until spi_bus_unlock is called */
4047
4048         return 0;
4049 }
4050 EXPORT_SYMBOL_GPL(spi_bus_lock);
4051
4052 /**
4053  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4054  * @ctlr: SPI bus master that was locked for exclusive bus access
4055  * Context: can sleep
4056  *
4057  * This call may only be used from a context that may sleep.  The sleep
4058  * is non-interruptible, and has no timeout.
4059  *
4060  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4061  * call.
4062  *
4063  * Return: always zero.
4064  */
4065 int spi_bus_unlock(struct spi_controller *ctlr)
4066 {
4067         ctlr->bus_lock_flag = 0;
4068
4069         mutex_unlock(&ctlr->bus_lock_mutex);
4070
4071         return 0;
4072 }
4073 EXPORT_SYMBOL_GPL(spi_bus_unlock);
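/*
 * Illustrative sketch (not from this file): issuing two messages back to
 * back with no other traffic allowed in between, using the bus lock API
 * above. msg1/msg2 are assumed to be already-initialized spi_messages.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */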
4074
4075 /* portable code must never pass more than 32 bytes */
4076 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
4077
4078 static u8       *buf;
4079
4080 /**
4081  * spi_write_then_read - SPI synchronous write followed by read
4082  * @spi: device with which data will be exchanged
4083  * @txbuf: data to be written (need not be dma-safe)
4084  * @n_tx: size of txbuf, in bytes
4085  * @rxbuf: buffer into which data will be read (need not be dma-safe)
4086  * @n_rx: size of rxbuf, in bytes
4087  * Context: can sleep
4088  *
4089  * This performs a half duplex MicroWire style transaction with the
4090  * device, sending txbuf and then reading rxbuf.  The return value
4091  * is zero for success, else a negative errno status code.
4092  * This call may only be used from a context that may sleep.
4093  *
4094  * Parameters to this routine are always copied using a small buffer.
4095  * Performance-sensitive or bulk transfer code should instead use
4096  * spi_{async,sync}() calls with dma-safe buffers.
4097  *
4098  * Return: zero on success, else a negative error code.
4099  */
4100 int spi_write_then_read(struct spi_device *spi,
4101                 const void *txbuf, unsigned n_tx,
4102                 void *rxbuf, unsigned n_rx)
4103 {
4104         static DEFINE_MUTEX(lock);
4105
4106         int                     status;
4107         struct spi_message      message;
4108         struct spi_transfer     x[2];
4109         u8                      *local_buf;
4110
4111         /*
4112          * Use the preallocated DMA-safe buffer if we can. We can't avoid
4113          * copying here (it is a pure convenience thing), but we can
4114          * keep heap costs out of the hot path unless someone else is
4115          * using the preallocated buffer or the transfer is too large.
4116          */
4117         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4118                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4119                                     GFP_KERNEL | GFP_DMA);
4120                 if (!local_buf)
4121                         return -ENOMEM;
4122         } else {
4123                 local_buf = buf;
4124         }
4125
4126         spi_message_init(&message);
4127         memset(x, 0, sizeof(x));
4128         if (n_tx) {
4129                 x[0].len = n_tx;
4130                 spi_message_add_tail(&x[0], &message);
4131         }
4132         if (n_rx) {
4133                 x[1].len = n_rx;
4134                 spi_message_add_tail(&x[1], &message);
4135         }
4136
4137         memcpy(local_buf, txbuf, n_tx);
4138         x[0].tx_buf = local_buf;
4139         x[1].rx_buf = local_buf + n_tx;
4140
4141         /* do the i/o */
4142         status = spi_sync(spi, &message);
4143         if (status == 0)
4144                 memcpy(rxbuf, x[1].rx_buf, n_rx);
4145
4146         if (x[0].tx_buf == buf)
4147                 mutex_unlock(&lock);
4148         else
4149                 kfree(local_buf);
4150
4151         return status;
4152 }
4153 EXPORT_SYMBOL_GPL(spi_write_then_read);
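/*
 * Illustrative sketch (not from this file): reading three ID bytes from a
 * hypothetical flash-like chip; 0x9f is just an example opcode. Neither
 * buffer needs to be DMA-safe here.
 *
 *	u8 cmd = 0x9f;
 *	u8 id[3];
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */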
4154
4155 /*-------------------------------------------------------------------------*/
4156
4157 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4158 /* must call put_device() when done with the returned spi_device */
4159 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4160 {
4161         struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4162
4163         return dev ? to_spi_device(dev) : NULL;
4164 }
4165
4166 /* the SPI controllers are not on the spi_bus, so we find them another way */
4167 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4168 {
4169         struct device *dev;
4170
4171         dev = class_find_device_by_of_node(&spi_master_class, node);
4172         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4173                 dev = class_find_device_by_of_node(&spi_slave_class, node);
4174         if (!dev)
4175                 return NULL;
4176
4177         /* reference taken in class_find_device */
4178         return container_of(dev, struct spi_controller, dev);
4179 }
4180
4181 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4182                          void *arg)
4183 {
4184         struct of_reconfig_data *rd = arg;
4185         struct spi_controller *ctlr;
4186         struct spi_device *spi;
4187
4188         switch (of_reconfig_get_state_change(action, arg)) {
4189         case OF_RECONFIG_CHANGE_ADD:
4190                 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4191                 if (ctlr == NULL)
4192                         return NOTIFY_OK;       /* not for us */
4193
4194                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4195                         put_device(&ctlr->dev);
4196                         return NOTIFY_OK;
4197                 }
4198
4199                 spi = of_register_spi_device(ctlr, rd->dn);
4200                 put_device(&ctlr->dev);
4201
4202                 if (IS_ERR(spi)) {
4203                         pr_err("%s: failed to create for '%pOF'\n",
4204                                         __func__, rd->dn);
4205                         of_node_clear_flag(rd->dn, OF_POPULATED);
4206                         return notifier_from_errno(PTR_ERR(spi));
4207                 }
4208                 break;
4209
4210         case OF_RECONFIG_CHANGE_REMOVE:
4211                 /* already depopulated? */
4212                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4213                         return NOTIFY_OK;
4214
4215                 /* find our device by node */
4216                 spi = of_find_spi_device_by_node(rd->dn);
4217                 if (spi == NULL)
4218                         return NOTIFY_OK;       /* no? not meant for us */
4219
4220                 /* unregister takes one ref away */
4221                 spi_unregister_device(spi);
4222
4223                 /* and drop the reference taken by the find above */
4224                 put_device(&spi->dev);
4225                 break;
4226         }
4227
4228         return NOTIFY_OK;
4229 }
4230
4231 static struct notifier_block spi_of_notifier = {
4232         .notifier_call = of_spi_notify,
4233 };
4234 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4235 extern struct notifier_block spi_of_notifier;
4236 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4237
4238 #if IS_ENABLED(CONFIG_ACPI)
4239 static int spi_acpi_controller_match(struct device *dev, const void *data)
4240 {
4241         return ACPI_COMPANION(dev->parent) == data;
4242 }
4243
4244 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4245 {
4246         struct device *dev;
4247
4248         dev = class_find_device(&spi_master_class, NULL, adev,
4249                                 spi_acpi_controller_match);
4250         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4251                 dev = class_find_device(&spi_slave_class, NULL, adev,
4252                                         spi_acpi_controller_match);
4253         if (!dev)
4254                 return NULL;
4255
4256         return container_of(dev, struct spi_controller, dev);
4257 }
4258
4259 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4260 {
4261         struct device *dev;
4262
4263         dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4264         return to_spi_device(dev);
4265 }
4266
4267 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4268                            void *arg)
4269 {
4270         struct acpi_device *adev = arg;
4271         struct spi_controller *ctlr;
4272         struct spi_device *spi;
4273
4274         switch (value) {
4275         case ACPI_RECONFIG_DEVICE_ADD:
4276                 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4277                 if (!ctlr)
4278                         break;
4279
4280                 acpi_register_spi_device(ctlr, adev);
4281                 put_device(&ctlr->dev);
4282                 break;
4283         case ACPI_RECONFIG_DEVICE_REMOVE:
4284                 if (!acpi_device_enumerated(adev))
4285                         break;
4286
4287                 spi = acpi_spi_find_device_by_adev(adev);
4288                 if (!spi)
4289                         break;
4290
4291                 spi_unregister_device(spi);
4292                 put_device(&spi->dev);
4293                 break;
4294         }
4295
4296         return NOTIFY_OK;
4297 }
4298
4299 static struct notifier_block spi_acpi_notifier = {
4300         .notifier_call = acpi_spi_notify,
4301 };
4302 #else
4303 extern struct notifier_block spi_acpi_notifier;
4304 #endif
4305
4306 static int __init spi_init(void)
4307 {
4308         int     status;
4309
4310         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4311         if (!buf) {
4312                 status = -ENOMEM;
4313                 goto err0;
4314         }
4315
4316         status = bus_register(&spi_bus_type);
4317         if (status < 0)
4318                 goto err1;
4319
4320         status = class_register(&spi_master_class);
4321         if (status < 0)
4322                 goto err2;
4323
4324         if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4325                 status = class_register(&spi_slave_class);
4326                 if (status < 0)
4327                         goto err3;
4328         }
4329
4330         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4331                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4332         if (IS_ENABLED(CONFIG_ACPI))
4333                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4334
4335         return 0;
4336
4337 err3:
4338         class_unregister(&spi_master_class);
4339 err2:
4340         bus_unregister(&spi_bus_type);
4341 err1:
4342         kfree(buf);
4343         buf = NULL;
4344 err0:
4345         return status;
4346 }
4347
4348 /*
4349  * A board_info is normally registered in arch_initcall(),
4350  * but even essential drivers wait till later.
4351  *
4352  * REVISIT only boardinfo really needs static linking. The rest (device and
4353  * driver registration) _could_ be dynamically linked (modular) ... Costs
4354  * include needing to have boardinfo data structures be much more public.
4355  */
4356 postcore_initcall(spi_init);