Merge tag 'dmaengine-fix-5.4-rc6' of git://git.infradead.org/users/vkoul/slave-dma
[linux-2.6-microblaze.git] / drivers / input / rmi4 / rmi_spi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2011-2016 Synaptics Incorporated
4  * Copyright (c) 2011 Unixphere
5  */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/rmi.h>
10 #include <linux/slab.h>
11 #include <linux/spi/spi.h>
12 #include <linux/of.h>
13 #include "rmi_driver.h"
14
/* Initial rx/tx buffer size; doubled on demand up to the transfer limit. */
#define RMI_SPI_DEFAULT_XFER_BUF_SIZE   64

#define RMI_PAGE_SELECT_REGISTER        0x00FF
/*
 * Page component of a 16-bit register address.
 * NOTE(review): only bit 7 of the high byte is kept (mask 0x80), unlike the
 * usual RMI page math that masks with 0xFF — confirm this is intentional.
 */
#define RMI_SPI_PAGE(addr)              (((addr) >> 8) & 0x80)
/* Hard ceiling on any single transfer, command header bytes included. */
#define RMI_SPI_XFER_SIZE_LIMIT         255

/* NOTE(review): appears unused within this file. */
#define BUFFER_SIZE_INCREMENT 32
22
/*
 * Wire-level operations; selects the command header serialized by
 * rmi_spi_xfer() (2 bytes for v1 ops, 4 bytes for v2 ops).
 */
enum rmi_spi_op {
        RMI_SPI_WRITE = 0,
        RMI_SPI_READ,
        RMI_SPI_V2_READ_UNIFIED,
        RMI_SPI_V2_READ_SPLIT,
        RMI_SPI_V2_WRITE,
};
30
/* One transport command: the operation plus the target register address. */
struct rmi_spi_cmd {
        enum rmi_spi_op op;
        u16 addr;
};
35
/* Per-device transport state; embeds the generic RMI transport device. */
struct rmi_spi_xport {
        struct rmi_transport_dev xport;
        struct spi_device *spi;

        struct mutex page_mutex;        /* serializes page select + transfer */
        int page;                       /* currently selected register page */

        u8 *rx_buf;                     /* DMA-safe receive buffer */
        u8 *tx_buf;                     /* second half of the rx_buf allocation */
        int xfer_buf_size;              /* size of each half, in bytes */

        /*
         * Pools of spi_transfer descriptors for devices needing per-byte
         * delays (one transfer per byte); otherwise a single descriptor.
         */
        struct spi_transfer *rx_xfers;
        struct spi_transfer *tx_xfers;
        int rx_xfer_count;
        int tx_xfer_count;
};
52
/*
 * rmi_spi_manage_pools - (re)allocate the transfer buffers and xfer pools
 * @rmi_spi: transport instance
 * @len: minimum number of bytes a single transfer must hold
 *
 * Grows the DMA-safe data buffer (doubling from the current/default size,
 * clamped to RMI_SPI_XFER_SIZE_LIMIT) and sizes the spi_transfer pools:
 * one descriptor per byte when the platform requests inter-byte delays,
 * otherwise one descriptor total per direction.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
        struct spi_device *spi = rmi_spi->spi;
        /* Start from the current size, or the default on the first call. */
        int buf_size = rmi_spi->xfer_buf_size
                ? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
        struct spi_transfer *xfer_buf;
        void *buf;
        void *tmp;

        while (buf_size < len)
                buf_size *= 2;

        if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
                buf_size = RMI_SPI_XFER_SIZE_LIMIT;

        /*
         * One allocation holds both halves: rx at the start, tx at offset
         * buf_size.  GFP_DMA because the buffers are handed to the SPI core.
         */
        tmp = rmi_spi->rx_buf;
        buf = devm_kcalloc(&spi->dev, buf_size, 2,
                                GFP_KERNEL | GFP_DMA);
        if (!buf)
                return -ENOMEM;

        rmi_spi->rx_buf = buf;
        rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
        rmi_spi->xfer_buf_size = buf_size;

        /* Free the old buffer only after the new one is installed. */
        if (tmp)
                devm_kfree(&spi->dev, tmp);

        if (rmi_spi->xport.pdata.spi_data.read_delay_us)
                rmi_spi->rx_xfer_count = buf_size;
        else
                rmi_spi->rx_xfer_count = 1;

        if (rmi_spi->xport.pdata.spi_data.write_delay_us)
                rmi_spi->tx_xfer_count = buf_size;
        else
                rmi_spi->tx_xfer_count = 1;

        /*
         * Allocate a pool of spi_transfer buffers for devices which need
         * per byte delays.
         */
        tmp = rmi_spi->rx_xfers;
        xfer_buf = devm_kcalloc(&spi->dev,
                rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count,
                sizeof(struct spi_transfer),
                GFP_KERNEL);
        if (!xfer_buf)
                return -ENOMEM;

        /* Same split trick: rx descriptors first, tx descriptors after. */
        rmi_spi->rx_xfers = xfer_buf;
        rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

        if (tmp)
                devm_kfree(&spi->dev, tmp);

        return 0;
}
111
/*
 * rmi_spi_xfer - build and execute one SPI message for an RMI command
 * @rmi_spi: transport instance
 * @cmd: operation and target register address
 * @tx_buf: optional payload to send after the command header (may be NULL)
 * @tx_len: payload length in bytes
 * @rx_buf: optional buffer receiving @rx_len bytes (may be NULL)
 * @rx_len: number of bytes to read back
 *
 * Serializes the command header into the DMA-safe tx buffer, appends the
 * payload, then queues either one spi_transfer per byte (when the platform
 * data requests inter-byte delays) or a single bulk transfer per direction.
 * Caller must hold page_mutex.  Returns 0 on success or a negative errno.
 */
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
                        const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
                        int tx_len, u8 *rx_buf, int rx_len)
{
        struct spi_device *spi = rmi_spi->spi;
        struct rmi_device_platform_data_spi *spi_data =
                                        &rmi_spi->xport.pdata.spi_data;
        struct spi_message msg;
        struct spi_transfer *xfer;
        int ret = 0;
        int len;
        int cmd_len = 0;
        int total_tx_len;
        int i;
        u16 addr = cmd->addr;

        spi_message_init(&msg);

        /* v1 commands carry a 2-byte header, v2 commands a 4-byte header. */
        switch (cmd->op) {
        case RMI_SPI_WRITE:
        case RMI_SPI_READ:
                cmd_len += 2;
                break;
        case RMI_SPI_V2_READ_UNIFIED:
        case RMI_SPI_V2_READ_SPLIT:
        case RMI_SPI_V2_WRITE:
                cmd_len += 4;
                break;
        }

        total_tx_len = cmd_len + tx_len;
        len = max(total_tx_len, rx_len);

        if (len > RMI_SPI_XFER_SIZE_LIMIT)
                return -EINVAL;

        /* Grow the DMA buffers and xfer pools if this transfer won't fit. */
        if (rmi_spi->xfer_buf_size < len) {
                ret = rmi_spi_manage_pools(rmi_spi, len);
                if (ret < 0)
                        return ret;
        }

        if (addr == 0)
                /*
                 * SPI needs an address. Use 0x7FF if we want to keep
                 * reading from the last position of the register pointer.
                 */
                addr = 0x7FF;

        /* Serialize the command header; reads set bit 7 of the high byte. */
        switch (cmd->op) {
        case RMI_SPI_WRITE:
                rmi_spi->tx_buf[0] = (addr >> 8);
                rmi_spi->tx_buf[1] = addr & 0xFF;
                break;
        case RMI_SPI_READ:
                rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
                rmi_spi->tx_buf[1] = addr & 0xFF;
                break;
        case RMI_SPI_V2_READ_UNIFIED:
                break;
        case RMI_SPI_V2_READ_SPLIT:
                break;
        case RMI_SPI_V2_WRITE:
                rmi_spi->tx_buf[0] = 0x40;
                rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
                rmi_spi->tx_buf[2] = addr & 0xFF;
                rmi_spi->tx_buf[3] = tx_len;
                break;
        }

        if (tx_buf)
                memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

        /* Per-byte transfers with a delay after each, or one bulk transfer. */
        if (rmi_spi->tx_xfer_count > 1) {
                for (i = 0; i < total_tx_len; i++) {
                        xfer = &rmi_spi->tx_xfers[i];
                        memset(xfer, 0, sizeof(struct spi_transfer));
                        xfer->tx_buf = &rmi_spi->tx_buf[i];
                        xfer->len = 1;
                        xfer->delay_usecs = spi_data->write_delay_us;
                        spi_message_add_tail(xfer, &msg);
                }
        } else {
                xfer = rmi_spi->tx_xfers;
                memset(xfer, 0, sizeof(struct spi_transfer));
                xfer->tx_buf = rmi_spi->tx_buf;
                xfer->len = total_tx_len;
                spi_message_add_tail(xfer, &msg);
        }

        rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
                __func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
                total_tx_len, total_tx_len, rmi_spi->tx_buf);

        /* Same per-byte vs. bulk choice for the read side. */
        if (rx_buf) {
                if (rmi_spi->rx_xfer_count > 1) {
                        for (i = 0; i < rx_len; i++) {
                                xfer = &rmi_spi->rx_xfers[i];
                                memset(xfer, 0, sizeof(struct spi_transfer));
                                xfer->rx_buf = &rmi_spi->rx_buf[i];
                                xfer->len = 1;
                                xfer->delay_usecs = spi_data->read_delay_us;
                                spi_message_add_tail(xfer, &msg);
                        }
                } else {
                        xfer = rmi_spi->rx_xfers;
                        memset(xfer, 0, sizeof(struct spi_transfer));
                        xfer->rx_buf = rmi_spi->rx_buf;
                        xfer->len = rx_len;
                        spi_message_add_tail(xfer, &msg);
                }
        }

        ret = spi_sync(spi, &msg);
        if (ret < 0) {
                dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
                return ret;
        }

        /* Copy out of the DMA buffer only after the transfer completed. */
        if (rx_buf) {
                memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
                rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
                        __func__, rx_len, rx_len, rx_buf);
        }

        return 0;
}
239
240 /*
241  * rmi_set_page - Set RMI page
242  * @xport: The pointer to the rmi_transport_dev struct
243  * @page: The new page address.
244  *
245  * RMI devices have 16-bit addressing, but some of the transport
246  * implementations (like SMBus) only have 8-bit addressing. So RMI implements
247  * a page address at 0xff of every page so we can reliable page addresses
248  * every 256 registers.
249  *
250  * The page_mutex lock must be held when this function is entered.
251  *
252  * Returns zero on success, non-zero on failure.
253  */
254 static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
255 {
256         struct rmi_spi_cmd cmd;
257         int ret;
258
259         cmd.op = RMI_SPI_WRITE;
260         cmd.addr = RMI_PAGE_SELECT_REGISTER;
261
262         ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);
263
264         if (ret)
265                 rmi_spi->page = page;
266
267         return ret;
268 }
269
270 static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
271                                const void *buf, size_t len)
272 {
273         struct rmi_spi_xport *rmi_spi =
274                 container_of(xport, struct rmi_spi_xport, xport);
275         struct rmi_spi_cmd cmd;
276         int ret;
277
278         mutex_lock(&rmi_spi->page_mutex);
279
280         if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
281                 ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
282                 if (ret)
283                         goto exit;
284         }
285
286         cmd.op = RMI_SPI_WRITE;
287         cmd.addr = addr;
288
289         ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);
290
291 exit:
292         mutex_unlock(&rmi_spi->page_mutex);
293         return ret;
294 }
295
296 static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
297                               void *buf, size_t len)
298 {
299         struct rmi_spi_xport *rmi_spi =
300                 container_of(xport, struct rmi_spi_xport, xport);
301         struct rmi_spi_cmd cmd;
302         int ret;
303
304         mutex_lock(&rmi_spi->page_mutex);
305
306         if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
307                 ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
308                 if (ret)
309                         goto exit;
310         }
311
312         cmd.op = RMI_SPI_READ;
313         cmd.addr = addr;
314
315         ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);
316
317 exit:
318         mutex_unlock(&rmi_spi->page_mutex);
319         return ret;
320 }
321
/* Transport operations handed to the RMI core via xport.ops. */
static const struct rmi_transport_ops rmi_spi_ops = {
        .write_block    = rmi_spi_write_block,
        .read_block     = rmi_spi_read_block,
};
326
327 #ifdef CONFIG_OF
328 static int rmi_spi_of_probe(struct spi_device *spi,
329                         struct rmi_device_platform_data *pdata)
330 {
331         struct device *dev = &spi->dev;
332         int retval;
333
334         retval = rmi_of_property_read_u32(dev,
335                         &pdata->spi_data.read_delay_us,
336                         "spi-rx-delay-us", 1);
337         if (retval)
338                 return retval;
339
340         retval = rmi_of_property_read_u32(dev,
341                         &pdata->spi_data.write_delay_us,
342                         "spi-tx-delay-us", 1);
343         if (retval)
344                 return retval;
345
346         return 0;
347 }
348
/* Device-tree match table; also exported for module autoloading. */
static const struct of_device_id rmi_spi_of_match[] = {
        { .compatible = "syna,rmi4-spi" },
        {},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
354 #else
/* Stub for !CONFIG_OF builds: an of_node without OF support cannot work. */
static inline int rmi_spi_of_probe(struct spi_device *spi,
                                struct rmi_device_platform_data *pdata)
{
        return -ENODEV;
}
360 #endif
361
/* devm action: undo the rmi_register_transport_device() from probe. */
static void rmi_spi_unregister_transport(void *data)
{
        struct rmi_spi_xport *rmi_spi = data;

        rmi_unregister_transport_device(&rmi_spi->xport);
}
368
/*
 * rmi_spi_probe - bind an RMI4 sensor attached over SPI
 *
 * Gathers platform data (device tree or legacy platform_data), configures
 * the SPI link, allocates the transfer pools, verifies the device responds
 * by selecting page 0, and registers the RMI transport.  Everything is
 * devm-managed, which is why the driver has no remove() callback.
 */
static int rmi_spi_probe(struct spi_device *spi)
{
        struct rmi_spi_xport *rmi_spi;
        struct rmi_device_platform_data *pdata;
        struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
        int error;

        /* The RMI SPI protocol requires full-duplex transfers. */
        if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
                return -EINVAL;

        rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
                        GFP_KERNEL);
        if (!rmi_spi)
                return -ENOMEM;

        pdata = &rmi_spi->xport.pdata;

        /* DT wins over legacy platform data; neither is also acceptable. */
        if (spi->dev.of_node) {
                error = rmi_spi_of_probe(spi, pdata);
                if (error)
                        return error;
        } else if (spi_pdata) {
                *pdata = *spi_pdata;
        }

        if (pdata->spi_data.bits_per_word)
                spi->bits_per_word = pdata->spi_data.bits_per_word;

        if (pdata->spi_data.mode)
                spi->mode = pdata->spi_data.mode;

        error = spi_setup(spi);
        if (error < 0) {
                dev_err(&spi->dev, "spi_setup failed!\n");
                return error;
        }

        pdata->irq = spi->irq;

        rmi_spi->spi = spi;
        mutex_init(&rmi_spi->page_mutex);

        rmi_spi->xport.dev = &spi->dev;
        rmi_spi->xport.proto_name = "spi";
        rmi_spi->xport.ops = &rmi_spi_ops;

        spi_set_drvdata(spi, rmi_spi);

        /* Pools must exist before the first rmi_spi_xfer() below. */
        error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
        if (error)
                return error;

        /*
         * Setting the page to zero will (a) make sure the PSR is in a
         * known state, and (b) make sure we can talk to the device.
         */
        error = rmi_set_page(rmi_spi, 0);
        if (error) {
                dev_err(&spi->dev, "Failed to set page select to 0.\n");
                return error;
        }

        dev_info(&spi->dev, "registering SPI-connected sensor\n");

        error = rmi_register_transport_device(&rmi_spi->xport);
        if (error) {
                dev_err(&spi->dev, "failed to register sensor: %d\n", error);
                return error;
        }

        /* Unregister the transport automatically on unbind. */
        error = devm_add_action_or_reset(&spi->dev,
                                          rmi_spi_unregister_transport,
                                          rmi_spi);
        if (error)
                return error;

        return 0;
}
447
448 #ifdef CONFIG_PM_SLEEP
449 static int rmi_spi_suspend(struct device *dev)
450 {
451         struct spi_device *spi = to_spi_device(dev);
452         struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
453         int ret;
454
455         ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
456         if (ret)
457                 dev_warn(dev, "Failed to resume device: %d\n", ret);
458
459         return ret;
460 }
461
462 static int rmi_spi_resume(struct device *dev)
463 {
464         struct spi_device *spi = to_spi_device(dev);
465         struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
466         int ret;
467
468         ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
469         if (ret)
470                 dev_warn(dev, "Failed to resume device: %d\n", ret);
471
472         return ret;
473 }
474 #endif
475
476 #ifdef CONFIG_PM
477 static int rmi_spi_runtime_suspend(struct device *dev)
478 {
479         struct spi_device *spi = to_spi_device(dev);
480         struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
481         int ret;
482
483         ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
484         if (ret)
485                 dev_warn(dev, "Failed to resume device: %d\n", ret);
486
487         return 0;
488 }
489
490 static int rmi_spi_runtime_resume(struct device *dev)
491 {
492         struct spi_device *spi = to_spi_device(dev);
493         struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
494         int ret;
495
496         ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
497         if (ret)
498                 dev_warn(dev, "Failed to resume device: %d\n", ret);
499
500         return 0;
501 }
502 #endif
503
/* System sleep and runtime PM hooks (each pair compiled conditionally). */
static const struct dev_pm_ops rmi_spi_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
        SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
                           NULL)
};
509
/* Legacy (non-DT) SPI device id table; also drives module autoloading. */
static const struct spi_device_id rmi_id[] = {
        { "rmi4_spi", 0 },
        { }
};
MODULE_DEVICE_TABLE(spi, rmi_id);
515
/* No .remove: all teardown is devm-managed (see rmi_spi_probe()). */
static struct spi_driver rmi_spi_driver = {
        .driver = {
                .name   = "rmi4_spi",
                .pm     = &rmi_spi_pm,
                .of_match_table = of_match_ptr(rmi_spi_of_match),
        },
        .id_table       = rmi_id,
        .probe          = rmi_spi_probe,
};
525
526 module_spi_driver(rmi_spi_driver);
527
528 MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
529 MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
530 MODULE_DESCRIPTION("RMI SPI driver");
531 MODULE_LICENSE("GPL");