Merge branch 'regmap-4.20' into regmap-next
[linux-2.6-microblaze.git] / drivers / base / regmap / regmap.c
1 /*
2  * Register map access API
3  *
4  * Copyright 2011 Wolfson Microelectronics plc
5  *
6  * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/device.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/mutex.h>
17 #include <linux/err.h>
18 #include <linux/of.h>
19 #include <linux/rbtree.h>
20 #include <linux/sched.h>
21 #include <linux/delay.h>
22 #include <linux/log2.h>
23 #include <linux/hwspinlock.h>
24
25 #define CREATE_TRACE_POINTS
26 #include "trace.h"
27
28 #include "internal.h"
29
30 /*
31  * Sometimes for failures during very early init the trace
32  * infrastructure isn't available early enough to be used.  For this
33  * sort of problem defining LOG_DEVICE will add printks for basic
34  * register I/O on a specific device.
35  */
36 #undef LOG_DEVICE
37
#ifdef LOG_DEVICE
/* True when this map's device name matches LOG_DEVICE, enabling the
 * fallback printk logging described above.
 */
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
/* Logging disabled: compile the checks away entirely. */
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
46
47
48 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
49                                unsigned int mask, unsigned int val,
50                                bool *change, bool force_write);
51
52 static int _regmap_bus_reg_read(void *context, unsigned int reg,
53                                 unsigned int *val);
54 static int _regmap_bus_read(void *context, unsigned int reg,
55                             unsigned int *val);
56 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
57                                        unsigned int val);
58 static int _regmap_bus_reg_write(void *context, unsigned int reg,
59                                  unsigned int val);
60 static int _regmap_bus_raw_write(void *context, unsigned int reg,
61                                  unsigned int val);
62
63 bool regmap_reg_in_ranges(unsigned int reg,
64                           const struct regmap_range *ranges,
65                           unsigned int nranges)
66 {
67         const struct regmap_range *r;
68         int i;
69
70         for (i = 0, r = ranges; i < nranges; i++, r++)
71                 if (regmap_reg_in_range(reg, r))
72                         return true;
73         return false;
74 }
75 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
76
77 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
78                               const struct regmap_access_table *table)
79 {
80         /* Check "no ranges" first */
81         if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
82                 return false;
83
84         /* In case zero "yes ranges" are supplied, any reg is OK */
85         if (!table->n_yes_ranges)
86                 return true;
87
88         return regmap_reg_in_ranges(reg, table->yes_ranges,
89                                     table->n_yes_ranges);
90 }
91 EXPORT_SYMBOL_GPL(regmap_check_range_table);
92
/* Return true if @reg may be written on this map.  Checks, in order:
 * the max_register bound, the per-register callback, then the
 * writeable range table; with no information, assume writeable.
 */
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}
106
107 bool regmap_cached(struct regmap *map, unsigned int reg)
108 {
109         int ret;
110         unsigned int val;
111
112         if (map->cache_type == REGCACHE_NONE)
113                 return false;
114
115         if (!map->cache_ops)
116                 return false;
117
118         if (map->max_register && reg > map->max_register)
119                 return false;
120
121         map->lock(map->lock_arg);
122         ret = regcache_read(map, reg, &val);
123         map->unlock(map->lock_arg);
124         if (ret)
125                 return false;
126
127         return true;
128 }
129
/* Return true if @reg may be read back.  A map with only a formatted
 * write path (format_write) has no read side at all; otherwise the
 * per-register callback wins over the range table, defaulting to
 * readable.
 */
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}
149
150 bool regmap_volatile(struct regmap *map, unsigned int reg)
151 {
152         if (!map->format.format_write && !regmap_readable(map, reg))
153                 return false;
154
155         if (map->volatile_reg)
156                 return map->volatile_reg(map->dev, reg);
157
158         if (map->volatile_table)
159                 return regmap_check_range_table(map, reg, map->volatile_table);
160
161         if (map->cache_ops)
162                 return false;
163         else
164                 return true;
165 }
166
/* Return true if reading @reg has side effects and so must be avoided
 * by debug/cache code.  Unreadable registers are never precious, and
 * by default nothing is.
 */
bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}
180
181 bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
182 {
183         if (map->readable_noinc_reg)
184                 return map->readable_noinc_reg(map->dev, reg);
185
186         if (map->rd_noinc_table)
187                 return regmap_check_range_table(map, reg, map->rd_noinc_table);
188
189         return true;
190 }
191
192 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
193         size_t num)
194 {
195         unsigned int i;
196
197         for (i = 0; i < num; i++)
198                 if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
199                         return false;
200
201         return true;
202 }
203
/* Pack a 2-bit register and 6-bit value into a single byte in work_buf. */
static void regmap_format_2_6_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}
211
/* Pack a 4-bit register and 12-bit value as one big-endian 16-bit word. */
static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}
218
/* Pack a 7-bit register and 9-bit value as one big-endian 16-bit word. */
static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}
225
/* Pack a 10-bit register and 14-bit value across three bytes, most
 * significant register bits in out[0].
 */
static void regmap_format_10_14_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}
235
/* Store the low 8 bits of @val (after applying @shift) into the buffer. */
static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}
242
/* Store @val (after @shift) as a big-endian 16-bit word. */
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}
249
/* Store @val (after @shift) as a little-endian 16-bit word. */
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}
256
/* Store @val (after @shift) as a CPU-endian 16-bit word. */
static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}
262
263 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
264 {
265         u8 *b = buf;
266
267         val <<= shift;
268
269         b[0] = val >> 16;
270         b[1] = val >> 8;
271         b[2] = val;
272 }
273
/* Store @val (after @shift) as a big-endian 32-bit word. */
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}
280
/* Store @val (after @shift) as a little-endian 32-bit word. */
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}
287
/* Store @val (after @shift) as a CPU-endian 32-bit word. */
static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}
293
#ifdef CONFIG_64BIT
/* 64-bit value formatters.  @val is widened to u64 before shifting so
 * large shift counts don't overflow the 32-bit argument.
 */
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif
315
/* In-place parse for native-endian data: nothing to byte swap. */
static void regmap_parse_inplace_noop(void *buf)
{
}
319
/* Read an 8-bit value from the buffer. */
static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}
326
/* Read a big-endian 16-bit value from the buffer. */
static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}
333
/* Read a little-endian 16-bit value from the buffer. */
static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}
340
/* Convert a big-endian 16-bit value to CPU endian in place. */
static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}
347
/* Convert a little-endian 16-bit value to CPU endian in place. */
static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}
354
/* Read a CPU-endian 16-bit value from the buffer. */
static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}
359
360 static unsigned int regmap_parse_24(const void *buf)
361 {
362         const u8 *b = buf;
363         unsigned int ret = b[2];
364         ret |= ((unsigned int)b[1]) << 8;
365         ret |= ((unsigned int)b[0]) << 16;
366
367         return ret;
368 }
369
/* Read a big-endian 32-bit value from the buffer. */
static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}
376
/* Read a little-endian 32-bit value from the buffer. */
static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}
383
/* Convert a big-endian 32-bit value to CPU endian in place. */
static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}
390
/* Convert a little-endian 32-bit value to CPU endian in place. */
static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}
397
/* Read a CPU-endian 32-bit value from the buffer. */
static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}
402
#ifdef CONFIG_64BIT
/* 64-bit value parsers.  NOTE(review): the return type is unsigned
 * int, so values wider than 32 bits are truncated on return — this
 * mirrors the rest of the parse_val API.
 */
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

/* Convert a big-endian 64-bit value to CPU endian in place. */
static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

/* Convert a little-endian 64-bit value to CPU endian in place. */
static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif
437
/* Take the map's hardware spinlock, waiting effectively forever.
 * NOTE(review): the int return of hwspin_lock_timeout() is ignored,
 * so a timeout or failure would go unnoticed here.
 */
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}
444
/* As regmap_lock_hwlock() but also disables local interrupts.
 * NOTE(review): return value of the timeout call is ignored here too.
 */
static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}
451
/* As regmap_lock_hwlock() but saves/disables interrupt state, stashing
 * the flags in the map for the matching irqrestore unlock.
 */
static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}
459
/* Release the map's hardware spinlock. */
static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}
466
/* Release the hardware spinlock and re-enable local interrupts. */
static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}
473
/* Release the hardware spinlock, restoring the interrupt flags that
 * the matching irqsave lock stashed in the map.
 */
static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
480
/* No-op lock/unlock used when the config disables locking entirely. */
static void regmap_lock_unlock_none(void *__map)
{

}
485
/* Mutex-based map lock (default for maps that may sleep). */
static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}
491
/* Release the mutex taken by regmap_lock_mutex(). */
static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}
497
/* Spinlock-based map lock for fast_io maps.  The saved IRQ flags are
 * stashed in the map; this is safe because they are only read by the
 * matching unlock while the lock is still held.
 */
static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}
507
/* Release the spinlock, restoring the IRQ flags stashed at lock time. */
static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
514
/* devres release callback for the dev_get_regmap() bookkeeping entry. */
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
523
524 static bool _regmap_range_add(struct regmap *map,
525                               struct regmap_range_node *data)
526 {
527         struct rb_root *root = &map->range_tree;
528         struct rb_node **new = &(root->rb_node), *parent = NULL;
529
530         while (*new) {
531                 struct regmap_range_node *this =
532                         rb_entry(*new, struct regmap_range_node, node);
533
534                 parent = *new;
535                 if (data->range_max < this->range_min)
536                         new = &((*new)->rb_left);
537                 else if (data->range_min > this->range_max)
538                         new = &((*new)->rb_right);
539                 else
540                         return false;
541         }
542
543         rb_link_node(&data->node, parent, new);
544         rb_insert_color(&data->node, root);
545
546         return true;
547 }
548
549 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
550                                                       unsigned int reg)
551 {
552         struct rb_node *node = map->range_tree.rb_node;
553
554         while (node) {
555                 struct regmap_range_node *this =
556                         rb_entry(node, struct regmap_range_node, node);
557
558                 if (reg < this->range_min)
559                         node = node->rb_left;
560                 else if (reg > this->range_max)
561                         node = node->rb_right;
562                 else
563                         return this;
564         }
565
566         return NULL;
567 }
568
569 static void regmap_range_exit(struct regmap *map)
570 {
571         struct rb_node *next;
572         struct regmap_range_node *range_node;
573
574         next = rb_first(&map->range_tree);
575         while (next) {
576                 range_node = rb_entry(next, struct regmap_range_node, node);
577                 next = rb_next(&range_node->node);
578                 rb_erase(&range_node->node, &map->range_tree);
579                 kfree(range_node);
580         }
581
582         kfree(map->selector_work_buf);
583 }
584
/**
 * regmap_attach_dev() - Attach an existing regmap to a device
 * @dev: device to attach the map to
 * @map: regmap to attach
 * @config: config used to create the map (only @config->name is read here)
 *
 * Sets up debugfs for the map and registers a devres entry so that
 * dev_get_regmap() can later retrieve the map from the device.
 *
 * Return: 0 on success, -ENOMEM if the devres entry cannot be allocated
 * (debugfs is torn down again in that case).
 */
int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
606
607 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
608                                         const struct regmap_config *config)
609 {
610         enum regmap_endian endian;
611
612         /* Retrieve the endianness specification from the regmap config */
613         endian = config->reg_format_endian;
614
615         /* If the regmap config specified a non-default value, use that */
616         if (endian != REGMAP_ENDIAN_DEFAULT)
617                 return endian;
618
619         /* Retrieve the endianness specification from the bus config */
620         if (bus && bus->reg_format_endian_default)
621                 endian = bus->reg_format_endian_default;
622
623         /* If the bus specified a non-default value, use that */
624         if (endian != REGMAP_ENDIAN_DEFAULT)
625                 return endian;
626
627         /* Use this if no other value was found */
628         return REGMAP_ENDIAN_BIG;
629 }
630
631 enum regmap_endian regmap_get_val_endian(struct device *dev,
632                                          const struct regmap_bus *bus,
633                                          const struct regmap_config *config)
634 {
635         struct device_node *np;
636         enum regmap_endian endian;
637
638         /* Retrieve the endianness specification from the regmap config */
639         endian = config->val_format_endian;
640
641         /* If the regmap config specified a non-default value, use that */
642         if (endian != REGMAP_ENDIAN_DEFAULT)
643                 return endian;
644
645         /* If the dev and dev->of_node exist try to get endianness from DT */
646         if (dev && dev->of_node) {
647                 np = dev->of_node;
648
649                 /* Parse the device's DT node for an endianness specification */
650                 if (of_property_read_bool(np, "big-endian"))
651                         endian = REGMAP_ENDIAN_BIG;
652                 else if (of_property_read_bool(np, "little-endian"))
653                         endian = REGMAP_ENDIAN_LITTLE;
654                 else if (of_property_read_bool(np, "native-endian"))
655                         endian = REGMAP_ENDIAN_NATIVE;
656
657                 /* If the endianness was specified in DT, use that */
658                 if (endian != REGMAP_ENDIAN_DEFAULT)
659                         return endian;
660         }
661
662         /* Retrieve the endianness specification from the bus config */
663         if (bus && bus->val_format_endian_default)
664                 endian = bus->val_format_endian_default;
665
666         /* If the bus specified a non-default value, use that */
667         if (endian != REGMAP_ENDIAN_DEFAULT)
668                 return endian;
669
670         /* Use this if no other value was found */
671         return REGMAP_ENDIAN_BIG;
672 }
673 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
674
675 struct regmap *__regmap_init(struct device *dev,
676                              const struct regmap_bus *bus,
677                              void *bus_context,
678                              const struct regmap_config *config,
679                              struct lock_class_key *lock_key,
680                              const char *lock_name)
681 {
682         struct regmap *map;
683         int ret = -EINVAL;
684         enum regmap_endian reg_endian, val_endian;
685         int i, j;
686
687         if (!config)
688                 goto err;
689
690         map = kzalloc(sizeof(*map), GFP_KERNEL);
691         if (map == NULL) {
692                 ret = -ENOMEM;
693                 goto err;
694         }
695
696         if (config->name) {
697                 map->name = kstrdup_const(config->name, GFP_KERNEL);
698                 if (!map->name) {
699                         ret = -ENOMEM;
700                         goto err_map;
701                 }
702         }
703
704         if (config->disable_locking) {
705                 map->lock = map->unlock = regmap_lock_unlock_none;
706                 regmap_debugfs_disable(map);
707         } else if (config->lock && config->unlock) {
708                 map->lock = config->lock;
709                 map->unlock = config->unlock;
710                 map->lock_arg = config->lock_arg;
711         } else if (config->use_hwlock) {
712                 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
713                 if (!map->hwlock) {
714                         ret = -ENXIO;
715                         goto err_name;
716                 }
717
718                 switch (config->hwlock_mode) {
719                 case HWLOCK_IRQSTATE:
720                         map->lock = regmap_lock_hwlock_irqsave;
721                         map->unlock = regmap_unlock_hwlock_irqrestore;
722                         break;
723                 case HWLOCK_IRQ:
724                         map->lock = regmap_lock_hwlock_irq;
725                         map->unlock = regmap_unlock_hwlock_irq;
726                         break;
727                 default:
728                         map->lock = regmap_lock_hwlock;
729                         map->unlock = regmap_unlock_hwlock;
730                         break;
731                 }
732
733                 map->lock_arg = map;
734         } else {
735                 if ((bus && bus->fast_io) ||
736                     config->fast_io) {
737                         spin_lock_init(&map->spinlock);
738                         map->lock = regmap_lock_spinlock;
739                         map->unlock = regmap_unlock_spinlock;
740                         lockdep_set_class_and_name(&map->spinlock,
741                                                    lock_key, lock_name);
742                 } else {
743                         mutex_init(&map->mutex);
744                         map->lock = regmap_lock_mutex;
745                         map->unlock = regmap_unlock_mutex;
746                         lockdep_set_class_and_name(&map->mutex,
747                                                    lock_key, lock_name);
748                 }
749                 map->lock_arg = map;
750         }
751
752         /*
753          * When we write in fast-paths with regmap_bulk_write() don't allocate
754          * scratch buffers with sleeping allocations.
755          */
756         if ((bus && bus->fast_io) || config->fast_io)
757                 map->alloc_flags = GFP_ATOMIC;
758         else
759                 map->alloc_flags = GFP_KERNEL;
760
761         map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
762         map->format.pad_bytes = config->pad_bits / 8;
763         map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
764         map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
765                         config->val_bits + config->pad_bits, 8);
766         map->reg_shift = config->pad_bits % 8;
767         if (config->reg_stride)
768                 map->reg_stride = config->reg_stride;
769         else
770                 map->reg_stride = 1;
771         if (is_power_of_2(map->reg_stride))
772                 map->reg_stride_order = ilog2(map->reg_stride);
773         else
774                 map->reg_stride_order = -1;
775         map->use_single_read = config->use_single_rw || !bus || !bus->read;
776         map->use_single_write = config->use_single_rw || !bus || !bus->write;
777         map->can_multi_write = config->can_multi_write && bus && bus->write;
778         if (bus) {
779                 map->max_raw_read = bus->max_raw_read;
780                 map->max_raw_write = bus->max_raw_write;
781         }
782         map->dev = dev;
783         map->bus = bus;
784         map->bus_context = bus_context;
785         map->max_register = config->max_register;
786         map->wr_table = config->wr_table;
787         map->rd_table = config->rd_table;
788         map->volatile_table = config->volatile_table;
789         map->precious_table = config->precious_table;
790         map->rd_noinc_table = config->rd_noinc_table;
791         map->writeable_reg = config->writeable_reg;
792         map->readable_reg = config->readable_reg;
793         map->volatile_reg = config->volatile_reg;
794         map->precious_reg = config->precious_reg;
795         map->readable_noinc_reg = config->readable_noinc_reg;
796         map->cache_type = config->cache_type;
797
798         spin_lock_init(&map->async_lock);
799         INIT_LIST_HEAD(&map->async_list);
800         INIT_LIST_HEAD(&map->async_free);
801         init_waitqueue_head(&map->async_waitq);
802
803         if (config->read_flag_mask ||
804             config->write_flag_mask ||
805             config->zero_flag_mask) {
806                 map->read_flag_mask = config->read_flag_mask;
807                 map->write_flag_mask = config->write_flag_mask;
808         } else if (bus) {
809                 map->read_flag_mask = bus->read_flag_mask;
810         }
811
812         if (!bus) {
813                 map->reg_read  = config->reg_read;
814                 map->reg_write = config->reg_write;
815
816                 map->defer_caching = false;
817                 goto skip_format_initialization;
818         } else if (!bus->read || !bus->write) {
819                 map->reg_read = _regmap_bus_reg_read;
820                 map->reg_write = _regmap_bus_reg_write;
821
822                 map->defer_caching = false;
823                 goto skip_format_initialization;
824         } else {
825                 map->reg_read  = _regmap_bus_read;
826                 map->reg_update_bits = bus->reg_update_bits;
827         }
828
829         reg_endian = regmap_get_reg_endian(bus, config);
830         val_endian = regmap_get_val_endian(dev, bus, config);
831
832         switch (config->reg_bits + map->reg_shift) {
833         case 2:
834                 switch (config->val_bits) {
835                 case 6:
836                         map->format.format_write = regmap_format_2_6_write;
837                         break;
838                 default:
839                         goto err_hwlock;
840                 }
841                 break;
842
843         case 4:
844                 switch (config->val_bits) {
845                 case 12:
846                         map->format.format_write = regmap_format_4_12_write;
847                         break;
848                 default:
849                         goto err_hwlock;
850                 }
851                 break;
852
853         case 7:
854                 switch (config->val_bits) {
855                 case 9:
856                         map->format.format_write = regmap_format_7_9_write;
857                         break;
858                 default:
859                         goto err_hwlock;
860                 }
861                 break;
862
863         case 10:
864                 switch (config->val_bits) {
865                 case 14:
866                         map->format.format_write = regmap_format_10_14_write;
867                         break;
868                 default:
869                         goto err_hwlock;
870                 }
871                 break;
872
873         case 8:
874                 map->format.format_reg = regmap_format_8;
875                 break;
876
877         case 16:
878                 switch (reg_endian) {
879                 case REGMAP_ENDIAN_BIG:
880                         map->format.format_reg = regmap_format_16_be;
881                         break;
882                 case REGMAP_ENDIAN_LITTLE:
883                         map->format.format_reg = regmap_format_16_le;
884                         break;
885                 case REGMAP_ENDIAN_NATIVE:
886                         map->format.format_reg = regmap_format_16_native;
887                         break;
888                 default:
889                         goto err_hwlock;
890                 }
891                 break;
892
893         case 24:
894                 if (reg_endian != REGMAP_ENDIAN_BIG)
895                         goto err_hwlock;
896                 map->format.format_reg = regmap_format_24;
897                 break;
898
899         case 32:
900                 switch (reg_endian) {
901                 case REGMAP_ENDIAN_BIG:
902                         map->format.format_reg = regmap_format_32_be;
903                         break;
904                 case REGMAP_ENDIAN_LITTLE:
905                         map->format.format_reg = regmap_format_32_le;
906                         break;
907                 case REGMAP_ENDIAN_NATIVE:
908                         map->format.format_reg = regmap_format_32_native;
909                         break;
910                 default:
911                         goto err_hwlock;
912                 }
913                 break;
914
915 #ifdef CONFIG_64BIT
916         case 64:
917                 switch (reg_endian) {
918                 case REGMAP_ENDIAN_BIG:
919                         map->format.format_reg = regmap_format_64_be;
920                         break;
921                 case REGMAP_ENDIAN_LITTLE:
922                         map->format.format_reg = regmap_format_64_le;
923                         break;
924                 case REGMAP_ENDIAN_NATIVE:
925                         map->format.format_reg = regmap_format_64_native;
926                         break;
927                 default:
928                         goto err_hwlock;
929                 }
930                 break;
931 #endif
932
933         default:
934                 goto err_hwlock;
935         }
936
937         if (val_endian == REGMAP_ENDIAN_NATIVE)
938                 map->format.parse_inplace = regmap_parse_inplace_noop;
939
940         switch (config->val_bits) {
941         case 8:
942                 map->format.format_val = regmap_format_8;
943                 map->format.parse_val = regmap_parse_8;
944                 map->format.parse_inplace = regmap_parse_inplace_noop;
945                 break;
946         case 16:
947                 switch (val_endian) {
948                 case REGMAP_ENDIAN_BIG:
949                         map->format.format_val = regmap_format_16_be;
950                         map->format.parse_val = regmap_parse_16_be;
951                         map->format.parse_inplace = regmap_parse_16_be_inplace;
952                         break;
953                 case REGMAP_ENDIAN_LITTLE:
954                         map->format.format_val = regmap_format_16_le;
955                         map->format.parse_val = regmap_parse_16_le;
956                         map->format.parse_inplace = regmap_parse_16_le_inplace;
957                         break;
958                 case REGMAP_ENDIAN_NATIVE:
959                         map->format.format_val = regmap_format_16_native;
960                         map->format.parse_val = regmap_parse_16_native;
961                         break;
962                 default:
963                         goto err_hwlock;
964                 }
965                 break;
966         case 24:
967                 if (val_endian != REGMAP_ENDIAN_BIG)
968                         goto err_hwlock;
969                 map->format.format_val = regmap_format_24;
970                 map->format.parse_val = regmap_parse_24;
971                 break;
972         case 32:
973                 switch (val_endian) {
974                 case REGMAP_ENDIAN_BIG:
975                         map->format.format_val = regmap_format_32_be;
976                         map->format.parse_val = regmap_parse_32_be;
977                         map->format.parse_inplace = regmap_parse_32_be_inplace;
978                         break;
979                 case REGMAP_ENDIAN_LITTLE:
980                         map->format.format_val = regmap_format_32_le;
981                         map->format.parse_val = regmap_parse_32_le;
982                         map->format.parse_inplace = regmap_parse_32_le_inplace;
983                         break;
984                 case REGMAP_ENDIAN_NATIVE:
985                         map->format.format_val = regmap_format_32_native;
986                         map->format.parse_val = regmap_parse_32_native;
987                         break;
988                 default:
989                         goto err_hwlock;
990                 }
991                 break;
992 #ifdef CONFIG_64BIT
993         case 64:
994                 switch (val_endian) {
995                 case REGMAP_ENDIAN_BIG:
996                         map->format.format_val = regmap_format_64_be;
997                         map->format.parse_val = regmap_parse_64_be;
998                         map->format.parse_inplace = regmap_parse_64_be_inplace;
999                         break;
1000                 case REGMAP_ENDIAN_LITTLE:
1001                         map->format.format_val = regmap_format_64_le;
1002                         map->format.parse_val = regmap_parse_64_le;
1003                         map->format.parse_inplace = regmap_parse_64_le_inplace;
1004                         break;
1005                 case REGMAP_ENDIAN_NATIVE:
1006                         map->format.format_val = regmap_format_64_native;
1007                         map->format.parse_val = regmap_parse_64_native;
1008                         break;
1009                 default:
1010                         goto err_hwlock;
1011                 }
1012                 break;
1013 #endif
1014         }
1015
1016         if (map->format.format_write) {
1017                 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1018                     (val_endian != REGMAP_ENDIAN_BIG))
1019                         goto err_hwlock;
1020                 map->use_single_write = true;
1021         }
1022
1023         if (!map->format.format_write &&
1024             !(map->format.format_reg && map->format.format_val))
1025                 goto err_hwlock;
1026
1027         map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1028         if (map->work_buf == NULL) {
1029                 ret = -ENOMEM;
1030                 goto err_hwlock;
1031         }
1032
1033         if (map->format.format_write) {
1034                 map->defer_caching = false;
1035                 map->reg_write = _regmap_bus_formatted_write;
1036         } else if (map->format.format_val) {
1037                 map->defer_caching = true;
1038                 map->reg_write = _regmap_bus_raw_write;
1039         }
1040
1041 skip_format_initialization:
1042
1043         map->range_tree = RB_ROOT;
1044         for (i = 0; i < config->num_ranges; i++) {
1045                 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1046                 struct regmap_range_node *new;
1047
1048                 /* Sanity check */
1049                 if (range_cfg->range_max < range_cfg->range_min) {
1050                         dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
1051                                 range_cfg->range_max, range_cfg->range_min);
1052                         goto err_range;
1053                 }
1054
1055                 if (range_cfg->range_max > map->max_register) {
1056                         dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
1057                                 range_cfg->range_max, map->max_register);
1058                         goto err_range;
1059                 }
1060
1061                 if (range_cfg->selector_reg > map->max_register) {
1062                         dev_err(map->dev,
1063                                 "Invalid range %d: selector out of map\n", i);
1064                         goto err_range;
1065                 }
1066
1067                 if (range_cfg->window_len == 0) {
1068                         dev_err(map->dev, "Invalid range %d: window_len 0\n",
1069                                 i);
1070                         goto err_range;
1071                 }
1072
1073                 /* Make sure, that this register range has no selector
1074                    or data window within its boundary */
1075                 for (j = 0; j < config->num_ranges; j++) {
1076                         unsigned sel_reg = config->ranges[j].selector_reg;
1077                         unsigned win_min = config->ranges[j].window_start;
1078                         unsigned win_max = win_min +
1079                                            config->ranges[j].window_len - 1;
1080
1081                         /* Allow data window inside its own virtual range */
1082                         if (j == i)
1083                                 continue;
1084
1085                         if (range_cfg->range_min <= sel_reg &&
1086                             sel_reg <= range_cfg->range_max) {
1087                                 dev_err(map->dev,
1088                                         "Range %d: selector for %d in window\n",
1089                                         i, j);
1090                                 goto err_range;
1091                         }
1092
1093                         if (!(win_max < range_cfg->range_min ||
1094                               win_min > range_cfg->range_max)) {
1095                                 dev_err(map->dev,
1096                                         "Range %d: window for %d in window\n",
1097                                         i, j);
1098                                 goto err_range;
1099                         }
1100                 }
1101
1102                 new = kzalloc(sizeof(*new), GFP_KERNEL);
1103                 if (new == NULL) {
1104                         ret = -ENOMEM;
1105                         goto err_range;
1106                 }
1107
1108                 new->map = map;
1109                 new->name = range_cfg->name;
1110                 new->range_min = range_cfg->range_min;
1111                 new->range_max = range_cfg->range_max;
1112                 new->selector_reg = range_cfg->selector_reg;
1113                 new->selector_mask = range_cfg->selector_mask;
1114                 new->selector_shift = range_cfg->selector_shift;
1115                 new->window_start = range_cfg->window_start;
1116                 new->window_len = range_cfg->window_len;
1117
1118                 if (!_regmap_range_add(map, new)) {
1119                         dev_err(map->dev, "Failed to add range %d\n", i);
1120                         kfree(new);
1121                         goto err_range;
1122                 }
1123
1124                 if (map->selector_work_buf == NULL) {
1125                         map->selector_work_buf =
1126                                 kzalloc(map->format.buf_size, GFP_KERNEL);
1127                         if (map->selector_work_buf == NULL) {
1128                                 ret = -ENOMEM;
1129                                 goto err_range;
1130                         }
1131                 }
1132         }
1133
1134         ret = regcache_init(map, config);
1135         if (ret != 0)
1136                 goto err_range;
1137
1138         if (dev) {
1139                 ret = regmap_attach_dev(dev, map, config);
1140                 if (ret != 0)
1141                         goto err_regcache;
1142         } else {
1143                 regmap_debugfs_init(map, config->name);
1144         }
1145
1146         return map;
1147
1148 err_regcache:
1149         regcache_exit(map);
1150 err_range:
1151         regmap_range_exit(map);
1152         kfree(map->work_buf);
1153 err_hwlock:
1154         if (map->hwlock)
1155                 hwspin_lock_free(map->hwlock);
1156 err_name:
1157         kfree_const(map->name);
1158 err_map:
1159         kfree(map);
1160 err:
1161         return ERR_PTR(ret);
1162 }
1163 EXPORT_SYMBOL_GPL(__regmap_init);
1164
/* devres destructor: tear down the regmap stored in the devres slot */
static void devm_regmap_release(struct device *dev, void *res)
{
	struct regmap **map = res;

	regmap_exit(*map);
}
1169
1170 struct regmap *__devm_regmap_init(struct device *dev,
1171                                   const struct regmap_bus *bus,
1172                                   void *bus_context,
1173                                   const struct regmap_config *config,
1174                                   struct lock_class_key *lock_key,
1175                                   const char *lock_name)
1176 {
1177         struct regmap **ptr, *regmap;
1178
1179         ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1180         if (!ptr)
1181                 return ERR_PTR(-ENOMEM);
1182
1183         regmap = __regmap_init(dev, bus, bus_context, config,
1184                                lock_key, lock_name);
1185         if (!IS_ERR(regmap)) {
1186                 *ptr = regmap;
1187                 devres_add(dev, ptr);
1188         } else {
1189                 devres_free(ptr);
1190         }
1191
1192         return regmap;
1193 }
1194 EXPORT_SYMBOL_GPL(__devm_regmap_init);
1195
1196 static void regmap_field_init(struct regmap_field *rm_field,
1197         struct regmap *regmap, struct reg_field reg_field)
1198 {
1199         rm_field->regmap = regmap;
1200         rm_field->reg = reg_field.reg;
1201         rm_field->shift = reg_field.lsb;
1202         rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1203         rm_field->id_size = reg_field.id_size;
1204         rm_field->id_offset = reg_field.id_offset;
1205 }
1206
1207 /**
1208  * devm_regmap_field_alloc() - Allocate and initialise a register field.
1209  *
1210  * @dev: Device that will be interacted with
1211  * @regmap: regmap bank in which this register field is located.
 *  * @reg_field: Register field within the bank.
1213  *
1214  * The return value will be an ERR_PTR() on error or a valid pointer
1215  * to a struct regmap_field. The regmap_field will be automatically freed
1216  * by the device management code.
1217  */
1218 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1219                 struct regmap *regmap, struct reg_field reg_field)
1220 {
1221         struct regmap_field *rm_field = devm_kzalloc(dev,
1222                                         sizeof(*rm_field), GFP_KERNEL);
1223         if (!rm_field)
1224                 return ERR_PTR(-ENOMEM);
1225
1226         regmap_field_init(rm_field, regmap, reg_field);
1227
1228         return rm_field;
1229
1230 }
1231 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1232
1233 /**
1234  * devm_regmap_field_free() - Free a register field allocated using
1235  *                            devm_regmap_field_alloc.
1236  *
1237  * @dev: Device that will be interacted with
1238  * @field: regmap field which should be freed.
1239  *
1240  * Free register field allocated using devm_regmap_field_alloc(). Usually
1241  * drivers need not call this function, as the memory allocated via devm
1242  * will be freed as per device-driver life-cyle.
1243  */
1244 void devm_regmap_field_free(struct device *dev,
1245         struct regmap_field *field)
1246 {
1247         devm_kfree(dev, field);
1248 }
1249 EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1250
1251 /**
1252  * regmap_field_alloc() - Allocate and initialise a register field.
1253  *
 *  * @regmap: regmap bank in which this register field is located.
 *  * @reg_field: Register field within the bank.
 *  *
 *  * The return value will be an ERR_PTR() on error or a valid pointer
 *  * to a struct regmap_field. The regmap_field should be freed by the
 *  * user once it's finished working with it using regmap_field_free().
1260  */
1261 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1262                 struct reg_field reg_field)
1263 {
1264         struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1265
1266         if (!rm_field)
1267                 return ERR_PTR(-ENOMEM);
1268
1269         regmap_field_init(rm_field, regmap, reg_field);
1270
1271         return rm_field;
1272 }
1273 EXPORT_SYMBOL_GPL(regmap_field_alloc);
1274
1275 /**
1276  * regmap_field_free() - Free register field allocated using
1277  *                       regmap_field_alloc.
1278  *
1279  * @field: regmap field which should be freed.
1280  */
1281 void regmap_field_free(struct regmap_field *field)
1282 {
1283         kfree(field);
1284 }
1285 EXPORT_SYMBOL_GPL(regmap_field_free);
1286
1287 /**
1288  * regmap_reinit_cache() - Reinitialise the current register cache
1289  *
1290  * @map: Register map to operate on.
1291  * @config: New configuration.  Only the cache data will be used.
1292  *
1293  * Discard any existing register cache for the map and initialize a
1294  * new cache.  This can be used to restore the cache to defaults or to
1295  * update the cache configuration to reflect runtime discovery of the
1296  * hardware.
1297  *
1298  * No explicit locking is done here, the user needs to ensure that
1299  * this function will not race with other calls to regmap.
1300  */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	/* Tear down the existing cache and its debugfs entries first */
	regcache_exit(map);
	regmap_debugfs_exit(map);

	/* Pick up the (possibly changed) register layout from @config */
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	/* Re-create debugfs before the cache so the new layout is visible */
	regmap_debugfs_init(map, config->name);

	/* Restore normal write-through cache operation */
	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1322
1323 /**
1324  * regmap_exit() - Free a previously allocated register map
1325  *
1326  * @map: Register map to operate on.
1327  */
1328 void regmap_exit(struct regmap *map)
1329 {
1330         struct regmap_async *async;
1331
1332         regcache_exit(map);
1333         regmap_debugfs_exit(map);
1334         regmap_range_exit(map);
1335         if (map->bus && map->bus->free_context)
1336                 map->bus->free_context(map->bus_context);
1337         kfree(map->work_buf);
1338         while (!list_empty(&map->async_free)) {
1339                 async = list_first_entry_or_null(&map->async_free,
1340                                                  struct regmap_async,
1341                                                  list);
1342                 list_del(&async->list);
1343                 kfree(async->work_buf);
1344                 kfree(async);
1345         }
1346         if (map->hwlock)
1347                 hwspin_lock_free(map->hwlock);
1348         kfree_const(map->name);
1349         kfree(map);
1350 }
1351 EXPORT_SYMBOL_GPL(regmap_exit);
1352
1353 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1354 {
1355         struct regmap **r = res;
1356         if (!r || !*r) {
1357                 WARN_ON(!r || !*r);
1358                 return 0;
1359         }
1360
1361         /* If the user didn't specify a name match any */
1362         if (data)
1363                 return (*r)->name == data;
1364         else
1365                 return 1;
1366 }
1367
1368 /**
1369  * dev_get_regmap() - Obtain the regmap (if any) for a device
1370  *
1371  * @dev: Device to retrieve the map for
1372  * @name: Optional name for the register map, usually NULL.
1373  *
1374  * Returns the regmap for the device if one is present, or NULL.  If
1375  * name is specified then it must match the name specified when
1376  * registering the device, if it is NULL then the first regmap found
1377  * will be used.  Devices with multiple register maps are very rare,
1378  * generic code should normally not need to specify a name.
1379  */
1380 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1381 {
1382         struct regmap **r = devres_find(dev, dev_get_regmap_release,
1383                                         dev_get_regmap_match, (void *)name);
1384
1385         if (!r)
1386                 return NULL;
1387         return *r;
1388 }
1389 EXPORT_SYMBOL_GPL(dev_get_regmap);
1390
1391 /**
1392  * regmap_get_device() - Obtain the device from a regmap
1393  *
1394  * @map: Register map to operate on.
1395  *
1396  * Returns the underlying device that the regmap has been created for.
1397  */
1398 struct device *regmap_get_device(struct regmap *map)
1399 {
1400         return map->dev;
1401 }
1402 EXPORT_SYMBOL_GPL(regmap_get_device);
1403
/*
 * Translate *reg from a virtual range address to the physical address
 * inside the range's data window, programming the selector register to
 * switch to the right page first when necessary.
 *
 * @map: the register map
 * @reg: in: virtual register address; out: physical window address
 * @range: the range node *reg falls into
 * @val_num: number of registers in the access (for boundary checks)
 *
 * Returns 0 on success or a negative errno.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	/* Position of *reg within the window, and which page it is on */
	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching, so the
		 * selector write does not clobber the caller's partially
		 * formatted transaction in map->work_buf.
		 */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	/* Report the physical address inside the window back to the caller */
	*reg = range->window_start + win_offset;

	return 0;
}
1451
1452 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1453                                           unsigned long mask)
1454 {
1455         u8 *buf;
1456         int i;
1457
1458         if (!mask || !map->work_buf)
1459                 return;
1460
1461         buf = map->work_buf;
1462
1463         for (i = 0; i < max_bytes; i++)
1464                 buf[i] |= (mask >> (8 * i)) & 0xff;
1465 }
1466
/*
 * Core raw write: format @reg into map->work_buf and push @val_len bytes
 * of pre-formatted value data to the device, handling cache write-through,
 * indirect-range window splitting, async submission and gather-write
 * fallback.
 *
 * NOTE(review): callers appear to hold the map's lock already (see e.g.
 * regmap_write() taking map->lock before _regmap_write()) - confirm.
 */
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	/* Value payload area of work_buf, after the register and padding */
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					       reg + regmap_get_offset(map, i)))
				return -EINVAL;

	/* Write-through: mirror every value into the register cache */
	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		/* In cache-only mode the hardware is not touched at all */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			/* Recurse for the part that fits this window */
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			/* Advance past the chunk just written */
			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		/* Switch to the right page and translate reg into the window */
		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	/* Render the register address (and any flag bits) into work_buf */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	/* Asynchronous path: queue the transfer and return immediately */
	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		/* Try to reuse a completed async descriptor from the pool */
		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		/* Gather from the caller's buffer, or send work_buf whole */
		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			/* Return the descriptor to the free pool on failure */
			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
1657
1658 /**
1659  * regmap_can_raw_write - Test if regmap_raw_write() is supported
1660  *
1661  * @map: Map to check.
1662  */
1663 bool regmap_can_raw_write(struct regmap *map)
1664 {
1665         return map->bus && map->bus->write && map->format.format_val &&
1666                 map->format.format_reg;
1667 }
1668 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1669
1670 /**
1671  * regmap_get_raw_read_max - Get the maximum size we can read
1672  *
1673  * @map: Map to check.
1674  */
1675 size_t regmap_get_raw_read_max(struct regmap *map)
1676 {
1677         return map->max_raw_read;
1678 }
1679 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1680
1681 /**
1682  * regmap_get_raw_write_max - Get the maximum size we can read
1683  *
1684  * @map: Map to check.
1685  */
1686 size_t regmap_get_raw_write_max(struct regmap *map)
1687 {
1688         return map->max_raw_write;
1689 }
1690 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1691
/*
 * Write path for maps whose format provides a single-shot format_write()
 * that renders the whole transaction (register and value) into
 * map->work_buf, which is then sent in one bus write.
 */
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	/* Indirect (paged) registers need the selector programmed first */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}
1719
/* Write path for buses that supply a native reg_write() operation */
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}
1727
1728 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1729                                  unsigned int val)
1730 {
1731         struct regmap *map = context;
1732
1733         WARN_ON(!map->bus || !map->format.format_val);
1734
1735         map->format.format_val(map->work_buf + map->format.reg_bytes
1736                                + map->format.pad_bytes, val, 0);
1737         return _regmap_raw_write_impl(map, reg,
1738                                       map->work_buf +
1739                                       map->format.reg_bytes +
1740                                       map->format.pad_bytes,
1741                                       map->format.val_bytes);
1742 }
1743
1744 static inline void *_regmap_map_get_context(struct regmap *map)
1745 {
1746         return (map->bus) ? map : map->bus_context;
1747 }
1748
/*
 * Write a register value with the map lock already held.
 *
 * Checks writeability and maintains the register cache before handing
 * the value to the map's reg_write backend.  Returns 0 on success or a
 * negative errno.
 */
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	/*
	 * Update the cache unless bypassed or writes are being batched
	 * for later (defer_caching).  In cache-only mode the cached
	 * value is all we write; mark the cache dirty for a later sync.
	 */
	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}
1775
1776 /**
1777  * regmap_write() - Write a value to a single register
1778  *
1779  * @map: Register map to write to
1780  * @reg: Register to write to
1781  * @val: Value to be written
1782  *
1783  * A value of zero will be returned on success, a negative errno will
1784  * be returned in error cases.
1785  */
1786 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1787 {
1788         int ret;
1789
1790         if (!IS_ALIGNED(reg, map->reg_stride))
1791                 return -EINVAL;
1792
1793         map->lock(map->lock_arg);
1794
1795         ret = _regmap_write(map, reg, val);
1796
1797         map->unlock(map->lock_arg);
1798
1799         return ret;
1800 }
1801 EXPORT_SYMBOL_GPL(regmap_write);
1802
1803 /**
1804  * regmap_write_async() - Write a value to a single register asynchronously
1805  *
1806  * @map: Register map to write to
1807  * @reg: Register to write to
1808  * @val: Value to be written
1809  *
1810  * A value of zero will be returned on success, a negative errno will
1811  * be returned in error cases.
1812  */
1813 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1814 {
1815         int ret;
1816
1817         if (!IS_ALIGNED(reg, map->reg_stride))
1818                 return -EINVAL;
1819
1820         map->lock(map->lock_arg);
1821
1822         map->async = true;
1823
1824         ret = _regmap_write(map, reg, val);
1825
1826         map->async = false;
1827
1828         map->unlock(map->lock_arg);
1829
1830         return ret;
1831 }
1832 EXPORT_SYMBOL_GPL(regmap_write_async);
1833
1834 int _regmap_raw_write(struct regmap *map, unsigned int reg,
1835                       const void *val, size_t val_len)
1836 {
1837         size_t val_bytes = map->format.val_bytes;
1838         size_t val_count = val_len / val_bytes;
1839         size_t chunk_count, chunk_bytes;
1840         size_t chunk_regs = val_count;
1841         int ret, i;
1842
1843         if (!val_count)
1844                 return -EINVAL;
1845
1846         if (map->use_single_write)
1847                 chunk_regs = 1;
1848         else if (map->max_raw_write && val_len > map->max_raw_write)
1849                 chunk_regs = map->max_raw_write / val_bytes;
1850
1851         chunk_count = val_count / chunk_regs;
1852         chunk_bytes = chunk_regs * val_bytes;
1853
1854         /* Write as many bytes as possible with chunk_size */
1855         for (i = 0; i < chunk_count; i++) {
1856                 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
1857                 if (ret)
1858                         return ret;
1859
1860                 reg += regmap_get_offset(map, chunk_regs);
1861                 val += chunk_bytes;
1862                 val_len -= chunk_bytes;
1863         }
1864
1865         /* Write remaining bytes */
1866         if (val_len)
1867                 ret = _regmap_raw_write_impl(map, reg, val, val_len);
1868
1869         return ret;
1870 }
1871
1872 /**
1873  * regmap_raw_write() - Write raw values to one or more registers
1874  *
1875  * @map: Register map to write to
1876  * @reg: Initial register to write to
1877  * @val: Block of data to be written, laid out for direct transmission to the
1878  *       device
1879  * @val_len: Length of data pointed to by val.
1880  *
1881  * This function is intended to be used for things like firmware
1882  * download where a large block of data needs to be transferred to the
1883  * device.  No formatting will be done on the data provided.
1884  *
1885  * A value of zero will be returned on success, a negative errno will
1886  * be returned in error cases.
1887  */
1888 int regmap_raw_write(struct regmap *map, unsigned int reg,
1889                      const void *val, size_t val_len)
1890 {
1891         int ret;
1892
1893         if (!regmap_can_raw_write(map))
1894                 return -EINVAL;
1895         if (val_len % map->format.val_bytes)
1896                 return -EINVAL;
1897
1898         map->lock(map->lock_arg);
1899
1900         ret = _regmap_raw_write(map, reg, val, val_len);
1901
1902         map->unlock(map->lock_arg);
1903
1904         return ret;
1905 }
1906 EXPORT_SYMBOL_GPL(regmap_raw_write);
1907
1908 /**
1909  * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
1910  *                                   register field.
1911  *
1912  * @field: Register field to write to
1913  * @mask: Bitmask to change
1914  * @val: Value to be written
1915  * @change: Boolean indicating if a write was done
1916  * @async: Boolean indicating asynchronously
1917  * @force: Boolean indicating use force update
1918  *
1919  * Perform a read/modify/write cycle on the register field with change,
1920  * async, force option.
1921  *
1922  * A value of zero will be returned on success, a negative errno will
1923  * be returned in error cases.
1924  */
1925 int regmap_field_update_bits_base(struct regmap_field *field,
1926                                   unsigned int mask, unsigned int val,
1927                                   bool *change, bool async, bool force)
1928 {
1929         mask = (mask << field->shift) & field->mask;
1930
1931         return regmap_update_bits_base(field->regmap, field->reg,
1932                                        mask, val << field->shift,
1933                                        change, async, force);
1934 }
1935 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
1936
1937 /**
1938  * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
1939  *                                    register field with port ID
1940  *
1941  * @field: Register field to write to
1942  * @id: port ID
1943  * @mask: Bitmask to change
1944  * @val: Value to be written
1945  * @change: Boolean indicating if a write was done
1946  * @async: Boolean indicating asynchronously
1947  * @force: Boolean indicating use force update
1948  *
1949  * A value of zero will be returned on success, a negative errno will
1950  * be returned in error cases.
1951  */
1952 int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
1953                                    unsigned int mask, unsigned int val,
1954                                    bool *change, bool async, bool force)
1955 {
1956         if (id >= field->id_size)
1957                 return -EINVAL;
1958
1959         mask = (mask << field->shift) & field->mask;
1960
1961         return regmap_update_bits_base(field->regmap,
1962                                        field->reg + (field->id_offset * id),
1963                                        mask, val << field->shift,
1964                                        change, async, force);
1965 }
1966 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
1967
1968 /**
1969  * regmap_bulk_write() - Write multiple registers to the device
1970  *
1971  * @map: Register map to write to
1972  * @reg: First register to be write from
1973  * @val: Block of data to be written, in native register size for device
1974  * @val_count: Number of registers to write
1975  *
1976  * This function is intended to be used for writing a large block of
1977  * data to the device either in single transfer or multiple transfer.
1978  *
1979  * A value of zero will be returned on success, a negative errno will
1980  * be returned in error cases.
1981  */
1982 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1983                      size_t val_count)
1984 {
1985         int ret = 0, i;
1986         size_t val_bytes = map->format.val_bytes;
1987
1988         if (!IS_ALIGNED(reg, map->reg_stride))
1989                 return -EINVAL;
1990
1991         /*
1992          * Some devices don't support bulk write, for them we have a series of
1993          * single write operations.
1994          */
1995         if (!map->bus || !map->format.parse_inplace) {
1996                 map->lock(map->lock_arg);
1997                 for (i = 0; i < val_count; i++) {
1998                         unsigned int ival;
1999
2000                         switch (val_bytes) {
2001                         case 1:
2002                                 ival = *(u8 *)(val + (i * val_bytes));
2003                                 break;
2004                         case 2:
2005                                 ival = *(u16 *)(val + (i * val_bytes));
2006                                 break;
2007                         case 4:
2008                                 ival = *(u32 *)(val + (i * val_bytes));
2009                                 break;
2010 #ifdef CONFIG_64BIT
2011                         case 8:
2012                                 ival = *(u64 *)(val + (i * val_bytes));
2013                                 break;
2014 #endif
2015                         default:
2016                                 ret = -EINVAL;
2017                                 goto out;
2018                         }
2019
2020                         ret = _regmap_write(map,
2021                                             reg + regmap_get_offset(map, i),
2022                                             ival);
2023                         if (ret != 0)
2024                                 goto out;
2025                 }
2026 out:
2027                 map->unlock(map->lock_arg);
2028         } else {
2029                 void *wval;
2030
2031                 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
2032                 if (!wval)
2033                         return -ENOMEM;
2034
2035                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2036                         map->format.parse_inplace(wval + i);
2037
2038                 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2039
2040                 kfree(wval);
2041         }
2042         return ret;
2043 }
2044 EXPORT_SYMBOL_GPL(regmap_bulk_write);
2045
2046 /*
2047  * _regmap_raw_multi_reg_write()
2048  *
2049  * the (register,newvalue) pairs in regs have not been formatted, but
2050  * they are all in the same page and have been changed to being page
2051  * relative. The page register has been written if that was necessary.
2052  */
2053 static int _regmap_raw_multi_reg_write(struct regmap *map,
2054                                        const struct reg_sequence *regs,
2055                                        size_t num_regs)
2056 {
2057         int ret;
2058         void *buf;
2059         int i;
2060         u8 *u8;
2061         size_t val_bytes = map->format.val_bytes;
2062         size_t reg_bytes = map->format.reg_bytes;
2063         size_t pad_bytes = map->format.pad_bytes;
2064         size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2065         size_t len = pair_size * num_regs;
2066
2067         if (!len)
2068                 return -EINVAL;
2069
2070         buf = kzalloc(len, GFP_KERNEL);
2071         if (!buf)
2072                 return -ENOMEM;
2073
2074         /* We have to linearise by hand. */
2075
2076         u8 = buf;
2077
2078         for (i = 0; i < num_regs; i++) {
2079                 unsigned int reg = regs[i].reg;
2080                 unsigned int val = regs[i].def;
2081                 trace_regmap_hw_write_start(map, reg, 1);
2082                 map->format.format_reg(u8, reg, map->reg_shift);
2083                 u8 += reg_bytes + pad_bytes;
2084                 map->format.format_val(u8, val, 0);
2085                 u8 += val_bytes;
2086         }
2087         u8 = buf;
2088         *u8 |= map->write_flag_mask;
2089
2090         ret = map->bus->write(map->bus_context, buf, len);
2091
2092         kfree(buf);
2093
2094         for (i = 0; i < num_regs; i++) {
2095                 int reg = regs[i].reg;
2096                 trace_regmap_hw_write_done(map, reg, 1);
2097         }
2098         return ret;
2099 }
2100
2101 static unsigned int _regmap_register_page(struct regmap *map,
2102                                           unsigned int reg,
2103                                           struct regmap_range_node *range)
2104 {
2105         unsigned int win_page = (reg - range->range_min) / range->window_len;
2106
2107         return win_page;
2108 }
2109
/*
 * Write a sequence of register/value pairs that may span window pages
 * and/or carry per-entry delays, preserving the caller's write order.
 *
 * NOTE(review): @regs is modified in place (_regmap_select_page()
 * rewrites registers to be page relative), so callers pass a scratch
 * copy — see the kmemdup() in _regmap_multi_reg_write().
 *
 * Returns 0 on success or a negative errno.
 */
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			/* First entry establishes the current page */
			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

				/* For situations where the first write requires
				 * a delay we need to make sure we don't call
				 * raw_multi_reg_write with n=0
				 * This can't occur with page breaks as we
				 * never write on the first iteration
				 */
				if (regs[i].delay_us && i == 0)
					n = 1;

				/* Flush everything accumulated so far, in order */
				ret = _regmap_raw_multi_reg_write(map, base, n);
				if (ret != 0)
					return ret;

				if (regs[i].delay_us)
					udelay(regs[i].delay_us);

				base += n;
				n = 0;

				if (page_change) {
					ret = _regmap_select_page(map,
								  &base[n].reg,
								  range, 1);
					if (ret != 0)
						return ret;

					page_change = 0;
				}

		}

	}
	/* Flush any trailing entries after the last chop point */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
2186
/*
 * Core multi-register write with the map lock held.
 *
 * Falls back to one _regmap_write() per entry when the bus cannot do
 * multi writes; otherwise validates and caches all values and emits a
 * single raw multi-register transfer, splitting the sequence on page
 * breaks or delays.  Returns 0 on success or a negative errno.
 */
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* No bus support for multi write: degrade to individual writes */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate every target register up front */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	/* Keep the register cache coherent with what we're about to send */
	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
				"Error in caching of register: %x ret: %d\n",
								reg, ret);
				return ret;
			}
		}
		/* Cache-only mode: nothing goes to the hardware */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			/* Work on a copy: the paged path rewrites registers */
			struct reg_sequence *base = kmemdup(regs, len,
							   GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	/* No pages or delays involved: one linearised transfer */
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2261
2262 /**
2263  * regmap_multi_reg_write() - Write multiple registers to the device
2264  *
2265  * @map: Register map to write to
2266  * @regs: Array of structures containing register,value to be written
2267  * @num_regs: Number of registers to write
2268  *
2269  * Write multiple registers to the device where the set of register, value
2270  * pairs are supplied in any order, possibly not all in a single range.
2271  *
2272  * The 'normal' block write mode will send ultimately send data on the
2273  * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2274  * addressed. However, this alternative block multi write mode will send
2275  * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2276  * must of course support the mode.
2277  *
2278  * A value of zero will be returned on success, a negative errno will be
2279  * returned in error cases.
2280  */
2281 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2282                            int num_regs)
2283 {
2284         int ret;
2285
2286         map->lock(map->lock_arg);
2287
2288         ret = _regmap_multi_reg_write(map, regs, num_regs);
2289
2290         map->unlock(map->lock_arg);
2291
2292         return ret;
2293 }
2294 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2295
2296 /**
2297  * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2298  *                                     device but not the cache
2299  *
2300  * @map: Register map to write to
2301  * @regs: Array of structures containing register,value to be written
2302  * @num_regs: Number of registers to write
2303  *
2304  * Write multiple registers to the device but not the cache where the set
2305  * of register are supplied in any order.
2306  *
2307  * This function is intended to be used for writing a large block of data
2308  * atomically to the device in single transfer for those I2C client devices
2309  * that implement this alternative block write mode.
2310  *
2311  * A value of zero will be returned on success, a negative errno will
2312  * be returned in error cases.
2313  */
2314 int regmap_multi_reg_write_bypassed(struct regmap *map,
2315                                     const struct reg_sequence *regs,
2316                                     int num_regs)
2317 {
2318         int ret;
2319         bool bypass;
2320
2321         map->lock(map->lock_arg);
2322
2323         bypass = map->cache_bypass;
2324         map->cache_bypass = true;
2325
2326         ret = _regmap_multi_reg_write(map, regs, num_regs);
2327
2328         map->cache_bypass = bypass;
2329
2330         map->unlock(map->lock_arg);
2331
2332         return ret;
2333 }
2334 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2335
2336 /**
2337  * regmap_raw_write_async() - Write raw values to one or more registers
2338  *                            asynchronously
2339  *
2340  * @map: Register map to write to
2341  * @reg: Initial register to write to
2342  * @val: Block of data to be written, laid out for direct transmission to the
2343  *       device.  Must be valid until regmap_async_complete() is called.
2344  * @val_len: Length of data pointed to by val.
2345  *
2346  * This function is intended to be used for things like firmware
2347  * download where a large block of data needs to be transferred to the
2348  * device.  No formatting will be done on the data provided.
2349  *
2350  * If supported by the underlying bus the write will be scheduled
2351  * asynchronously, helping maximise I/O speed on higher speed buses
2352  * like SPI.  regmap_async_complete() can be called to ensure that all
2353  * asynchrnous writes have been completed.
2354  *
2355  * A value of zero will be returned on success, a negative errno will
2356  * be returned in error cases.
2357  */
2358 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2359                            const void *val, size_t val_len)
2360 {
2361         int ret;
2362
2363         if (val_len % map->format.val_bytes)
2364                 return -EINVAL;
2365         if (!IS_ALIGNED(reg, map->reg_stride))
2366                 return -EINVAL;
2367
2368         map->lock(map->lock_arg);
2369
2370         map->async = true;
2371
2372         ret = _regmap_raw_write(map, reg, val, val_len);
2373
2374         map->async = false;
2375
2376         map->unlock(map->lock_arg);
2377
2378         return ret;
2379 }
2380 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2381
/*
 * Raw hardware read with the map lock held.
 *
 * Formats the register address (with the read flag mask applied) into
 * work_buf, selects the window page if needed, and reads @val_len bytes
 * straight into @val via the bus ->read() op.  Returns 0 on success or
 * a negative errno.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	/* Indirect (paged) access: program the page register first */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	/* Serialise the address and OR in the bus read flag bits */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
2414
2415 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2416                                 unsigned int *val)
2417 {
2418         struct regmap *map = context;
2419
2420         return map->bus->reg_read(map->bus_context, reg, val);
2421 }
2422
2423 static int _regmap_bus_read(void *context, unsigned int reg,
2424                             unsigned int *val)
2425 {
2426         int ret;
2427         struct regmap *map = context;
2428         void *work_val = map->work_buf + map->format.reg_bytes +
2429                 map->format.pad_bytes;
2430
2431         if (!map->format.parse_val)
2432                 return -EINVAL;
2433
2434         ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
2435         if (ret == 0)
2436                 *val = map->format.parse_val(work_val);
2437
2438         return ret;
2439 }
2440
/*
 * Read a register with the map lock held.
 *
 * Serves the value from the register cache when possible; otherwise
 * (unless cache_only forbids hardware access) reads from the device
 * and refreshes the cache with the result.  Returns 0 on success or a
 * negative errno.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* Cache hit: done, no hardware access needed */
	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	/* Cache miss but hardware access is forbidden */
	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		/* Populate the cache with the freshly read value */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2472
2473 /**
2474  * regmap_read() - Read a value from a single register
2475  *
2476  * @map: Register map to read from
2477  * @reg: Register to be read from
2478  * @val: Pointer to store read value
2479  *
2480  * A value of zero will be returned on success, a negative errno will
2481  * be returned in error cases.
2482  */
2483 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2484 {
2485         int ret;
2486
2487         if (!IS_ALIGNED(reg, map->reg_stride))
2488                 return -EINVAL;
2489
2490         map->lock(map->lock_arg);
2491
2492         ret = _regmap_read(map, reg, val);
2493
2494         map->unlock(map->lock_arg);
2495
2496         return ret;
2497 }
2498 EXPORT_SYMBOL_GPL(regmap_read);
2499
2500 /**
2501  * regmap_raw_read() - Read raw data from the device
2502  *
2503  * @map: Register map to read from
2504  * @reg: First register to be read from
2505  * @val: Pointer to store read value
2506  * @val_len: Size of data to read
2507  *
2508  * A value of zero will be returned on success, a negative errno will
2509  * be returned in error cases.
2510  */
2511 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2512                     size_t val_len)
2513 {
2514         size_t val_bytes = map->format.val_bytes;
2515         size_t val_count = val_len / val_bytes;
2516         unsigned int v;
2517         int ret, i;
2518
2519         if (!map->bus)
2520                 return -EINVAL;
2521         if (val_len % map->format.val_bytes)
2522                 return -EINVAL;
2523         if (!IS_ALIGNED(reg, map->reg_stride))
2524                 return -EINVAL;
2525         if (val_count == 0)
2526                 return -EINVAL;
2527
2528         map->lock(map->lock_arg);
2529
2530         if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2531             map->cache_type == REGCACHE_NONE) {
2532                 size_t chunk_count, chunk_bytes;
2533                 size_t chunk_regs = val_count;
2534
2535                 if (!map->bus->read) {
2536                         ret = -ENOTSUPP;
2537                         goto out;
2538                 }
2539
2540                 if (map->use_single_read)
2541                         chunk_regs = 1;
2542                 else if (map->max_raw_read && val_len > map->max_raw_read)
2543                         chunk_regs = map->max_raw_read / val_bytes;
2544
2545                 chunk_count = val_count / chunk_regs;
2546                 chunk_bytes = chunk_regs * val_bytes;
2547
2548                 /* Read bytes that fit into whole chunks */
2549                 for (i = 0; i < chunk_count; i++) {
2550                         ret = _regmap_raw_read(map, reg, val, chunk_bytes);
2551                         if (ret != 0)
2552                                 goto out;
2553
2554                         reg += regmap_get_offset(map, chunk_regs);
2555                         val += chunk_bytes;
2556                         val_len -= chunk_bytes;
2557                 }
2558
2559                 /* Read remaining bytes */
2560                 if (val_len) {
2561                         ret = _regmap_raw_read(map, reg, val, val_len);
2562                         if (ret != 0)
2563                                 goto out;
2564                 }
2565         } else {
2566                 /* Otherwise go word by word for the cache; should be low
2567                  * cost as we expect to hit the cache.
2568                  */
2569                 for (i = 0; i < val_count; i++) {
2570                         ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2571                                            &v);
2572                         if (ret != 0)
2573                                 goto out;
2574
2575                         map->format.format_val(val + (i * val_bytes), v, 0);
2576                 }
2577         }
2578
2579  out:
2580         map->unlock(map->lock_arg);
2581
2582         return ret;
2583 }
2584 EXPORT_SYMBOL_GPL(regmap_raw_read);
2585
2586 /**
2587  * regmap_noinc_read(): Read data from a register without incrementing the
2588  *                      register number
2589  *
2590  * @map: Register map to read from
2591  * @reg: Register to read from
2592  * @val: Pointer to data buffer
2593  * @val_len: Length of output buffer in bytes.
2594  *
2595  * The regmap API usually assumes that bulk bus read operations will read a
2596  * range of registers. Some devices have certain registers for which a read
2597  * operation read will read from an internal FIFO.
2598  *
2599  * The target register must be volatile but registers after it can be
2600  * completely unrelated cacheable registers.
2601  *
2602  * This will attempt multiple reads as required to read val_len bytes.
2603  *
2604  * A value of zero will be returned on success, a negative errno will be
2605  * returned in error cases.
2606  */
2607 int regmap_noinc_read(struct regmap *map, unsigned int reg,
2608                       void *val, size_t val_len)
2609 {
2610         size_t read_len;
2611         int ret;
2612
2613         if (!map->bus)
2614                 return -EINVAL;
2615         if (!map->bus->read)
2616                 return -ENOTSUPP;
2617         if (val_len % map->format.val_bytes)
2618                 return -EINVAL;
2619         if (!IS_ALIGNED(reg, map->reg_stride))
2620                 return -EINVAL;
2621         if (val_len == 0)
2622                 return -EINVAL;
2623
2624         map->lock(map->lock_arg);
2625
2626         if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
2627                 ret = -EINVAL;
2628                 goto out_unlock;
2629         }
2630
2631         while (val_len) {
2632                 if (map->max_raw_read && map->max_raw_read < val_len)
2633                         read_len = map->max_raw_read;
2634                 else
2635                         read_len = val_len;
2636                 ret = _regmap_raw_read(map, reg, val, read_len);
2637                 if (ret)
2638                         goto out_unlock;
2639                 val = ((u8 *)val) + read_len;
2640                 val_len -= read_len;
2641         }
2642
2643 out_unlock:
2644         map->unlock(map->lock_arg);
2645         return ret;
2646 }
2647 EXPORT_SYMBOL_GPL(regmap_noinc_read);
2648
2649 /**
2650  * regmap_field_read(): Read a value to a single register field
2651  *
2652  * @field: Register field to read from
2653  * @val: Pointer to store read value
2654  *
2655  * A value of zero will be returned on success, a negative errno will
2656  * be returned in error cases.
2657  */
2658 int regmap_field_read(struct regmap_field *field, unsigned int *val)
2659 {
2660         int ret;
2661         unsigned int reg_val;
2662         ret = regmap_read(field->regmap, field->reg, &reg_val);
2663         if (ret != 0)
2664                 return ret;
2665
2666         reg_val &= field->mask;
2667         reg_val >>= field->shift;
2668         *val = reg_val;
2669
2670         return ret;
2671 }
2672 EXPORT_SYMBOL_GPL(regmap_field_read);
2673
2674 /**
2675  * regmap_fields_read() - Read a value to a single register field with port ID
2676  *
2677  * @field: Register field to read from
2678  * @id: port ID
2679  * @val: Pointer to store read value
2680  *
2681  * A value of zero will be returned on success, a negative errno will
2682  * be returned in error cases.
2683  */
2684 int regmap_fields_read(struct regmap_field *field, unsigned int id,
2685                        unsigned int *val)
2686 {
2687         int ret;
2688         unsigned int reg_val;
2689
2690         if (id >= field->id_size)
2691                 return -EINVAL;
2692
2693         ret = regmap_read(field->regmap,
2694                           field->reg + (field->id_offset * id),
2695                           &reg_val);
2696         if (ret != 0)
2697                 return ret;
2698
2699         reg_val &= field->mask;
2700         reg_val >>= field->shift;
2701         *val = reg_val;
2702
2703         return ret;
2704 }
2705 EXPORT_SYMBOL_GPL(regmap_fields_read);
2706
2707 /**
2708  * regmap_bulk_read() - Read multiple registers from the device
2709  *
2710  * @map: Register map to read from
2711  * @reg: First register to be read from
2712  * @val: Pointer to store read value, in native register size for device
2713  * @val_count: Number of registers to read
2714  *
2715  * A value of zero will be returned on success, a negative errno will
2716  * be returned in error cases.
2717  */
2718 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2719                      size_t val_count)
2720 {
2721         int ret, i;
2722         size_t val_bytes = map->format.val_bytes;
2723         bool vol = regmap_volatile_range(map, reg, val_count);
2724
2725         if (!IS_ALIGNED(reg, map->reg_stride))
2726                 return -EINVAL;
2727         if (val_count == 0)
2728                 return -EINVAL;
2729
2730         if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2731                 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
2732                 if (ret != 0)
2733                         return ret;
2734
2735                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2736                         map->format.parse_inplace(val + i);
2737         } else {
2738 #ifdef CONFIG_64BIT
2739                 u64 *u64 = val;
2740 #endif
2741                 u32 *u32 = val;
2742                 u16 *u16 = val;
2743                 u8 *u8 = val;
2744
2745                 map->lock(map->lock_arg);
2746
2747                 for (i = 0; i < val_count; i++) {
2748                         unsigned int ival;
2749
2750                         ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2751                                            &ival);
2752                         if (ret != 0)
2753                                 goto out;
2754
2755                         switch (map->format.val_bytes) {
2756 #ifdef CONFIG_64BIT
2757                         case 8:
2758                                 u64[i] = ival;
2759                                 break;
2760 #endif
2761                         case 4:
2762                                 u32[i] = ival;
2763                                 break;
2764                         case 2:
2765                                 u16[i] = ival;
2766                                 break;
2767                         case 1:
2768                                 u8[i] = ival;
2769                                 break;
2770                         default:
2771                                 ret = -EINVAL;
2772                                 goto out;
2773                         }
2774                 }
2775
2776 out:
2777                 map->unlock(map->lock_arg);
2778         }
2779
2780         return ret;
2781 }
2782 EXPORT_SYMBOL_GPL(regmap_bulk_read);
2783
2784 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2785                                unsigned int mask, unsigned int val,
2786                                bool *change, bool force_write)
2787 {
2788         int ret;
2789         unsigned int tmp, orig;
2790
2791         if (change)
2792                 *change = false;
2793
2794         if (regmap_volatile(map, reg) && map->reg_update_bits) {
2795                 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
2796                 if (ret == 0 && change)
2797                         *change = true;
2798         } else {
2799                 ret = _regmap_read(map, reg, &orig);
2800                 if (ret != 0)
2801                         return ret;
2802
2803                 tmp = orig & ~mask;
2804                 tmp |= val & mask;
2805
2806                 if (force_write || (tmp != orig)) {
2807                         ret = _regmap_write(map, reg, tmp);
2808                         if (ret == 0 && change)
2809                                 *change = true;
2810                 }
2811         }
2812
2813         return ret;
2814 }
2815
/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating asynchronously
 * @force: Boolean indicating use force update
 *
 * Perform a read/modify/write cycle on a register map with change, async, force
 * options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	/* The async flag is only toggled with the map lock held. */
	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	/* Restore synchronous operation before dropping the lock. */
	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
2857
/*
 * Completion callback invoked by bus drivers when an async transfer
 * finishes.  Recycles the async descriptor and wakes any waiter in
 * regmap_async_complete() once the pending list drains.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	/*
	 * NOTE(review): plain spin_lock here, while regmap_async_is_done()
	 * uses spin_lock_irqsave - presumably callers run with IRQs
	 * already disabled; confirm against the bus drivers.
	 */
	spin_lock(&map->async_lock);
	/* Move the descriptor to the free list for reuse. */
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	/* Latest non-zero status wins; collected by regmap_async_complete(). */
	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
2878
2879 static int regmap_async_is_done(struct regmap *map)
2880 {
2881         unsigned long flags;
2882         int ret;
2883
2884         spin_lock_irqsave(&map->async_lock, flags);
2885         ret = list_empty(&map->async_list);
2886         spin_unlock_irqrestore(&map->async_lock, flags);
2887
2888         return ret;
2889 }
2890
/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed.  Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	/* Sleep until regmap_async_complete_cb() drains the pending list. */
	wait_event(map->async_waitq, regmap_async_is_done(map));

	/* Collect and clear the sticky error left by the completed batch. */
	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
2922
2923 /**
2924  * regmap_register_patch - Register and apply register updates to be applied
2925  *                         on device initialistion
2926  *
2927  * @map: Register map to apply updates to.
2928  * @regs: Values to update.
2929  * @num_regs: Number of entries in regs.
2930  *
2931  * Register a set of register updates to be applied to the device
2932  * whenever the device registers are synchronised with the cache and
2933  * apply them immediately.  Typically this is used to apply
2934  * corrections to be applied to the device defaults on startup, such
2935  * as the updates some vendors provide to undocumented registers.
2936  *
2937  * The caller must ensure that this function cannot be called
2938  * concurrently with either itself or regcache_sync().
2939  */
2940 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
2941                           int num_regs)
2942 {
2943         struct reg_sequence *p;
2944         int ret;
2945         bool bypass;
2946
2947         if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
2948             num_regs))
2949                 return 0;
2950
2951         p = krealloc(map->patch,
2952                      sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
2953                      GFP_KERNEL);
2954         if (p) {
2955                 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
2956                 map->patch = p;
2957                 map->patch_regs += num_regs;
2958         } else {
2959                 return -ENOMEM;
2960         }
2961
2962         map->lock(map->lock_arg);
2963
2964         bypass = map->cache_bypass;
2965
2966         map->cache_bypass = true;
2967         map->async = true;
2968
2969         ret = _regmap_multi_reg_write(map, regs, num_regs);
2970
2971         map->async = false;
2972         map->cache_bypass = bypass;
2973
2974         map->unlock(map->lock_arg);
2975
2976         regmap_async_complete(map);
2977
2978         return ret;
2979 }
2980 EXPORT_SYMBOL_GPL(regmap_register_patch);
2981
2982 /**
2983  * regmap_get_val_bytes() - Report the size of a register value
2984  *
2985  * @map: Register map to operate on.
2986  *
2987  * Report the size of a register value, mainly intended to for use by
2988  * generic infrastructure built on top of regmap.
2989  */
2990 int regmap_get_val_bytes(struct regmap *map)
2991 {
2992         if (map->format.format_write)
2993                 return -EINVAL;
2994
2995         return map->format.val_bytes;
2996 }
2997 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2998
2999 /**
3000  * regmap_get_max_register() - Report the max register value
3001  *
3002  * @map: Register map to operate on.
3003  *
3004  * Report the max register value, mainly intended to for use by
3005  * generic infrastructure built on top of regmap.
3006  */
3007 int regmap_get_max_register(struct regmap *map)
3008 {
3009         return map->max_register ? map->max_register : -EINVAL;
3010 }
3011 EXPORT_SYMBOL_GPL(regmap_get_max_register);
3012
/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3026
3027 int regmap_parse_val(struct regmap *map, const void *buf,
3028                         unsigned int *val)
3029 {
3030         if (!map->format.parse_val)
3031                 return -EINVAL;
3032
3033         *val = map->format.parse_val(buf);
3034
3035         return 0;
3036 }
3037 EXPORT_SYMBOL_GPL(regmap_parse_val);
3038
/* One-time init: register the regmap debugfs support at postcore time. */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);