/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"
/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
39 static inline bool regmap_should_log(struct regmap *map)
41 return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
44 static inline bool regmap_should_log(struct regmap *map) { return false; }
48 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
49 unsigned int mask, unsigned int val,
50 bool *change, bool force_write);
52 static int _regmap_bus_reg_read(void *context, unsigned int reg,
54 static int _regmap_bus_read(void *context, unsigned int reg,
56 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
58 static int _regmap_bus_reg_write(void *context, unsigned int reg,
60 static int _regmap_bus_raw_write(void *context, unsigned int reg,
63 bool regmap_reg_in_ranges(unsigned int reg,
64 const struct regmap_range *ranges,
67 const struct regmap_range *r;
70 for (i = 0, r = ranges; i < nranges; i++, r++)
71 if (regmap_reg_in_range(reg, r))
75 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
77 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
78 const struct regmap_access_table *table)
80 /* Check "no ranges" first */
81 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
84 /* In case zero "yes ranges" are supplied, any reg is OK */
85 if (!table->n_yes_ranges)
88 return regmap_reg_in_ranges(reg, table->yes_ranges,
91 EXPORT_SYMBOL_GPL(regmap_check_range_table);
93 bool regmap_writeable(struct regmap *map, unsigned int reg)
95 if (map->max_register && reg > map->max_register)
98 if (map->writeable_reg)
99 return map->writeable_reg(map->dev, reg);
102 return regmap_check_range_table(map, reg, map->wr_table);
107 bool regmap_cached(struct regmap *map, unsigned int reg)
112 if (map->cache_type == REGCACHE_NONE)
118 if (map->max_register && reg > map->max_register)
121 map->lock(map->lock_arg);
122 ret = regcache_read(map, reg, &val);
123 map->unlock(map->lock_arg);
130 bool regmap_readable(struct regmap *map, unsigned int reg)
135 if (map->max_register && reg > map->max_register)
138 if (map->format.format_write)
141 if (map->readable_reg)
142 return map->readable_reg(map->dev, reg);
145 return regmap_check_range_table(map, reg, map->rd_table);
150 bool regmap_volatile(struct regmap *map, unsigned int reg)
152 if (!map->format.format_write && !regmap_readable(map, reg))
155 if (map->volatile_reg)
156 return map->volatile_reg(map->dev, reg);
158 if (map->volatile_table)
159 return regmap_check_range_table(map, reg, map->volatile_table);
167 bool regmap_precious(struct regmap *map, unsigned int reg)
169 if (!regmap_readable(map, reg))
172 if (map->precious_reg)
173 return map->precious_reg(map->dev, reg);
175 if (map->precious_table)
176 return regmap_check_range_table(map, reg, map->precious_table);
181 bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
183 if (map->readable_noinc_reg)
184 return map->readable_noinc_reg(map->dev, reg);
186 if (map->rd_noinc_table)
187 return regmap_check_range_table(map, reg, map->rd_noinc_table);
192 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
197 for (i = 0; i < num; i++)
198 if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
204 static void regmap_format_2_6_write(struct regmap *map,
205 unsigned int reg, unsigned int val)
207 u8 *out = map->work_buf;
209 *out = (reg << 6) | val;
212 static void regmap_format_4_12_write(struct regmap *map,
213 unsigned int reg, unsigned int val)
215 __be16 *out = map->work_buf;
216 *out = cpu_to_be16((reg << 12) | val);
219 static void regmap_format_7_9_write(struct regmap *map,
220 unsigned int reg, unsigned int val)
222 __be16 *out = map->work_buf;
223 *out = cpu_to_be16((reg << 9) | val);
226 static void regmap_format_10_14_write(struct regmap *map,
227 unsigned int reg, unsigned int val)
229 u8 *out = map->work_buf;
232 out[1] = (val >> 8) | (reg << 6);
236 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
243 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
247 b[0] = cpu_to_be16(val << shift);
250 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
254 b[0] = cpu_to_le16(val << shift);
257 static void regmap_format_16_native(void *buf, unsigned int val,
260 *(u16 *)buf = val << shift;
263 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
274 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
278 b[0] = cpu_to_be32(val << shift);
281 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
285 b[0] = cpu_to_le32(val << shift);
288 static void regmap_format_32_native(void *buf, unsigned int val,
291 *(u32 *)buf = val << shift;
295 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
299 b[0] = cpu_to_be64((u64)val << shift);
302 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
306 b[0] = cpu_to_le64((u64)val << shift);
309 static void regmap_format_64_native(void *buf, unsigned int val,
312 *(u64 *)buf = (u64)val << shift;
316 static void regmap_parse_inplace_noop(void *buf)
320 static unsigned int regmap_parse_8(const void *buf)
327 static unsigned int regmap_parse_16_be(const void *buf)
329 const __be16 *b = buf;
331 return be16_to_cpu(b[0]);
334 static unsigned int regmap_parse_16_le(const void *buf)
336 const __le16 *b = buf;
338 return le16_to_cpu(b[0]);
341 static void regmap_parse_16_be_inplace(void *buf)
345 b[0] = be16_to_cpu(b[0]);
348 static void regmap_parse_16_le_inplace(void *buf)
352 b[0] = le16_to_cpu(b[0]);
355 static unsigned int regmap_parse_16_native(const void *buf)
360 static unsigned int regmap_parse_24(const void *buf)
363 unsigned int ret = b[2];
364 ret |= ((unsigned int)b[1]) << 8;
365 ret |= ((unsigned int)b[0]) << 16;
370 static unsigned int regmap_parse_32_be(const void *buf)
372 const __be32 *b = buf;
374 return be32_to_cpu(b[0]);
377 static unsigned int regmap_parse_32_le(const void *buf)
379 const __le32 *b = buf;
381 return le32_to_cpu(b[0]);
384 static void regmap_parse_32_be_inplace(void *buf)
388 b[0] = be32_to_cpu(b[0]);
391 static void regmap_parse_32_le_inplace(void *buf)
395 b[0] = le32_to_cpu(b[0]);
398 static unsigned int regmap_parse_32_native(const void *buf)
404 static unsigned int regmap_parse_64_be(const void *buf)
406 const __be64 *b = buf;
408 return be64_to_cpu(b[0]);
411 static unsigned int regmap_parse_64_le(const void *buf)
413 const __le64 *b = buf;
415 return le64_to_cpu(b[0]);
418 static void regmap_parse_64_be_inplace(void *buf)
422 b[0] = be64_to_cpu(b[0]);
425 static void regmap_parse_64_le_inplace(void *buf)
429 b[0] = le64_to_cpu(b[0]);
432 static unsigned int regmap_parse_64_native(const void *buf)
438 static void regmap_lock_hwlock(void *__map)
440 struct regmap *map = __map;
442 hwspin_lock_timeout(map->hwlock, UINT_MAX);
445 static void regmap_lock_hwlock_irq(void *__map)
447 struct regmap *map = __map;
449 hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
452 static void regmap_lock_hwlock_irqsave(void *__map)
454 struct regmap *map = __map;
456 hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
457 &map->spinlock_flags);
460 static void regmap_unlock_hwlock(void *__map)
462 struct regmap *map = __map;
464 hwspin_unlock(map->hwlock);
467 static void regmap_unlock_hwlock_irq(void *__map)
469 struct regmap *map = __map;
471 hwspin_unlock_irq(map->hwlock);
474 static void regmap_unlock_hwlock_irqrestore(void *__map)
476 struct regmap *map = __map;
478 hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
481 static void regmap_lock_unlock_none(void *__map)
486 static void regmap_lock_mutex(void *__map)
488 struct regmap *map = __map;
489 mutex_lock(&map->mutex);
492 static void regmap_unlock_mutex(void *__map)
494 struct regmap *map = __map;
495 mutex_unlock(&map->mutex);
498 static void regmap_lock_spinlock(void *__map)
499 __acquires(&map->spinlock)
501 struct regmap *map = __map;
504 spin_lock_irqsave(&map->spinlock, flags);
505 map->spinlock_flags = flags;
508 static void regmap_unlock_spinlock(void *__map)
509 __releases(&map->spinlock)
511 struct regmap *map = __map;
512 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
524 static bool _regmap_range_add(struct regmap *map,
525 struct regmap_range_node *data)
527 struct rb_root *root = &map->range_tree;
528 struct rb_node **new = &(root->rb_node), *parent = NULL;
531 struct regmap_range_node *this =
532 rb_entry(*new, struct regmap_range_node, node);
535 if (data->range_max < this->range_min)
536 new = &((*new)->rb_left);
537 else if (data->range_min > this->range_max)
538 new = &((*new)->rb_right);
543 rb_link_node(&data->node, parent, new);
544 rb_insert_color(&data->node, root);
549 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
552 struct rb_node *node = map->range_tree.rb_node;
555 struct regmap_range_node *this =
556 rb_entry(node, struct regmap_range_node, node);
558 if (reg < this->range_min)
559 node = node->rb_left;
560 else if (reg > this->range_max)
561 node = node->rb_right;
569 static void regmap_range_exit(struct regmap *map)
571 struct rb_node *next;
572 struct regmap_range_node *range_node;
574 next = rb_first(&map->range_tree);
576 range_node = rb_entry(next, struct regmap_range_node, node);
577 next = rb_next(&range_node->node);
578 rb_erase(&range_node->node, &map->range_tree);
582 kfree(map->selector_work_buf);
585 int regmap_attach_dev(struct device *dev, struct regmap *map,
586 const struct regmap_config *config)
592 regmap_debugfs_init(map, config->name);
594 /* Add a devres resource for dev_get_regmap() */
595 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
597 regmap_debugfs_exit(map);
605 EXPORT_SYMBOL_GPL(regmap_attach_dev);
607 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
608 const struct regmap_config *config)
610 enum regmap_endian endian;
612 /* Retrieve the endianness specification from the regmap config */
613 endian = config->reg_format_endian;
615 /* If the regmap config specified a non-default value, use that */
616 if (endian != REGMAP_ENDIAN_DEFAULT)
619 /* Retrieve the endianness specification from the bus config */
620 if (bus && bus->reg_format_endian_default)
621 endian = bus->reg_format_endian_default;
623 /* If the bus specified a non-default value, use that */
624 if (endian != REGMAP_ENDIAN_DEFAULT)
627 /* Use this if no other value was found */
628 return REGMAP_ENDIAN_BIG;
631 enum regmap_endian regmap_get_val_endian(struct device *dev,
632 const struct regmap_bus *bus,
633 const struct regmap_config *config)
635 struct device_node *np;
636 enum regmap_endian endian;
638 /* Retrieve the endianness specification from the regmap config */
639 endian = config->val_format_endian;
641 /* If the regmap config specified a non-default value, use that */
642 if (endian != REGMAP_ENDIAN_DEFAULT)
645 /* If the dev and dev->of_node exist try to get endianness from DT */
646 if (dev && dev->of_node) {
649 /* Parse the device's DT node for an endianness specification */
650 if (of_property_read_bool(np, "big-endian"))
651 endian = REGMAP_ENDIAN_BIG;
652 else if (of_property_read_bool(np, "little-endian"))
653 endian = REGMAP_ENDIAN_LITTLE;
654 else if (of_property_read_bool(np, "native-endian"))
655 endian = REGMAP_ENDIAN_NATIVE;
657 /* If the endianness was specified in DT, use that */
658 if (endian != REGMAP_ENDIAN_DEFAULT)
662 /* Retrieve the endianness specification from the bus config */
663 if (bus && bus->val_format_endian_default)
664 endian = bus->val_format_endian_default;
666 /* If the bus specified a non-default value, use that */
667 if (endian != REGMAP_ENDIAN_DEFAULT)
670 /* Use this if no other value was found */
671 return REGMAP_ENDIAN_BIG;
673 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
/*
 * __regmap_init() - allocate and initialise a struct regmap from a config.
 *
 * NOTE(review): this block is an extraction-garbled copy of the upstream
 * function — every line still carries its original source line number as a
 * stray prefix, and many lines (braces, case/break statements, error paths,
 * the bus_context parameter, the err_* labels) are missing, so it cannot
 * compile as-is.  Restore the body from the upstream kernel tree; the
 * comments below only label the sections that survived extraction.
 */
675 struct regmap *__regmap_init(struct device *dev,
676 const struct regmap_bus *bus,
678 const struct regmap_config *config,
679 struct lock_class_key *lock_key,
680 const char *lock_name)
684 enum regmap_endian reg_endian, val_endian;
690 map = kzalloc(sizeof(*map), GFP_KERNEL);
697 map->name = kstrdup_const(config->name, GFP_KERNEL);
/* Select the locking implementation: none, caller-supplied callbacks,
 * a hardware spinlock, or (below) spinlock/mutex depending on fast_io.
 */
704 if (config->disable_locking) {
705 map->lock = map->unlock = regmap_lock_unlock_none;
706 regmap_debugfs_disable(map);
707 } else if (config->lock && config->unlock) {
708 map->lock = config->lock;
709 map->unlock = config->unlock;
710 map->lock_arg = config->lock_arg;
711 } else if (config->use_hwlock) {
712 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
718 switch (config->hwlock_mode) {
719 case HWLOCK_IRQSTATE:
720 map->lock = regmap_lock_hwlock_irqsave;
721 map->unlock = regmap_unlock_hwlock_irqrestore;
724 map->lock = regmap_lock_hwlock_irq;
725 map->unlock = regmap_unlock_hwlock_irq;
728 map->lock = regmap_lock_hwlock;
729 map->unlock = regmap_unlock_hwlock;
735 if ((bus && bus->fast_io) ||
737 spin_lock_init(&map->spinlock);
738 map->lock = regmap_lock_spinlock;
739 map->unlock = regmap_unlock_spinlock;
740 lockdep_set_class_and_name(&map->spinlock,
741 lock_key, lock_name);
743 mutex_init(&map->mutex);
744 map->lock = regmap_lock_mutex;
745 map->unlock = regmap_unlock_mutex;
746 lockdep_set_class_and_name(&map->mutex,
747 lock_key, lock_name);
753 * When we write in fast-paths with regmap_bulk_write() don't allocate
754 * scratch buffers with sleeping allocations.
756 if ((bus && bus->fast_io) || config->fast_io)
757 map->alloc_flags = GFP_ATOMIC;
759 map->alloc_flags = GFP_KERNEL;
/* Derive byte-level layout of the wire format from the bit widths. */
761 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
762 map->format.pad_bytes = config->pad_bits / 8;
763 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
764 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
765 config->val_bits + config->pad_bits, 8);
766 map->reg_shift = config->pad_bits % 8;
767 if (config->reg_stride)
768 map->reg_stride = config->reg_stride;
771 if (is_power_of_2(map->reg_stride))
772 map->reg_stride_order = ilog2(map->reg_stride);
774 map->reg_stride_order = -1;
775 map->use_single_read = config->use_single_rw || !bus || !bus->read;
776 map->use_single_write = config->use_single_rw || !bus || !bus->write;
777 map->can_multi_write = config->can_multi_write && bus && bus->write;
779 map->max_raw_read = bus->max_raw_read;
780 map->max_raw_write = bus->max_raw_write;
/* Copy access tables and per-register callbacks from the config. */
784 map->bus_context = bus_context;
785 map->max_register = config->max_register;
786 map->wr_table = config->wr_table;
787 map->rd_table = config->rd_table;
788 map->volatile_table = config->volatile_table;
789 map->precious_table = config->precious_table;
790 map->rd_noinc_table = config->rd_noinc_table;
791 map->writeable_reg = config->writeable_reg;
792 map->readable_reg = config->readable_reg;
793 map->volatile_reg = config->volatile_reg;
794 map->precious_reg = config->precious_reg;
795 map->readable_noinc_reg = config->readable_noinc_reg;
796 map->cache_type = config->cache_type;
798 spin_lock_init(&map->async_lock);
799 INIT_LIST_HEAD(&map->async_list);
800 INIT_LIST_HEAD(&map->async_free);
801 init_waitqueue_head(&map->async_waitq);
803 if (config->read_flag_mask ||
804 config->write_flag_mask ||
805 config->zero_flag_mask) {
806 map->read_flag_mask = config->read_flag_mask;
807 map->write_flag_mask = config->write_flag_mask;
809 map->read_flag_mask = bus->read_flag_mask;
/* Pick the reg_read/reg_write back-ends; busless or reg-based buses
 * skip wire-format initialisation entirely.
 */
813 map->reg_read = config->reg_read;
814 map->reg_write = config->reg_write;
816 map->defer_caching = false;
817 goto skip_format_initialization;
818 } else if (!bus->read || !bus->write) {
819 map->reg_read = _regmap_bus_reg_read;
820 map->reg_write = _regmap_bus_reg_write;
822 map->defer_caching = false;
823 goto skip_format_initialization;
825 map->reg_read = _regmap_bus_read;
826 map->reg_update_bits = bus->reg_update_bits;
829 reg_endian = regmap_get_reg_endian(bus, config);
830 val_endian = regmap_get_val_endian(dev, bus, config);
/* Select a register formatter based on reg_bits (+ pad shift). */
832 switch (config->reg_bits + map->reg_shift) {
834 switch (config->val_bits) {
836 map->format.format_write = regmap_format_2_6_write;
844 switch (config->val_bits) {
846 map->format.format_write = regmap_format_4_12_write;
854 switch (config->val_bits) {
856 map->format.format_write = regmap_format_7_9_write;
864 switch (config->val_bits) {
866 map->format.format_write = regmap_format_10_14_write;
874 map->format.format_reg = regmap_format_8;
878 switch (reg_endian) {
879 case REGMAP_ENDIAN_BIG:
880 map->format.format_reg = regmap_format_16_be;
882 case REGMAP_ENDIAN_LITTLE:
883 map->format.format_reg = regmap_format_16_le;
885 case REGMAP_ENDIAN_NATIVE:
886 map->format.format_reg = regmap_format_16_native;
894 if (reg_endian != REGMAP_ENDIAN_BIG)
896 map->format.format_reg = regmap_format_24;
900 switch (reg_endian) {
901 case REGMAP_ENDIAN_BIG:
902 map->format.format_reg = regmap_format_32_be;
904 case REGMAP_ENDIAN_LITTLE:
905 map->format.format_reg = regmap_format_32_le;
907 case REGMAP_ENDIAN_NATIVE:
908 map->format.format_reg = regmap_format_32_native;
917 switch (reg_endian) {
918 case REGMAP_ENDIAN_BIG:
919 map->format.format_reg = regmap_format_64_be;
921 case REGMAP_ENDIAN_LITTLE:
922 map->format.format_reg = regmap_format_64_le;
924 case REGMAP_ENDIAN_NATIVE:
925 map->format.format_reg = regmap_format_64_native;
/* Select value formatter/parser based on val_bits and endianness. */
937 if (val_endian == REGMAP_ENDIAN_NATIVE)
938 map->format.parse_inplace = regmap_parse_inplace_noop;
940 switch (config->val_bits) {
942 map->format.format_val = regmap_format_8;
943 map->format.parse_val = regmap_parse_8;
944 map->format.parse_inplace = regmap_parse_inplace_noop;
947 switch (val_endian) {
948 case REGMAP_ENDIAN_BIG:
949 map->format.format_val = regmap_format_16_be;
950 map->format.parse_val = regmap_parse_16_be;
951 map->format.parse_inplace = regmap_parse_16_be_inplace;
953 case REGMAP_ENDIAN_LITTLE:
954 map->format.format_val = regmap_format_16_le;
955 map->format.parse_val = regmap_parse_16_le;
956 map->format.parse_inplace = regmap_parse_16_le_inplace;
958 case REGMAP_ENDIAN_NATIVE:
959 map->format.format_val = regmap_format_16_native;
960 map->format.parse_val = regmap_parse_16_native;
967 if (val_endian != REGMAP_ENDIAN_BIG)
969 map->format.format_val = regmap_format_24;
970 map->format.parse_val = regmap_parse_24;
973 switch (val_endian) {
974 case REGMAP_ENDIAN_BIG:
975 map->format.format_val = regmap_format_32_be;
976 map->format.parse_val = regmap_parse_32_be;
977 map->format.parse_inplace = regmap_parse_32_be_inplace;
979 case REGMAP_ENDIAN_LITTLE:
980 map->format.format_val = regmap_format_32_le;
981 map->format.parse_val = regmap_parse_32_le;
982 map->format.parse_inplace = regmap_parse_32_le_inplace;
984 case REGMAP_ENDIAN_NATIVE:
985 map->format.format_val = regmap_format_32_native;
986 map->format.parse_val = regmap_parse_32_native;
994 switch (val_endian) {
995 case REGMAP_ENDIAN_BIG:
996 map->format.format_val = regmap_format_64_be;
997 map->format.parse_val = regmap_parse_64_be;
998 map->format.parse_inplace = regmap_parse_64_be_inplace;
1000 case REGMAP_ENDIAN_LITTLE:
1001 map->format.format_val = regmap_format_64_le;
1002 map->format.parse_val = regmap_parse_64_le;
1003 map->format.parse_inplace = regmap_parse_64_le_inplace;
1005 case REGMAP_ENDIAN_NATIVE:
1006 map->format.format_val = regmap_format_64_native;
1007 map->format.parse_val = regmap_parse_64_native;
1016 if (map->format.format_write) {
1017 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1018 (val_endian != REGMAP_ENDIAN_BIG))
1020 map->use_single_write = true;
1023 if (!map->format.format_write &&
1024 !(map->format.format_reg && map->format.format_val))
1027 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1028 if (map->work_buf == NULL) {
1033 if (map->format.format_write) {
1034 map->defer_caching = false;
1035 map->reg_write = _regmap_bus_formatted_write;
1036 } else if (map->format.format_val) {
1037 map->defer_caching = true;
1038 map->reg_write = _regmap_bus_raw_write;
1041 skip_format_initialization:
/* Validate and register the configured virtual (paged) ranges. */
1043 map->range_tree = RB_ROOT;
1044 for (i = 0; i < config->num_ranges; i++) {
1045 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1046 struct regmap_range_node *new;
1049 if (range_cfg->range_max < range_cfg->range_min) {
1050 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
1051 range_cfg->range_max, range_cfg->range_min);
1055 if (range_cfg->range_max > map->max_register) {
1056 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
1057 range_cfg->range_max, map->max_register);
1061 if (range_cfg->selector_reg > map->max_register) {
1063 "Invalid range %d: selector out of map\n", i);
1067 if (range_cfg->window_len == 0) {
1068 dev_err(map->dev, "Invalid range %d: window_len 0\n",
1073 /* Make sure, that this register range has no selector
1074 or data window within its boundary */
1075 for (j = 0; j < config->num_ranges; j++) {
1076 unsigned sel_reg = config->ranges[j].selector_reg;
1077 unsigned win_min = config->ranges[j].window_start;
1078 unsigned win_max = win_min +
1079 config->ranges[j].window_len - 1;
1081 /* Allow data window inside its own virtual range */
1085 if (range_cfg->range_min <= sel_reg &&
1086 sel_reg <= range_cfg->range_max) {
1088 "Range %d: selector for %d in window\n",
1093 if (!(win_max < range_cfg->range_min ||
1094 win_min > range_cfg->range_max)) {
1096 "Range %d: window for %d in window\n",
1102 new = kzalloc(sizeof(*new), GFP_KERNEL);
1109 new->name = range_cfg->name;
1110 new->range_min = range_cfg->range_min;
1111 new->range_max = range_cfg->range_max;
1112 new->selector_reg = range_cfg->selector_reg;
1113 new->selector_mask = range_cfg->selector_mask;
1114 new->selector_shift = range_cfg->selector_shift;
1115 new->window_start = range_cfg->window_start;
1116 new->window_len = range_cfg->window_len;
1118 if (!_regmap_range_add(map, new)) {
1119 dev_err(map->dev, "Failed to add range %d\n", i);
1124 if (map->selector_work_buf == NULL) {
1125 map->selector_work_buf =
1126 kzalloc(map->format.buf_size, GFP_KERNEL);
1127 if (map->selector_work_buf == NULL) {
1134 ret = regcache_init(map, config);
1139 ret = regmap_attach_dev(dev, map, config);
1143 regmap_debugfs_init(map, config->name);
/* Error unwind (labels lost in extraction). */
1151 regmap_range_exit(map);
1152 kfree(map->work_buf);
1155 hwspin_lock_free(map->hwlock);
1157 kfree_const(map->name);
1161 return ERR_PTR(ret);
1163 EXPORT_SYMBOL_GPL(__regmap_init);
/* devres destructor for maps created via __devm_regmap_init(). */
static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}
1170 struct regmap *__devm_regmap_init(struct device *dev,
1171 const struct regmap_bus *bus,
1173 const struct regmap_config *config,
1174 struct lock_class_key *lock_key,
1175 const char *lock_name)
1177 struct regmap **ptr, *regmap;
1179 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1181 return ERR_PTR(-ENOMEM);
1183 regmap = __regmap_init(dev, bus, bus_context, config,
1184 lock_key, lock_name);
1185 if (!IS_ERR(regmap)) {
1187 devres_add(dev, ptr);
1194 EXPORT_SYMBOL_GPL(__devm_regmap_init);
1196 static void regmap_field_init(struct regmap_field *rm_field,
1197 struct regmap *regmap, struct reg_field reg_field)
1199 rm_field->regmap = regmap;
1200 rm_field->reg = reg_field.reg;
1201 rm_field->shift = reg_field.lsb;
1202 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1203 rm_field->id_size = reg_field.id_size;
1204 rm_field->id_offset = reg_field.id_offset;
1208 * devm_regmap_field_alloc() - Allocate and initialise a register field.
1210 * @dev: Device that will be interacted with
1211 * @regmap: regmap bank in which this register field is located.
1212 * @reg_field: Register field with in the bank.
1214 * The return value will be an ERR_PTR() on error or a valid pointer
1215 * to a struct regmap_field. The regmap_field will be automatically freed
1216 * by the device management code.
1218 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1219 struct regmap *regmap, struct reg_field reg_field)
1221 struct regmap_field *rm_field = devm_kzalloc(dev,
1222 sizeof(*rm_field), GFP_KERNEL);
1224 return ERR_PTR(-ENOMEM);
1226 regmap_field_init(rm_field, regmap, reg_field);
1231 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1252 * regmap_field_alloc() - Allocate and initialise a register field.
1254 * @regmap: regmap bank in which this register field is located.
1255 * @reg_field: Register field with in the bank.
1257 * The return value will be an ERR_PTR() on error or a valid pointer
1258 * to a struct regmap_field. The regmap_field should be freed by the
1259 * user once its finished working with it using regmap_field_free().
1261 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1262 struct reg_field reg_field)
1264 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1267 return ERR_PTR(-ENOMEM);
1269 regmap_field_init(rm_field, regmap, reg_field);
1273 EXPORT_SYMBOL_GPL(regmap_field_alloc);
/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
1288 * regmap_reinit_cache() - Reinitialise the current register cache
1290 * @map: Register map to operate on.
1291 * @config: New configuration. Only the cache data will be used.
1293 * Discard any existing register cache for the map and initialize a
1294 * new cache. This can be used to restore the cache to defaults or to
1295 * update the cache configuration to reflect runtime discovery of the
1298 * No explicit locking is done here, the user needs to ensure that
1299 * this function will not race with other calls to regmap.
1301 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1304 regmap_debugfs_exit(map);
1306 map->max_register = config->max_register;
1307 map->writeable_reg = config->writeable_reg;
1308 map->readable_reg = config->readable_reg;
1309 map->volatile_reg = config->volatile_reg;
1310 map->precious_reg = config->precious_reg;
1311 map->readable_noinc_reg = config->readable_noinc_reg;
1312 map->cache_type = config->cache_type;
1314 regmap_debugfs_init(map, config->name);
1316 map->cache_bypass = false;
1317 map->cache_only = false;
1319 return regcache_init(map, config);
1321 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1324 * regmap_exit() - Free a previously allocated register map
1326 * @map: Register map to operate on.
1328 void regmap_exit(struct regmap *map)
1330 struct regmap_async *async;
1333 regmap_debugfs_exit(map);
1334 regmap_range_exit(map);
1335 if (map->bus && map->bus->free_context)
1336 map->bus->free_context(map->bus_context);
1337 kfree(map->work_buf);
1338 while (!list_empty(&map->async_free)) {
1339 async = list_first_entry_or_null(&map->async_free,
1340 struct regmap_async,
1342 list_del(&async->list);
1343 kfree(async->work_buf);
1347 hwspin_lock_free(map->hwlock);
1348 kfree_const(map->name);
1351 EXPORT_SYMBOL_GPL(regmap_exit);
1353 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1355 struct regmap **r = res;
1361 /* If the user didn't specify a name match any */
1363 return (*r)->name == data;
1369 * dev_get_regmap() - Obtain the regmap (if any) for a device
1371 * @dev: Device to retrieve the map for
1372 * @name: Optional name for the register map, usually NULL.
1374 * Returns the regmap for the device if one is present, or NULL. If
1375 * name is specified then it must match the name specified when
1376 * registering the device, if it is NULL then the first regmap found
1377 * will be used. Devices with multiple register maps are very rare,
1378 * generic code should normally not need to specify a name.
1380 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1382 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1383 dev_get_regmap_match, (void *)name);
1389 EXPORT_SYMBOL_GPL(dev_get_regmap);
1392 * regmap_get_device() - Obtain the device from a regmap
1394 * @map: Register map to operate on.
1396 * Returns the underlying device that the regmap has been created for.
1398 struct device *regmap_get_device(struct regmap *map)
1402 EXPORT_SYMBOL_GPL(regmap_get_device);
1404 static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1405 struct regmap_range_node *range,
1406 unsigned int val_num)
1408 void *orig_work_buf;
1409 unsigned int win_offset;
1410 unsigned int win_page;
1414 win_offset = (*reg - range->range_min) % range->window_len;
1415 win_page = (*reg - range->range_min) / range->window_len;
1418 /* Bulk write shouldn't cross range boundary */
1419 if (*reg + val_num - 1 > range->range_max)
1422 /* ... or single page boundary */
1423 if (val_num > range->window_len - win_offset)
1427 /* It is possible to have selector register inside data window.
1428 In that case, selector register is located on every page and
1429 it needs no page switching, when accessed alone. */
1431 range->window_start + win_offset != range->selector_reg) {
1432 /* Use separate work_buf during page switching */
1433 orig_work_buf = map->work_buf;
1434 map->work_buf = map->selector_work_buf;
1436 ret = _regmap_update_bits(map, range->selector_reg,
1437 range->selector_mask,
1438 win_page << range->selector_shift,
1441 map->work_buf = orig_work_buf;
1447 *reg = range->window_start + win_offset;
1452 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1458 if (!mask || !map->work_buf)
1461 buf = map->work_buf;
1463 for (i = 0; i < max_bytes; i++)
1464 buf[i] |= (mask >> (8 * i)) & 0xff;
1467 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1468 const void *val, size_t val_len)
1470 struct regmap_range_node *range;
1471 unsigned long flags;
1472 void *work_val = map->work_buf + map->format.reg_bytes +
1473 map->format.pad_bytes;
1475 int ret = -ENOTSUPP;
1481 /* Check for unwritable registers before we start */
1482 if (map->writeable_reg)
1483 for (i = 0; i < val_len / map->format.val_bytes; i++)
1484 if (!map->writeable_reg(map->dev,
1485 reg + regmap_get_offset(map, i)))
1488 if (!map->cache_bypass && map->format.parse_val) {
1490 int val_bytes = map->format.val_bytes;
1491 for (i = 0; i < val_len / val_bytes; i++) {
1492 ival = map->format.parse_val(val + (i * val_bytes));
1493 ret = regcache_write(map,
1494 reg + regmap_get_offset(map, i),
1498 "Error in caching of register: %x ret: %d\n",
1503 if (map->cache_only) {
1504 map->cache_dirty = true;
1509 range = _regmap_range_lookup(map, reg);
1511 int val_num = val_len / map->format.val_bytes;
1512 int win_offset = (reg - range->range_min) % range->window_len;
1513 int win_residue = range->window_len - win_offset;
1515 /* If the write goes beyond the end of the window split it */
1516 while (val_num > win_residue) {
1517 dev_dbg(map->dev, "Writing window %d/%zu\n",
1518 win_residue, val_len / map->format.val_bytes);
1519 ret = _regmap_raw_write_impl(map, reg, val,
1521 map->format.val_bytes);
1526 val_num -= win_residue;
1527 val += win_residue * map->format.val_bytes;
1528 val_len -= win_residue * map->format.val_bytes;
1530 win_offset = (reg - range->range_min) %
1532 win_residue = range->window_len - win_offset;
1535 ret = _regmap_select_page(map, ®, range, val_num);
1540 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1541 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1542 map->write_flag_mask);
1545 * Essentially all I/O mechanisms will be faster with a single
1546 * buffer to write. Since register syncs often generate raw
1547 * writes of single registers optimise that case.
1549 if (val != work_val && val_len == map->format.val_bytes) {
1550 memcpy(work_val, val, map->format.val_bytes);
1554 if (map->async && map->bus->async_write) {
1555 struct regmap_async *async;
1557 trace_regmap_async_write_start(map, reg, val_len);
1559 spin_lock_irqsave(&map->async_lock, flags);
1560 async = list_first_entry_or_null(&map->async_free,
1561 struct regmap_async,
1564 list_del(&async->list);
1565 spin_unlock_irqrestore(&map->async_lock, flags);
1568 async = map->bus->async_alloc();
1572 async->work_buf = kzalloc(map->format.buf_size,
1573 GFP_KERNEL | GFP_DMA);
1574 if (!async->work_buf) {
1582 /* If the caller supplied the value we can use it safely. */
1583 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1584 map->format.reg_bytes + map->format.val_bytes);
1586 spin_lock_irqsave(&map->async_lock, flags);
1587 list_add_tail(&async->list, &map->async_list);
1588 spin_unlock_irqrestore(&map->async_lock, flags);
1590 if (val != work_val)
1591 ret = map->bus->async_write(map->bus_context,
1593 map->format.reg_bytes +
1594 map->format.pad_bytes,
1595 val, val_len, async);
1597 ret = map->bus->async_write(map->bus_context,
1599 map->format.reg_bytes +
1600 map->format.pad_bytes +
1601 val_len, NULL, 0, async);
1604 dev_err(map->dev, "Failed to schedule write: %d\n",
1607 spin_lock_irqsave(&map->async_lock, flags);
1608 list_move(&async->list, &map->async_free);
1609 spin_unlock_irqrestore(&map->async_lock, flags);
1615 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1617 /* If we're doing a single register write we can probably just
1618 * send the work_buf directly, otherwise try to do a gather
1621 if (val == work_val)
1622 ret = map->bus->write(map->bus_context, map->work_buf,
1623 map->format.reg_bytes +
1624 map->format.pad_bytes +
1626 else if (map->bus->gather_write)
1627 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1628 map->format.reg_bytes +
1629 map->format.pad_bytes,
1632 /* If that didn't work fall back on linearising by hand. */
1633 if (ret == -ENOTSUPP) {
1634 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1635 buf = kzalloc(len, GFP_KERNEL);
1639 memcpy(buf, map->work_buf, map->format.reg_bytes);
1640 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1642 ret = map->bus->write(map->bus_context, buf, len);
1645 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1646 /* regcache_drop_region() takes lock that we already have,
1647 * thus call map->cache_ops->drop() directly
1649 if (map->cache_ops && map->cache_ops->drop)
1650 map->cache_ops->drop(map, reg, reg + 1);
1653 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1659 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1661 * @map: Map to check.
1663 bool regmap_can_raw_write(struct regmap *map)
1665 return map->bus && map->bus->write && map->format.format_val &&
1666 map->format.format_reg;
1668 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1671 * regmap_get_raw_read_max - Get the maximum size we can read
1673 * @map: Map to check.
1675 size_t regmap_get_raw_read_max(struct regmap *map)
1677 return map->max_raw_read;
1679 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1682 * regmap_get_raw_write_max - Get the maximum size we can read
1684 * @map: Map to check.
1686 size_t regmap_get_raw_write_max(struct regmap *map)
1688 return map->max_raw_write;
1690 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1692 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1696 struct regmap_range_node *range;
1697 struct regmap *map = context;
1699 WARN_ON(!map->bus || !map->format.format_write);
1701 range = _regmap_range_lookup(map, reg);
1703 ret = _regmap_select_page(map, ®, range, 1);
1708 map->format.format_write(map, reg, val);
1710 trace_regmap_hw_write_start(map, reg, 1);
1712 ret = map->bus->write(map->bus_context, map->work_buf,
1713 map->format.buf_size);
1715 trace_regmap_hw_write_done(map, reg, 1);
1720 static int _regmap_bus_reg_write(void *context, unsigned int reg,
1723 struct regmap *map = context;
1725 return map->bus->reg_write(map->bus_context, reg, val);
1728 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1731 struct regmap *map = context;
1733 WARN_ON(!map->bus || !map->format.format_val);
1735 map->format.format_val(map->work_buf + map->format.reg_bytes
1736 + map->format.pad_bytes, val, 0);
1737 return _regmap_raw_write_impl(map, reg,
1739 map->format.reg_bytes +
1740 map->format.pad_bytes,
1741 map->format.val_bytes);
1744 static inline void *_regmap_map_get_context(struct regmap *map)
1746 return (map->bus) ? map : map->bus_context;
1749 int _regmap_write(struct regmap *map, unsigned int reg,
1753 void *context = _regmap_map_get_context(map);
1755 if (!regmap_writeable(map, reg))
1758 if (!map->cache_bypass && !map->defer_caching) {
1759 ret = regcache_write(map, reg, val);
1762 if (map->cache_only) {
1763 map->cache_dirty = true;
1768 if (regmap_should_log(map))
1769 dev_info(map->dev, "%x <= %x\n", reg, val);
1771 trace_regmap_reg_write(map, reg, val);
1773 return map->reg_write(context, reg, val);
1777 * regmap_write() - Write a value to a single register
1779 * @map: Register map to write to
1780 * @reg: Register to write to
1781 * @val: Value to be written
1783 * A value of zero will be returned on success, a negative errno will
1784 * be returned in error cases.
1786 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1790 if (!IS_ALIGNED(reg, map->reg_stride))
1793 map->lock(map->lock_arg);
1795 ret = _regmap_write(map, reg, val);
1797 map->unlock(map->lock_arg);
1801 EXPORT_SYMBOL_GPL(regmap_write);
1804 * regmap_write_async() - Write a value to a single register asynchronously
1806 * @map: Register map to write to
1807 * @reg: Register to write to
1808 * @val: Value to be written
1810 * A value of zero will be returned on success, a negative errno will
1811 * be returned in error cases.
1813 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1817 if (!IS_ALIGNED(reg, map->reg_stride))
1820 map->lock(map->lock_arg);
1824 ret = _regmap_write(map, reg, val);
1828 map->unlock(map->lock_arg);
1832 EXPORT_SYMBOL_GPL(regmap_write_async);
1834 int _regmap_raw_write(struct regmap *map, unsigned int reg,
1835 const void *val, size_t val_len)
1837 size_t val_bytes = map->format.val_bytes;
1838 size_t val_count = val_len / val_bytes;
1839 size_t chunk_count, chunk_bytes;
1840 size_t chunk_regs = val_count;
1846 if (map->use_single_write)
1848 else if (map->max_raw_write && val_len > map->max_raw_write)
1849 chunk_regs = map->max_raw_write / val_bytes;
1851 chunk_count = val_count / chunk_regs;
1852 chunk_bytes = chunk_regs * val_bytes;
1854 /* Write as many bytes as possible with chunk_size */
1855 for (i = 0; i < chunk_count; i++) {
1856 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
1860 reg += regmap_get_offset(map, chunk_regs);
1862 val_len -= chunk_bytes;
1865 /* Write remaining bytes */
1867 ret = _regmap_raw_write_impl(map, reg, val, val_len);
1873 * regmap_raw_write() - Write raw values to one or more registers
1875 * @map: Register map to write to
1876 * @reg: Initial register to write to
1877 * @val: Block of data to be written, laid out for direct transmission to the
1879 * @val_len: Length of data pointed to by val.
1881 * This function is intended to be used for things like firmware
1882 * download where a large block of data needs to be transferred to the
1883 * device. No formatting will be done on the data provided.
1885 * A value of zero will be returned on success, a negative errno will
1886 * be returned in error cases.
1888 int regmap_raw_write(struct regmap *map, unsigned int reg,
1889 const void *val, size_t val_len)
1893 if (!regmap_can_raw_write(map))
1895 if (val_len % map->format.val_bytes)
1898 map->lock(map->lock_arg);
1900 ret = _regmap_raw_write(map, reg, val, val_len);
1902 map->unlock(map->lock_arg);
1906 EXPORT_SYMBOL_GPL(regmap_raw_write);
1909 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
1912 * @field: Register field to write to
1913 * @mask: Bitmask to change
1914 * @val: Value to be written
1915 * @change: Boolean indicating if a write was done
1916 * @async: Boolean indicating asynchronously
1917 * @force: Boolean indicating use force update
1919 * Perform a read/modify/write cycle on the register field with change,
1920 * async, force option.
1922 * A value of zero will be returned on success, a negative errno will
1923 * be returned in error cases.
1925 int regmap_field_update_bits_base(struct regmap_field *field,
1926 unsigned int mask, unsigned int val,
1927 bool *change, bool async, bool force)
1929 mask = (mask << field->shift) & field->mask;
1931 return regmap_update_bits_base(field->regmap, field->reg,
1932 mask, val << field->shift,
1933 change, async, force);
1935 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
1938 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
1939 * register field with port ID
1941 * @field: Register field to write to
1943 * @mask: Bitmask to change
1944 * @val: Value to be written
1945 * @change: Boolean indicating if a write was done
1946 * @async: Boolean indicating asynchronously
1947 * @force: Boolean indicating use force update
1949 * A value of zero will be returned on success, a negative errno will
1950 * be returned in error cases.
1952 int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
1953 unsigned int mask, unsigned int val,
1954 bool *change, bool async, bool force)
1956 if (id >= field->id_size)
1959 mask = (mask << field->shift) & field->mask;
1961 return regmap_update_bits_base(field->regmap,
1962 field->reg + (field->id_offset * id),
1963 mask, val << field->shift,
1964 change, async, force);
1966 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
1969 * regmap_bulk_write() - Write multiple registers to the device
1971 * @map: Register map to write to
1972 * @reg: First register to be write from
1973 * @val: Block of data to be written, in native register size for device
1974 * @val_count: Number of registers to write
1976 * This function is intended to be used for writing a large block of
1977 * data to the device either in single transfer or multiple transfer.
1979 * A value of zero will be returned on success, a negative errno will
1980 * be returned in error cases.
1982 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1986 size_t val_bytes = map->format.val_bytes;
1988 if (!IS_ALIGNED(reg, map->reg_stride))
1992 * Some devices don't support bulk write, for them we have a series of
1993 * single write operations.
1995 if (!map->bus || !map->format.parse_inplace) {
1996 map->lock(map->lock_arg);
1997 for (i = 0; i < val_count; i++) {
2000 switch (val_bytes) {
2002 ival = *(u8 *)(val + (i * val_bytes));
2005 ival = *(u16 *)(val + (i * val_bytes));
2008 ival = *(u32 *)(val + (i * val_bytes));
2012 ival = *(u64 *)(val + (i * val_bytes));
2020 ret = _regmap_write(map,
2021 reg + regmap_get_offset(map, i),
2027 map->unlock(map->lock_arg);
2031 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
2035 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2036 map->format.parse_inplace(wval + i);
2038 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2044 EXPORT_SYMBOL_GPL(regmap_bulk_write);
2047 * _regmap_raw_multi_reg_write()
2049 * the (register,newvalue) pairs in regs have not been formatted, but
2050 * they are all in the same page and have been changed to being page
2051 * relative. The page register has been written if that was necessary.
2053 static int _regmap_raw_multi_reg_write(struct regmap *map,
2054 const struct reg_sequence *regs,
2061 size_t val_bytes = map->format.val_bytes;
2062 size_t reg_bytes = map->format.reg_bytes;
2063 size_t pad_bytes = map->format.pad_bytes;
2064 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2065 size_t len = pair_size * num_regs;
2070 buf = kzalloc(len, GFP_KERNEL);
2074 /* We have to linearise by hand. */
2078 for (i = 0; i < num_regs; i++) {
2079 unsigned int reg = regs[i].reg;
2080 unsigned int val = regs[i].def;
2081 trace_regmap_hw_write_start(map, reg, 1);
2082 map->format.format_reg(u8, reg, map->reg_shift);
2083 u8 += reg_bytes + pad_bytes;
2084 map->format.format_val(u8, val, 0);
2088 *u8 |= map->write_flag_mask;
2090 ret = map->bus->write(map->bus_context, buf, len);
2094 for (i = 0; i < num_regs; i++) {
2095 int reg = regs[i].reg;
2096 trace_regmap_hw_write_done(map, reg, 1);
2101 static unsigned int _regmap_register_page(struct regmap *map,
2103 struct regmap_range_node *range)
2105 unsigned int win_page = (reg - range->range_min) / range->window_len;
2110 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
2111 struct reg_sequence *regs,
2116 struct reg_sequence *base;
2117 unsigned int this_page = 0;
2118 unsigned int page_change = 0;
2120 * the set of registers are not neccessarily in order, but
2121 * since the order of write must be preserved this algorithm
2122 * chops the set each time the page changes. This also applies
2123 * if there is a delay required at any point in the sequence.
2126 for (i = 0, n = 0; i < num_regs; i++, n++) {
2127 unsigned int reg = regs[i].reg;
2128 struct regmap_range_node *range;
2130 range = _regmap_range_lookup(map, reg);
2132 unsigned int win_page = _regmap_register_page(map, reg,
2136 this_page = win_page;
2137 if (win_page != this_page) {
2138 this_page = win_page;
2143 /* If we have both a page change and a delay make sure to
2144 * write the regs and apply the delay before we change the
2148 if (page_change || regs[i].delay_us) {
2150 /* For situations where the first write requires
2151 * a delay we need to make sure we don't call
2152 * raw_multi_reg_write with n=0
2153 * This can't occur with page breaks as we
2154 * never write on the first iteration
2156 if (regs[i].delay_us && i == 0)
2159 ret = _regmap_raw_multi_reg_write(map, base, n);
2163 if (regs[i].delay_us)
2164 udelay(regs[i].delay_us);
2170 ret = _regmap_select_page(map,
2183 return _regmap_raw_multi_reg_write(map, base, n);
2187 static int _regmap_multi_reg_write(struct regmap *map,
2188 const struct reg_sequence *regs,
2194 if (!map->can_multi_write) {
2195 for (i = 0; i < num_regs; i++) {
2196 ret = _regmap_write(map, regs[i].reg, regs[i].def);
2200 if (regs[i].delay_us)
2201 udelay(regs[i].delay_us);
2206 if (!map->format.parse_inplace)
2209 if (map->writeable_reg)
2210 for (i = 0; i < num_regs; i++) {
2211 int reg = regs[i].reg;
2212 if (!map->writeable_reg(map->dev, reg))
2214 if (!IS_ALIGNED(reg, map->reg_stride))
2218 if (!map->cache_bypass) {
2219 for (i = 0; i < num_regs; i++) {
2220 unsigned int val = regs[i].def;
2221 unsigned int reg = regs[i].reg;
2222 ret = regcache_write(map, reg, val);
2225 "Error in caching of register: %x ret: %d\n",
2230 if (map->cache_only) {
2231 map->cache_dirty = true;
2238 for (i = 0; i < num_regs; i++) {
2239 unsigned int reg = regs[i].reg;
2240 struct regmap_range_node *range;
2242 /* Coalesce all the writes between a page break or a delay
2245 range = _regmap_range_lookup(map, reg);
2246 if (range || regs[i].delay_us) {
2247 size_t len = sizeof(struct reg_sequence)*num_regs;
2248 struct reg_sequence *base = kmemdup(regs, len,
2252 ret = _regmap_range_multi_paged_reg_write(map, base,
2259 return _regmap_raw_multi_reg_write(map, regs, num_regs);
2263 * regmap_multi_reg_write() - Write multiple registers to the device
2265 * @map: Register map to write to
2266 * @regs: Array of structures containing register,value to be written
2267 * @num_regs: Number of registers to write
2269 * Write multiple registers to the device where the set of register, value
2270 * pairs are supplied in any order, possibly not all in a single range.
2272 * The 'normal' block write mode will send ultimately send data on the
2273 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2274 * addressed. However, this alternative block multi write mode will send
2275 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2276 * must of course support the mode.
2278 * A value of zero will be returned on success, a negative errno will be
2279 * returned in error cases.
2281 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2286 map->lock(map->lock_arg);
2288 ret = _regmap_multi_reg_write(map, regs, num_regs);
2290 map->unlock(map->lock_arg);
2294 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2297 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2298 * device but not the cache
2300 * @map: Register map to write to
2301 * @regs: Array of structures containing register,value to be written
2302 * @num_regs: Number of registers to write
2304 * Write multiple registers to the device but not the cache where the set
2305 * of register are supplied in any order.
2307 * This function is intended to be used for writing a large block of data
2308 * atomically to the device in single transfer for those I2C client devices
2309 * that implement this alternative block write mode.
2311 * A value of zero will be returned on success, a negative errno will
2312 * be returned in error cases.
2314 int regmap_multi_reg_write_bypassed(struct regmap *map,
2315 const struct reg_sequence *regs,
2321 map->lock(map->lock_arg);
2323 bypass = map->cache_bypass;
2324 map->cache_bypass = true;
2326 ret = _regmap_multi_reg_write(map, regs, num_regs);
2328 map->cache_bypass = bypass;
2330 map->unlock(map->lock_arg);
2334 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2337 * regmap_raw_write_async() - Write raw values to one or more registers
2340 * @map: Register map to write to
2341 * @reg: Initial register to write to
2342 * @val: Block of data to be written, laid out for direct transmission to the
2343 * device. Must be valid until regmap_async_complete() is called.
2344 * @val_len: Length of data pointed to by val.
2346 * This function is intended to be used for things like firmware
2347 * download where a large block of data needs to be transferred to the
2348 * device. No formatting will be done on the data provided.
2350 * If supported by the underlying bus the write will be scheduled
2351 * asynchronously, helping maximise I/O speed on higher speed buses
2352 * like SPI. regmap_async_complete() can be called to ensure that all
2353 * asynchrnous writes have been completed.
2355 * A value of zero will be returned on success, a negative errno will
2356 * be returned in error cases.
2358 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2359 const void *val, size_t val_len)
2363 if (val_len % map->format.val_bytes)
2365 if (!IS_ALIGNED(reg, map->reg_stride))
2368 map->lock(map->lock_arg);
2372 ret = _regmap_raw_write(map, reg, val, val_len);
2376 map->unlock(map->lock_arg);
2380 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2382 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2383 unsigned int val_len)
2385 struct regmap_range_node *range;
2390 if (!map->bus || !map->bus->read)
2393 range = _regmap_range_lookup(map, reg);
2395 ret = _regmap_select_page(map, ®, range,
2396 val_len / map->format.val_bytes);
2401 map->format.format_reg(map->work_buf, reg, map->reg_shift);
2402 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
2403 map->read_flag_mask);
2404 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2406 ret = map->bus->read(map->bus_context, map->work_buf,
2407 map->format.reg_bytes + map->format.pad_bytes,
2410 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2415 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2418 struct regmap *map = context;
2420 return map->bus->reg_read(map->bus_context, reg, val);
2423 static int _regmap_bus_read(void *context, unsigned int reg,
2427 struct regmap *map = context;
2428 void *work_val = map->work_buf + map->format.reg_bytes +
2429 map->format.pad_bytes;
2431 if (!map->format.parse_val)
2434 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
2436 *val = map->format.parse_val(work_val);
2441 static int _regmap_read(struct regmap *map, unsigned int reg,
2445 void *context = _regmap_map_get_context(map);
2447 if (!map->cache_bypass) {
2448 ret = regcache_read(map, reg, val);
2453 if (map->cache_only)
2456 if (!regmap_readable(map, reg))
2459 ret = map->reg_read(context, reg, val);
2461 if (regmap_should_log(map))
2462 dev_info(map->dev, "%x => %x\n", reg, *val);
2464 trace_regmap_reg_read(map, reg, *val);
2466 if (!map->cache_bypass)
2467 regcache_write(map, reg, *val);
2474 * regmap_read() - Read a value from a single register
2476 * @map: Register map to read from
2477 * @reg: Register to be read from
2478 * @val: Pointer to store read value
2480 * A value of zero will be returned on success, a negative errno will
2481 * be returned in error cases.
2483 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2487 if (!IS_ALIGNED(reg, map->reg_stride))
2490 map->lock(map->lock_arg);
2492 ret = _regmap_read(map, reg, val);
2494 map->unlock(map->lock_arg);
2498 EXPORT_SYMBOL_GPL(regmap_read);
2501 * regmap_raw_read() - Read raw data from the device
2503 * @map: Register map to read from
2504 * @reg: First register to be read from
2505 * @val: Pointer to store read value
2506 * @val_len: Size of data to read
2508 * A value of zero will be returned on success, a negative errno will
2509 * be returned in error cases.
2511 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2514 size_t val_bytes = map->format.val_bytes;
2515 size_t val_count = val_len / val_bytes;
2521 if (val_len % map->format.val_bytes)
2523 if (!IS_ALIGNED(reg, map->reg_stride))
2528 map->lock(map->lock_arg);
2530 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2531 map->cache_type == REGCACHE_NONE) {
2532 size_t chunk_count, chunk_bytes;
2533 size_t chunk_regs = val_count;
2535 if (!map->bus->read) {
2540 if (map->use_single_read)
2542 else if (map->max_raw_read && val_len > map->max_raw_read)
2543 chunk_regs = map->max_raw_read / val_bytes;
2545 chunk_count = val_count / chunk_regs;
2546 chunk_bytes = chunk_regs * val_bytes;
2548 /* Read bytes that fit into whole chunks */
2549 for (i = 0; i < chunk_count; i++) {
2550 ret = _regmap_raw_read(map, reg, val, chunk_bytes);
2554 reg += regmap_get_offset(map, chunk_regs);
2556 val_len -= chunk_bytes;
2559 /* Read remaining bytes */
2561 ret = _regmap_raw_read(map, reg, val, val_len);
2566 /* Otherwise go word by word for the cache; should be low
2567 * cost as we expect to hit the cache.
2569 for (i = 0; i < val_count; i++) {
2570 ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2575 map->format.format_val(val + (i * val_bytes), v, 0);
2580 map->unlock(map->lock_arg);
2584 EXPORT_SYMBOL_GPL(regmap_raw_read);
2587 * regmap_noinc_read(): Read data from a register without incrementing the
2590 * @map: Register map to read from
2591 * @reg: Register to read from
2592 * @val: Pointer to data buffer
2593 * @val_len: Length of output buffer in bytes.
2595 * The regmap API usually assumes that bulk bus read operations will read a
2596 * range of registers. Some devices have certain registers for which a read
2597 * operation read will read from an internal FIFO.
2599 * The target register must be volatile but registers after it can be
2600 * completely unrelated cacheable registers.
2602 * This will attempt multiple reads as required to read val_len bytes.
2604 * A value of zero will be returned on success, a negative errno will be
2605 * returned in error cases.
2607 int regmap_noinc_read(struct regmap *map, unsigned int reg,
2608 void *val, size_t val_len)
2615 if (!map->bus->read)
2617 if (val_len % map->format.val_bytes)
2619 if (!IS_ALIGNED(reg, map->reg_stride))
2624 map->lock(map->lock_arg);
2626 if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
2632 if (map->max_raw_read && map->max_raw_read < val_len)
2633 read_len = map->max_raw_read;
2636 ret = _regmap_raw_read(map, reg, val, read_len);
2639 val = ((u8 *)val) + read_len;
2640 val_len -= read_len;
2644 map->unlock(map->lock_arg);
2647 EXPORT_SYMBOL_GPL(regmap_noinc_read);
2650 * regmap_field_read(): Read a value to a single register field
2652 * @field: Register field to read from
2653 * @val: Pointer to store read value
2655 * A value of zero will be returned on success, a negative errno will
2656 * be returned in error cases.
2658 int regmap_field_read(struct regmap_field *field, unsigned int *val)
2661 unsigned int reg_val;
2662 ret = regmap_read(field->regmap, field->reg, ®_val);
2666 reg_val &= field->mask;
2667 reg_val >>= field->shift;
2672 EXPORT_SYMBOL_GPL(regmap_field_read);
2675 * regmap_fields_read() - Read a value to a single register field with port ID
2677 * @field: Register field to read from
2679 * @val: Pointer to store read value
2681 * A value of zero will be returned on success, a negative errno will
2682 * be returned in error cases.
2684 int regmap_fields_read(struct regmap_field *field, unsigned int id,
2688 unsigned int reg_val;
2690 if (id >= field->id_size)
2693 ret = regmap_read(field->regmap,
2694 field->reg + (field->id_offset * id),
2699 reg_val &= field->mask;
2700 reg_val >>= field->shift;
2705 EXPORT_SYMBOL_GPL(regmap_fields_read);
2708 * regmap_bulk_read() - Read multiple registers from the device
2710 * @map: Register map to read from
2711 * @reg: First register to be read from
2712 * @val: Pointer to store read value, in native register size for device
2713 * @val_count: Number of registers to read
2715 * A value of zero will be returned on success, a negative errno will
2716 * be returned in error cases.
2718 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2722 size_t val_bytes = map->format.val_bytes;
2723 bool vol = regmap_volatile_range(map, reg, val_count);
2725 if (!IS_ALIGNED(reg, map->reg_stride))
2730 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2731 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
2735 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2736 map->format.parse_inplace(val + i);
2745 map->lock(map->lock_arg);
2747 for (i = 0; i < val_count; i++) {
2750 ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2755 switch (map->format.val_bytes) {
2777 map->unlock(map->lock_arg);
2782 EXPORT_SYMBOL_GPL(regmap_bulk_read);
2784 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2785 unsigned int mask, unsigned int val,
2786 bool *change, bool force_write)
2789 unsigned int tmp, orig;
2794 if (regmap_volatile(map, reg) && map->reg_update_bits) {
2795 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
2796 if (ret == 0 && change)
2799 ret = _regmap_read(map, reg, &orig);
2806 if (force_write || (tmp != orig)) {
2807 ret = _regmap_write(map, reg, tmp);
2808 if (ret == 0 && change)
2817 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
2819 * @map: Register map to update
2820 * @reg: Register to update
2821 * @mask: Bitmask to change
2822 * @val: New value for bitmask
2823 * @change: Boolean indicating if a write was done
2824 * @async: Boolean indicating asynchronously
2825 * @force: Boolean indicating use force update
2827 * Perform a read/modify/write cycle on a register map with change, async, force
2832 * With most buses the read must be done synchronously so this is most useful
2833 * for devices with a cache which do not need to interact with the hardware to
2834 * determine the current register value.
2836 * Returns zero for success, a negative number on error.
2838 int regmap_update_bits_base(struct regmap *map, unsigned int reg,
2839 unsigned int mask, unsigned int val,
2840 bool *change, bool async, bool force)
2844 map->lock(map->lock_arg);
2848 ret = _regmap_update_bits(map, reg, mask, val, change, force);
2852 map->unlock(map->lock_arg);
2856 EXPORT_SYMBOL_GPL(regmap_update_bits_base);
2858 void regmap_async_complete_cb(struct regmap_async *async, int ret)
2860 struct regmap *map = async->map;
2863 trace_regmap_async_io_complete(map);
2865 spin_lock(&map->async_lock);
2866 list_move(&async->list, &map->async_free);
2867 wake = list_empty(&map->async_list);
2870 map->async_ret = ret;
2872 spin_unlock(&map->async_lock);
2875 wake_up(&map->async_waitq);
2877 EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
2879 static int regmap_async_is_done(struct regmap *map)
2881 unsigned long flags;
2884 spin_lock_irqsave(&map->async_lock, flags);
2885 ret = list_empty(&map->async_list);
2886 spin_unlock_irqrestore(&map->async_lock, flags);
2892 * regmap_async_complete - Ensure all asynchronous I/O has completed.
2894 * @map: Map to operate on.
2896 * Blocks until any pending asynchronous I/O has completed. Returns
2897 * an error code for any failed I/O operations.
2899 int regmap_async_complete(struct regmap *map)
2901 unsigned long flags;
2904 /* Nothing to do with no async support */
2905 if (!map->bus || !map->bus->async_write)
2908 trace_regmap_async_complete_start(map);
2910 wait_event(map->async_waitq, regmap_async_is_done(map));
2912 spin_lock_irqsave(&map->async_lock, flags);
2913 ret = map->async_ret;
2915 spin_unlock_irqrestore(&map->async_lock, flags);
2917 trace_regmap_async_complete_done(map);
2921 EXPORT_SYMBOL_GPL(regmap_async_complete);
2924 * regmap_register_patch - Register and apply register updates to be applied
2925 * on device initialistion
2927 * @map: Register map to apply updates to.
2928 * @regs: Values to update.
2929 * @num_regs: Number of entries in regs.
2931 * Register a set of register updates to be applied to the device
2932 * whenever the device registers are synchronised with the cache and
2933 * apply them immediately. Typically this is used to apply
2934 * corrections to be applied to the device defaults on startup, such
2935 * as the updates some vendors provide to undocumented registers.
2937 * The caller must ensure that this function cannot be called
2938 * concurrently with either itself or regcache_sync().
2940 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
2943 struct reg_sequence *p;
2947 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
2951 p = krealloc(map->patch,
2952 sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
2955 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
2957 map->patch_regs += num_regs;
2962 map->lock(map->lock_arg);
2964 bypass = map->cache_bypass;
2966 map->cache_bypass = true;
2969 ret = _regmap_multi_reg_write(map, regs, num_regs);
2972 map->cache_bypass = bypass;
2974 map->unlock(map->lock_arg);
2976 regmap_async_complete(map);
2980 EXPORT_SYMBOL_GPL(regmap_register_patch);
2983 * regmap_get_val_bytes() - Report the size of a register value
2985 * @map: Register map to operate on.
2987 * Report the size of a register value, mainly intended to for use by
2988 * generic infrastructure built on top of regmap.
2990 int regmap_get_val_bytes(struct regmap *map)
2992 if (map->format.format_write)
2995 return map->format.val_bytes;
2997 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3000 * regmap_get_max_register() - Report the max register value
3002 * @map: Register map to operate on.
3004 * Report the max register value, mainly intended to for use by
3005 * generic infrastructure built on top of regmap.
3007 int regmap_get_max_register(struct regmap *map)
3009 return map->max_register ? map->max_register : -EINVAL;
3011 EXPORT_SYMBOL_GPL(regmap_get_max_register);
3014 * regmap_get_reg_stride() - Report the register address stride
3016 * @map: Register map to operate on.
3018 * Report the register address stride, mainly intended to for use by
3019 * generic infrastructure built on top of regmap.
3021 int regmap_get_reg_stride(struct regmap *map)
3023 return map->reg_stride;
3025 EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3027 int regmap_parse_val(struct regmap *map, const void *buf,
3030 if (!map->format.parse_val)
3033 *val = map->format.parse_val(buf);
3037 EXPORT_SYMBOL_GPL(regmap_parse_val);
3039 static int __init regmap_initcall(void)
3041 regmap_debugfs_initcall();
3045 postcore_initcall(regmap_initcall);