/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"
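
/*
 * Overview: the rbtree cache stores register values in variable-length
 * blocks of adjacent registers.  Each block is held in a
 * struct regcache_rbtree_node, keyed by its base register and kept in a
 * red-black tree so lookups by register address are O(log n).  A per-block
 * bitmap records which registers in the block actually hold cached values.
 */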
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	unsigned long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
};

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};
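
/*
 * Compute the first and last register address covered by a block.  The top
 * register is derived from the block length scaled by the map's register
 * stride, so sparse register maps are handled correctly.
 */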
static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}
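
/*
 * Find the block containing @reg.  The most recently used block is checked
 * first as a fast path; on a miss the red-black tree is walked and the
 * matching block is remembered for the next lookup.
 */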
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}
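
/*
 * Link a freshly allocated block into the tree, ordered by base register.
 * Returns 0 if a block covering the base register already exists, 1 once
 * the new node has been inserted and recoloured.
 */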
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}
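
/*
 * When debugfs is enabled a read-only "rbtree" file is exposed which lists
 * the register range held by each block together with totals for node
 * count, cached registers and approximate memory consumption.
 */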
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(rbtree);

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif
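
/*
 * Allocate the cache context and seed it with the map's register defaults
 * by writing each default value through the normal cache write path.
 */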
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}
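
/*
 * Grow an existing block so that it spans [base_reg, top_reg] and store the
 * new value.  The value array and presence bitmap are reallocated; when the
 * block is extended downwards the existing data is shifted up to make room
 * for the new registers at the front.
 */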
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       map->alloc_flags);
	if (!blk)
		return -ENOMEM;

	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   map->alloc_flags);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}
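
/*
 * Allocate a new block for @reg.  If the map has a readable-register table
 * the containing range is used to size the block up front; otherwise the
 * block starts out holding a single register.
 */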
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}
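
/*
 * Cache a register value.  If a block already covers the register it is
 * updated in place.  Otherwise the tree is searched for the closest block
 * within reach and the register is merged into it; if none is close enough
 * a new block is allocated and inserted into the tree.
 */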
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}
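
/*
 * Write back cached values in the range [min, max] to the hardware.  Only
 * blocks that overlap the range are considered, and the presence bitmap is
 * passed down so that registers which were never cached are skipped.
 */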
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}
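
/*
 * Invalidate cached values in the range [min, max] by clearing the
 * corresponding presence bits; the blocks themselves are kept allocated.
 */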
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}
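
/*
 * Illustrative usage (not part of this file): a driver selects this cache
 * implementation by setting .cache_type = REGCACHE_RBTREE in its
 * struct regmap_config before creating the regmap; the regcache core then
 * dispatches to the operations exported below.
 */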
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};