diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index b25db9b..09d293c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -457,10 +457,9 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
 
-static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
-                                           unsigned int depth)
+static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+                                           unsigned int wake_batch)
 {
-       unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
        int i;
 
        if (sbq->wake_batch != wake_batch) {
@@ -476,6 +475,30 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
        }
 }
 
+static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+                                           unsigned int depth)
+{
+       unsigned int wake_batch;
+
+       wake_batch = sbq_calc_wake_batch(sbq, depth);
+       __sbitmap_queue_update_wake_batch(sbq, wake_batch);
+}
+
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+                                           unsigned int users)
+{
+       unsigned int wake_batch;
+       unsigned int min_batch;
+       unsigned int depth = (sbq->sb.depth + users - 1) / users;
+
+       min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;
+
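+       /*
+        * Give each of the SBQ_WAIT_QUEUES wait queues a share of the
+        * per-user depth, clamped to [min_batch, SBQ_WAKE_BATCH].
+        */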
+       wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
+                       min_batch, SBQ_WAKE_BATCH);
+       __sbitmap_queue_update_wake_batch(sbq, wake_batch);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
+
 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
 {
        sbitmap_queue_update_wake_batch(sbq, depth);
@@ -489,6 +512,57 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 }
 EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
 
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+                                       unsigned int *offset)
+{
+       struct sbitmap *sb = &sbq->sb;
+       unsigned int hint, depth;
+       unsigned long index, nr;
+       int i;
+
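+       /* Batched gets are not supported in round-robin mode. */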
+       if (unlikely(sb->round_robin))
+               return 0;
+
+       depth = READ_ONCE(sb->depth);
+       hint = update_alloc_hint_before_get(sb, depth);
+
+       index = SB_NR_TO_INDEX(sb, hint);
+
+       for (i = 0; i < sb->map_nr; i++) {
+               struct sbitmap_word *map = &sb->map[index];
+               unsigned long get_mask;
+
+               sbitmap_deferred_clear(map);
+               if (map->word == (1UL << (map->depth - 1)) - 1)
+                       continue;
+
+               nr = find_first_zero_bit(&map->word, map->depth);
+               if (nr + nr_tags <= map->depth) {
+                       atomic_long_t *ptr = (atomic_long_t *) &map->word;
+                       int map_tags = min_t(int, nr_tags, map->depth);
+                       unsigned long val, ret;
+
+                       get_mask = ((1UL << map_tags) - 1) << nr;
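+                       /*
+                        * Claim the whole run of bits with a single cmpxchg,
+                        * retrying if the word changed under us.
+                        */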
+                       do {
+                               val = READ_ONCE(map->word);
+                               ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
+                       } while (ret != val);
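+                       /*
+                        * Keep only the bits we actually flipped from 0 to 1,
+                        * shifted down so bit 0 corresponds to *offset.
+                        */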
+                       get_mask = (get_mask & ~ret) >> nr;
+                       if (get_mask) {
+                               *offset = nr + (index << sb->shift);
+                               update_alloc_hint_after_get(sb, depth, hint,
+                                                       *offset + map_tags - 1);
+                               return get_mask;
+                       }
+               }
+               /* Jump to next index. */
+               if (++index >= sb->map_nr)
+                       index = 0;
+       }
+
+       return 0;
+}
+
 int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
                                unsigned int shallow_depth)
 {
@@ -577,6 +651,46 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 
+static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
+{
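+       /* The hint is only a search starting point, so a racy update is fine. */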
+       if (likely(!sb->round_robin && tag < sb->depth))
+               data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
+}
+
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+                               int *tags, int nr_tags)
+{
+       struct sbitmap *sb = &sbq->sb;
+       unsigned long *addr = NULL;
+       unsigned long mask = 0;
+       int i;
+
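+       /*
+        * Ensure updates to the objects owning these tags are visible before
+        * the bits are cleared and the tags can be reallocated.
+        */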
+       smp_mb__before_atomic();
+       for (i = 0; i < nr_tags; i++) {
+               const int tag = tags[i] - offset;
+               unsigned long *this_addr;
+
+               /* since we're clearing a batch, skip the deferred map */
+               this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
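+               /*
+                * Collect tags that live in the same word into one mask and
+                * clear each word with a single atomic andnot.
+                */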
+               if (!addr) {
+                       addr = this_addr;
+               } else if (addr != this_addr) {
+                       atomic_long_andnot(mask, (atomic_long_t *) addr);
+                       mask = 0;
+                       addr = this_addr;
+               }
+               mask |= (1UL << SB_NR_TO_BIT(sb, tag));
+       }
+
+       if (mask)
+               atomic_long_andnot(mask, (atomic_long_t *) addr);
+
+       smp_mb__after_atomic();
+       sbitmap_queue_wake_up(sbq);
+       sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
+                                       tags[nr_tags - 1] - offset);
+}
+
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                         unsigned int cpu)
 {
@@ -601,9 +715,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
         */
        smp_mb__after_atomic();
        sbitmap_queue_wake_up(sbq);
-
-       if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
-               *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
+       sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);