Replace the swp function pointers in bcache's min_heap_callbacks with
NULL, allowing direct use of the default built-in swap implementation.
This change simplifies the code and improves performance by removing an
unnecessary layer of function-call indirection.
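
After this change, a heap user only needs to supply a comparison
callback; leaving .swp NULL makes the min_heap helpers fall back to
their built-in element swap.  A minimal sketch of the resulting callback
setup (example_less and example_callbacks are illustrative names, not
part of bcache):

	#include <linux/min_heap.h>

	/* Hypothetical comparison callback for a heap of ints. */
	static bool example_less(const void *l, const void *r,
				 void __always_unused *args)
	{
		const int *lhs = l, *rhs = r;

		return *lhs < *rhs;	/* min-heap: smaller element on top */
	}

	static const struct min_heap_callbacks example_callbacks = {
		.less	= example_less,
		.swp	= NULL,		/* NULL selects the default built-in swap */
	};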
Link: https://lkml.kernel.org/r/20241020040200.939973-8-visitorckw@gmail.com
Signed-off-by: Kuan-Wei Chiu <visitorckw@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw>
Cc: Coly Li <colyli@suse.de>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: "Liang, Kan" <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Sakai <msakai@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
}
-static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
-{
- struct bucket **lhs = l, **rhs = r;
-
- swap(*lhs, *rhs);
-}
-
static void invalidate_buckets_lru(struct cache *ca)
{
struct bucket *b;
const struct min_heap_callbacks bucket_max_cmp_callback = {
.less = new_bucket_max_cmp,
- .swp = new_bucket_swap,
+ .swp = NULL,
};
const struct min_heap_callbacks bucket_min_cmp_callback = {
.less = new_bucket_min_cmp,
- .swp = new_bucket_swap,
+ .swp = NULL,
};
ca->heap.nr = 0;
return bkey_cmp(_l->k, _r->k) <= 0;
}
-static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
-{
- struct btree_iter_set *_iter1 = iter1;
- struct btree_iter_set *_iter2 = iter2;
-
- swap(*_iter1, *_iter2);
-}
-
static inline bool btree_iter_end(struct btree_iter *iter)
{
return !iter->heap.nr;
{
const struct min_heap_callbacks callbacks = {
.less = new_btree_iter_cmp,
- .swp = new_btree_iter_swap,
+ .swp = NULL,
};
if (k != end)
struct bkey *ret = NULL;
const struct min_heap_callbacks callbacks = {
.less = cmp,
- .swp = new_btree_iter_swap,
+ .swp = NULL,
};
if (!btree_iter_end(iter)) {
: bch_ptr_invalid;
const struct min_heap_callbacks callbacks = {
.less = b->ops->sort_cmp,
- .swp = new_btree_iter_swap,
+ .swp = NULL,
};
/* Heapify the iterator, using our comparison function */
return !(c ? c > 0 : _l->k < _r->k);
}
-static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
-{
- struct btree_iter_set *_iter1 = iter1;
- struct btree_iter_set *_iter2 = iter2;
-
- swap(*_iter1, *_iter2);
-}
-
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
struct bkey *tmp)
{
const struct min_heap_callbacks callbacks = {
.less = new_bch_extent_sort_cmp,
- .swp = new_btree_iter_swap,
+ .swp = NULL,
};
while (iter->heap.nr > 1) {
struct btree_iter_set *top = iter->heap.data, *i = top + 1;
return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
}
-static void new_bucket_swap(void *l, void *r, void __always_unused *args)
-{
- struct bucket **_l = l;
- struct bucket **_r = r;
-
- swap(*_l, *_r);
-}
-
static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;
unsigned long sectors_to_move, reserve_sectors;
const struct min_heap_callbacks callbacks = {
.less = new_bucket_cmp,
- .swp = new_bucket_swap,
+ .swp = NULL,
};
if (!c->copy_gc_enabled)