static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
-static sector_t map_swap_entry(swp_entry_t, struct block_device**);
DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
{
unsigned long idx;
struct swap_cluster_info *ci;
- unsigned long offset, i;
- unsigned char *map;
+ unsigned long offset;
/*
* Should not even be attempting cluster allocations when huge
alloc_cluster(si, idx);
cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
- map = si->swap_map + offset;
- for (i = 0; i < SWAPFILE_CLUSTER; i++)
- map[i] = SWAP_HAS_CACHE;
+ memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
unlock_cluster(ci);
swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
*slot = swp_entry(si->type, offset);
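The memset() conversion is behavior-preserving: swap_map entries are single bytes and every slot in the cluster receives the same value, so one byte-fill over the range does exactly what the removed loop did. A standalone sketch of the equivalence (userspace C; the SWAPFILE_CLUSTER value here is illustrative, SWAP_HAS_CACHE matches include/linux/swap.h):

    #include <assert.h>
    #include <string.h>

    #define SWAPFILE_CLUSTER 256  /* illustrative; the kernel derives the real value */
    #define SWAP_HAS_CACHE   0x40 /* as in include/linux/swap.h */

    int main(void)
    {
        unsigned char a[SWAPFILE_CLUSTER], b[SWAPFILE_CLUSTER];
        unsigned long i;

        for (i = 0; i < SWAPFILE_CLUSTER; i++) /* old form */
            a[i] = SWAP_HAS_CACHE;

        memset(b, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); /* new form */

        assert(memcmp(a, b, SWAPFILE_CLUSTER) == 0);
        return 0;
    }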
/* Only single cluster request supported */
WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
+ spin_lock(&swap_avail_lock);
+
avail_pgs = atomic_long_read(&nr_swap_pages) / size;
- if (avail_pgs <= 0)
+ if (avail_pgs <= 0) {
+ spin_unlock(&swap_avail_lock);
goto noswap;
+ }
n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
atomic_long_sub(n_goal * size, &nr_swap_pages);
- spin_lock(&swap_avail_lock);
-
start_over:
node = numa_node_id();
plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
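Taking swap_avail_lock before the nr_swap_pages read means the avail_pgs <= 0 bailout now runs with the lock held, which is why the branch grows braces and its own spin_unlock(); the availability check and the list walk are then serialized under a single lock acquisition instead of racing between them. A minimal userspace sketch of the pattern (a pthread mutex standing in for the spinlock; all names hypothetical):

    #include <assert.h>
    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t avail_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_long nr_free = 4;

    static long reserve(long want)
    {
        long avail;

        pthread_mutex_lock(&avail_lock);   /* lock first ... */
        avail = atomic_load(&nr_free);     /* ... then test under the lock */
        if (avail <= 0) {
            pthread_mutex_unlock(&avail_lock); /* bailout path must unlock */
            return 0;
        }
        if (want > avail)
            want = avail;
        atomic_fetch_sub(&nr_free, want);
        /* ... walk the avail list here, still under the lock ... */
        pthread_mutex_unlock(&avail_lock);
        return want;
    }

    int main(void)
    {
        assert(reserve(2) == 2);
        assert(reserve(9) == 2); /* clamped to what is left */
        assert(reserve(1) == 0); /* exhausted: takes the bailout path */
        return 0;
    }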
spin_lock(&si->lock);
if (si->flags & SWP_WRITEOK) {
- atomic_long_dec(&nr_swap_pages);
/* This is called for allocating swap entry, not cache */
offset = scan_swap_map(si, 1);
if (offset) {
+ atomic_long_dec(&nr_swap_pages);
spin_unlock(&si->lock);
return swp_entry(type, offset);
}
- atomic_long_inc(&nr_swap_pages);
}
spin_unlock(&si->lock);
fail:
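Deferring atomic_long_dec() until scan_swap_map() has succeeded replaces the old charge-then-refund dance: the failure path used to perform two atomic read-modify-writes on nr_swap_pages just to leave it unchanged. A condensed before/after sketch (userspace C, names hypothetical):

    #include <assert.h>
    #include <stdatomic.h>

    static atomic_long nr_free = 8;
    static long next_slot; /* stands in for scan_swap_map(); 0 = none free */

    static long alloc_old(void)
    {
        long slot;

        atomic_fetch_sub(&nr_free, 1); /* charge up front */
        slot = next_slot;
        if (!slot)
            atomic_fetch_add(&nr_free, 1); /* refund on failure */
        return slot;
    }

    static long alloc_new(void)
    {
        long slot = next_slot;

        if (slot)
            atomic_fetch_sub(&nr_free, 1); /* charge only on success */
        return slot;
    }

    int main(void)
    {
        next_slot = 0; /* failure case */
        assert(alloc_old() == 0 && atomic_load(&nr_free) == 8); /* two RMWs, net zero */
        assert(alloc_new() == 0 && atomic_load(&nr_free) == 8); /* no RMW at all */

        next_slot = 42; /* success case */
        assert(alloc_new() == 42 && atomic_load(&nr_free) == 7);
        return 0;
    }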
*/
sector_t swapdev_block(int type, pgoff_t offset)
{
- struct block_device *bdev;
struct swap_info_struct *si = swap_type_to_swap_info(type);
+ struct swap_extent *se;
if (!si || !(si->flags & SWP_WRITEOK))
return 0;
- return map_swap_entry(swp_entry(type, offset), &bdev);
+ se = offset_to_swap_extent(si, offset);
+ return se->start_block + (offset - se->start_page);
}
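swapdev_block() now open-codes the extent lookup. The arithmetic rests on the invariant that one swap extent maps a contiguous run of page offsets [start_page, start_page + nr_pages) to disk blocks starting at start_block, so any offset in the run lands at start_block + (offset - start_page). A minimal demonstration with a mock extent (simplified struct, made-up values):

    #include <assert.h>

    typedef unsigned long pgoff_t;
    typedef unsigned long long sector_t;

    struct swap_extent {   /* simplified: the kernel's also carries an rb_node */
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
    };

    static sector_t extent_block(const struct swap_extent *se, pgoff_t offset)
    {
        assert(offset >= se->start_page &&
               offset <  se->start_page + se->nr_pages);
        return se->start_block + (offset - se->start_page);
    }

    int main(void)
    {
        /* pages 100..149 of the swapfile live at blocks 7000..7049 */
        struct swap_extent se = { .start_page = 100, .nr_pages = 50,
                                  .start_block = 7000 };

        assert(extent_block(&se, 100) == 7000); /* first page of the run */
        assert(extent_block(&se, 133) == 7033); /* interior page */
        return 0;
    }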
/*
si = swap_info[type];
pte = pte_offset_map(pmd, addr);
do {
- struct vm_fault vmf;
-
if (!is_swap_pte(*pte))
continue;
swap_map = &si->swap_map[offset];
page = lookup_swap_cache(entry, vma, addr);
if (!page) {
- vmf.vma = vma;
- vmf.address = addr;
- vmf.pmd = pmd;
+ struct vm_fault vmf = {
+ .vma = vma,
+ .address = addr,
+ .pmd = pmd,
+ };
+
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
&vmf);
}
spin_unlock(&mmlist_lock);
}
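The initializer form is not just cosmetic: C zero-initializes every member omitted from a designated initializer, whereas the old three assignments left the remaining vm_fault fields holding stack garbage that swapin_readahead() could then see. A self-contained illustration with a toy struct (the guarantee is the C standard's, not the kernel's):

    #include <assert.h>

    struct fault {           /* toy stand-in for struct vm_fault */
        void *vma;
        unsigned long address;
        void *pmd;
        unsigned long flags; /* never named below */
        void *cow_page;      /* ditto */
    };

    int main(void)
    {
        struct fault f = {
            .address = 0x1000, /* unnamed members are implicitly zeroed */
        };

        assert(f.flags == 0);
        assert(f.cow_page == (void *)0);
        assert(f.vma == (void *)0 && f.pmd == (void *)0);
        return 0;
    }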
-/*
- * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
- * corresponds to page offset for the specified swap entry.
- * Note that the type of this function is sector_t, but it returns page offset
- * into the bdev, not sector offset.
- */
-static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
-{
- struct swap_info_struct *sis;
- struct swap_extent *se;
- pgoff_t offset;
-
- sis = swp_swap_info(entry);
- *bdev = sis->bdev;
-
- offset = swp_offset(entry);
- se = offset_to_swap_extent(sis, offset);
- return se->start_block + (offset - se->start_page);
-}
-
-/*
- * Returns the page offset into bdev for the specified page's swap entry.
- */
-sector_t map_swap_page(struct page *page, struct block_device **bdev)
-{
- swp_entry_t entry;
- entry.val = page_private(page);
- return map_swap_entry(entry, bdev);
-}
-
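With both helpers gone, the extent lookup that map_swap_entry() performed survives only where it is open-coded, as in swapdev_block() above. Any caller that still needed an entry-to-block mapping would reduce to the same few lines; a sketch of that pattern follows, where swap_entry_block() is a hypothetical name, the helpers are the real ones in this file, and the snippet compiles only inside mm/swapfile.c:

    /* Hypothetical, for illustration only; not part of this patch. */
    static sector_t swap_entry_block(swp_entry_t entry)
    {
        struct swap_info_struct *sis = swp_swap_info(entry);
        pgoff_t offset = swp_offset(entry);
        struct swap_extent *se = offset_to_swap_extent(sis, offset);

        return se->start_block + (offset - se->start_page);
    }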
/*
* Free all of a swapdev's extent information
*/
unsigned long offset;
unsigned char count;
unsigned char has_cache;
- int err = -EINVAL;
+ int err;
p = get_swap_device(entry);
if (!p)
- goto out;
+ return -EINVAL;
offset = swp_offset(entry);
ci = lock_cluster_or_swap_info(p, offset);
unlock_out:
unlock_cluster_or_swap_info(p, ci);
-out:
- if (p)
- put_swap_device(p);
+ put_swap_device(p);
return err;
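Since get_swap_device() is the only failure point ahead of the locked region, the early return makes both the -EINVAL pre-initialization of err and the out: label unnecessary, and it guarantees p is non-NULL wherever put_swap_device() runs (hence the NULL check dropped above). Condensed post-patch shape, with the elided reference-count logic marked:

    static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
    {
        struct swap_info_struct *p;
        struct swap_cluster_info *ci;
        unsigned long offset;
        int err;

        p = get_swap_device(entry);
        if (!p)
            return -EINVAL;  /* the only early failure */

        offset = swp_offset(entry);
        ci = lock_cluster_or_swap_info(p, offset);

        /* ... count/has_cache checks; every path below assigns err ... */

        unlock_cluster_or_swap_info(p, ci);
        put_swap_device(p);  /* p cannot be NULL here */
        return err;
    }

The final hunk below is equally mechanical: swap_count() is this file's existing helper, ent & ~SWAP_HAS_CACHE, so substituting it into add_swap_count_continuation() changes spelling, not behavior.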
ci = lock_cluster(si, offset);
- count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
+ count = swap_count(si->swap_map[offset]);
if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
/*