#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/pagevec.h>
#include <linux/pagemap.h>
+#include <linux/psi.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
if (!readahead_count(rac))
return;
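+ /*
+ * Refaulting workingset folios in this batch indicate thrashing:
+ * account the time spent submitting these reads as a memory stall.
+ */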
+ if (unlikely(rac->_workingset))
+ psi_memstall_enter(&rac->_pflags);
blk_start_plug(&plug);
if (aops->readahead) {
}
blk_finish_plug(&plug);
+ if (unlikely(rac->_workingset))
+ psi_memstall_leave(&rac->_pflags);
+ rac->_workingset = false;
BUG_ON(readahead_count(rac));
}
*/
for (i = 0; i < nr_to_read; i++) {
struct folio *folio = xa_load(&mapping->i_pages, index + i);
+ int ret;
if (folio && !xa_is_value(folio)) {
/*
folio = filemap_alloc_folio(gfp_mask, 0);
if (!folio)
break;
- if (filemap_add_folio(mapping, folio, index + i,
- gfp_mask) < 0) {
+
+ ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
+ if (ret < 0) {
folio_put(folio);
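+ /*
+ * -ENOMEM means we are under memory pressure, so stop the
+ * readahead entirely; any other error (typically -EEXIST)
+ * only ends this batch of contiguous pages.
+ */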
+ if (ret == -ENOMEM)
+ break;
read_pages(ractl);
ractl->_index++;
i = ractl->_index + ractl->_nr_pages - index - 1;
}
if (i == nr_to_read - lookahead_size)
folio_set_readahead(folio);
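+ /* Remember refaults so read_pages() can account the memory stall. */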
+ ractl->_workingset |= folio_test_workingset(folio);
ractl->_nr_pages++;
}
struct address_space *mapping = ractl->mapping;
struct file_ra_state *ra = ractl->ra;
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
- unsigned long max_pages, index;
+ unsigned long max_pages;
if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
return;
* If the request exceeds the readahead window, allow the read to
* be up to the optimal hardware IO size
*/
- index = readahead_index(ractl);
max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
while (nr_to_read) {
if (this_chunk > nr_to_read)
this_chunk = nr_to_read;
- ractl->_index = index;
do_page_cache_ra(ractl, this_chunk, 0);
- index += this_chunk;
nr_to_read -= this_chunk;
}
}
* it approaches max_readahead.
*/
-/*
- * Count contiguously cached pages from @index-1 to @index-@max,
- * this count is a conservative estimation of
- * - length of the sequential read sequence, or
- * - thrashing threshold in memory tight systems
- */
-static pgoff_t count_history_pages(struct address_space *mapping,
- pgoff_t index, unsigned long max)
-{
- pgoff_t head;
-
- rcu_read_lock();
- head = page_cache_prev_miss(mapping, index - 1, max);
- rcu_read_unlock();
-
- return index - 1 - head;
-}
-
-/*
- * page cache context based readahead
- */
-static int try_context_readahead(struct address_space *mapping,
- struct file_ra_state *ra,
- pgoff_t index,
- unsigned long req_size,
- unsigned long max)
-{
- pgoff_t size;
-
- size = count_history_pages(mapping, index, max);
-
- /*
- * not enough history pages:
- * it could be a random read
- */
- if (size <= req_size)
- return 0;
-
- /*
- * starts from beginning of file:
- * it is a strong indication of long-run stream (or whole-file-read)
- */
- if (size >= index)
- size *= 2;
-
- ra->start = index;
- ra->size = min(size + req_size, max);
- ra->async_size = 1;
-
- return 1;
-}
-
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size. I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER 8
-#endif
-
static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
pgoff_t mark, unsigned int order, gfp_t gfp)
{
if (!folio)
return -ENOMEM;
- mark = round_up(mark, 1UL << order);
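+ /*
+ * The readahead mark may land in the middle of an order-sized folio;
+ * round it down so the flag is set on the folio that contains it.
+ */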
+ mark = round_down(mark, 1UL << order);
if (index == mark)
folio_set_readahead(folio);
err = filemap_add_folio(ractl->mapping, folio, index, gfp);
- if (err)
+ if (err) {
folio_put(folio);
- else
- ractl->_nr_pages += 1UL << order;
- return err;
+ return err;
+ }
+
+ ractl->_nr_pages += 1UL << order;
+ ractl->_workingset |= folio_test_workingset(folio);
+ return 0;
}
void page_cache_ra_order(struct readahead_control *ractl,
struct file_ra_state *ra, unsigned int new_order)
{
struct address_space *mapping = ractl->mapping;
- pgoff_t index = readahead_index(ractl);
+ pgoff_t start = readahead_index(ractl);
+ pgoff_t index = start;
pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
pgoff_t mark = index + ra->size - ra->async_size;
+ unsigned int nofs;
int err = 0;
gfp_t gfp = readahead_gfp_mask(mapping);
limit = min(limit, index + ra->size - 1);
- if (new_order < MAX_PAGECACHE_ORDER) {
+ if (new_order < MAX_PAGECACHE_ORDER)
new_order += 2;
- if (new_order > MAX_PAGECACHE_ORDER)
- new_order = MAX_PAGECACHE_ORDER;
- while ((1 << new_order) > ra->size)
- new_order--;
- }
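+ /* Clamp the order to the page cache limit and to what fits in the readahead window. */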
+ new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
+ new_order = min_t(unsigned int, new_order, ilog2(ra->size));
+
+ /* See comment in page_cache_ra_unbounded() */
+ nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
while (index <= limit) {
unsigned int order = new_order;
/* Align with smaller pages if needed */
- if (index & ((1UL << order) - 1)) {
+ if (index & ((1UL << order) - 1))
order = __ffs(index);
- if (order == 1)
- order = 0;
- }
/* Don't allocate pages past EOF */
- while (index + (1UL << order) - 1 > limit) {
- if (--order == 1)
- order = 0;
- }
+ while (index + (1UL << order) - 1 > limit)
+ order--;
err = ra_alloc_folio(ractl, index, mark, order, gfp);
if (err)
break;
index += 1UL << order;
}
- if (index > limit) {
- ra->size += index - limit - 1;
- ra->async_size += index - limit - 1;
- }
-
read_pages(ractl);
filemap_invalidate_unlock_shared(mapping);
+ memalloc_nofs_restore(nofs);
/*
* If there were already pages in the page cache, then we may have
if (!err)
return;
fallback:
- do_page_cache_ra(ractl, ra->size, ra->async_size);
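+ /*
+ * Only read the part of the window that the loop above did not
+ * already manage to allocate.
+ */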
+ do_page_cache_ra(ractl, ra->size - (index - start), ra->async_size);
}
-/*
- * A minimal readahead algorithm for trivial sequential/random reads.
- */
-static void ondemand_readahead(struct readahead_control *ractl,
- struct folio *folio, unsigned long req_size)
+static unsigned long ractl_max_pages(struct readahead_control *ractl,
+ unsigned long req_size)
{
struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
- struct file_ra_state *ra = ractl->ra;
- unsigned long max_pages = ra->ra_pages;
- unsigned long add_pages;
- pgoff_t index = readahead_index(ractl);
- pgoff_t expected, prev_index;
- unsigned int order = folio ? folio_order(folio) : 0;
+ unsigned long max_pages = ractl->ra->ra_pages;
/*
* If the request exceeds the readahead window, allow the read to
*/
if (req_size > max_pages && bdi->io_pages > max_pages)
max_pages = min(req_size, bdi->io_pages);
-
- /*
- * start of file
- */
- if (!index)
- goto initial_readahead;
-
- /*
- * It's the expected callback index, assume sequential access.
- * Ramp up sizes, and push forward the readahead window.
- */
- expected = round_up(ra->start + ra->size - ra->async_size,
- 1UL << order);
- if (index == expected || index == (ra->start + ra->size)) {
- ra->start += ra->size;
- ra->size = get_next_ra_size(ra, max_pages);
- ra->async_size = ra->size;
- goto readit;
- }
-
- /*
- * Hit a marked folio without valid readahead state.
- * E.g. interleaved reads.
- * Query the pagecache for async_size, which normally equals to
- * readahead size. Ramp it up and use it as the new readahead size.
- */
- if (folio) {
- pgoff_t start;
-
- rcu_read_lock();
- start = page_cache_next_miss(ractl->mapping, index + 1,
- max_pages);
- rcu_read_unlock();
-
- if (!start || start - index > max_pages)
- return;
-
- ra->start = start;
- ra->size = start - index; /* old async_size */
- ra->size += req_size;
- ra->size = get_next_ra_size(ra, max_pages);
- ra->async_size = ra->size;
- goto readit;
- }
-
- /*
- * oversize read
- */
- if (req_size > max_pages)
- goto initial_readahead;
-
- /*
- * sequential cache miss
- * trivial case: (index - prev_index) == 1
- * unaligned reads: (index - prev_index) == 0
- */
- prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
- if (index - prev_index <= 1UL)
- goto initial_readahead;
-
- /*
- * Query the page cache and look for the traces(cached history pages)
- * that a sequential stream would leave behind.
- */
- if (try_context_readahead(ractl->mapping, ra, index, req_size,
- max_pages))
- goto readit;
-
- /*
- * standalone, small random read
- * Read as is, and do not pollute the readahead state.
- */
- do_page_cache_ra(ractl, req_size, 0);
- return;
-
-initial_readahead:
- ra->start = index;
- ra->size = get_init_ra_size(req_size, max_pages);
- ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
-
-readit:
- /*
- * Will this read hit the readahead marker made by itself?
- * If so, trigger the readahead marker hit now, and merge
- * the resulted next readahead window into the current one.
- * Take care of maximum IO pages as above.
- */
- if (index == ra->start && ra->size == ra->async_size) {
- add_pages = get_next_ra_size(ra, max_pages);
- if (ra->size + add_pages <= max_pages) {
- ra->async_size = add_pages;
- ra->size += add_pages;
- } else {
- ra->size = max_pages;
- ra->async_size = max_pages >> 1;
- }
- }
-
- ractl->_index = ra->start;
- page_cache_ra_order(ractl, ra, order);
+ return max_pages;
}
void page_cache_sync_ra(struct readahead_control *ractl,
unsigned long req_count)
{
+ pgoff_t index = readahead_index(ractl);
bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
+ struct file_ra_state *ra = ractl->ra;
+ unsigned long max_pages, contig_count;
+ pgoff_t prev_index, miss;
/*
* Even if readahead is disabled, issue this request as readahead
* readahead will do the right thing and limit the read to just the
* requested range, which we'll set to 1 page for this case.
*/
- if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
+ if (!ra->ra_pages || blk_cgroup_congested()) {
if (!ractl->file)
return;
req_count = 1;
return;
}
- ondemand_readahead(ractl, NULL, req_count);
+ max_pages = ractl_max_pages(ractl, req_count);
+ prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
+ /*
+ * Start of file, an oversized read, or a sequential cache miss:
+ * trivial case: (index - prev_index) == 1
+ * unaligned reads: (index - prev_index) == 0
+ */
+ if (!index || req_count > max_pages || index - prev_index <= 1UL) {
+ ra->start = index;
+ ra->size = get_init_ra_size(req_count, max_pages);
+ ra->async_size = ra->size > req_count ? ra->size - req_count :
+ ra->size >> 1;
+ goto readit;
+ }
+
+ /*
+ * Query the page cache and look for the traces (cached history pages)
+ * that a sequential stream would leave behind.
+ */
+ rcu_read_lock();
+ miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
+ rcu_read_unlock();
+ contig_count = index - miss - 1;
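+ /*
+ * contig_count is a conservative estimate of the length of the
+ * preceding sequential stream, or of the thrashing threshold on
+ * memory-tight systems.
+ */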
+ /*
+ * Standalone, small random read. Read as is, and do not pollute the
+ * readahead state.
+ */
+ if (contig_count <= req_count) {
+ do_page_cache_ra(ractl, req_count, 0);
+ return;
+ }
+ /*
+ * File cached from the beginning:
+ * it is a strong indication of a long-running stream (or a whole-file read)
+ */
+ if (miss == ULONG_MAX)
+ contig_count *= 2;
+ ra->start = index;
+ ra->size = min(contig_count + req_count, max_pages);
+ ra->async_size = 1;
+readit:
+ ractl->_index = ra->start;
+ page_cache_ra_order(ractl, ra, 0);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
void page_cache_async_ra(struct readahead_control *ractl,
struct folio *folio, unsigned long req_count)
{
+ unsigned long max_pages;
+ struct file_ra_state *ra = ractl->ra;
+ pgoff_t index = readahead_index(ractl);
+ pgoff_t expected, start;
+ unsigned int order = folio_order(folio);
+
/* no readahead */
- if (!ractl->ra->ra_pages)
+ if (!ra->ra_pages)
return;
/*
if (blk_cgroup_congested())
return;
- ondemand_readahead(ractl, folio, req_count);
+ max_pages = ractl_max_pages(ractl, req_count);
+ /*
+ * It's the expected callback index, assume sequential access.
+ * Ramp up sizes, and push forward the readahead window.
+ */
+ expected = round_down(ra->start + ra->size - ra->async_size,
+ 1UL << order);
+ if (index == expected) {
+ ra->start += ra->size;
+ ra->size = get_next_ra_size(ra, max_pages);
+ ra->async_size = ra->size;
+ goto readit;
+ }
+
+ /*
+ * Hit a marked folio without valid readahead state.
+ * E.g. interleaved reads.
+ * Query the pagecache for async_size, which normally equals the
+ * readahead size. Ramp it up and use it as the new readahead size.
+ */
+ rcu_read_lock();
+ start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
+ rcu_read_unlock();
+
+ if (!start || start - index > max_pages)
+ return;
+
+ ra->start = start;
+ ra->size = start - index; /* old async_size */
+ ra->size += req_count;
+ ra->size = get_next_ra_size(ra, max_pages);
+ ra->async_size = ra->size;
+readit:
+ ractl->_index = ra->start;
+ page_cache_ra_order(ractl, ra, order);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);
*/
ret = -EINVAL;
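+ /*
+ * readahead(2) only makes sense for files backed by the page cache,
+ * which includes block device files as well as regular files.
+ */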
if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
- !S_ISREG(file_inode(f.file)->i_mode))
+ (!S_ISREG(file_inode(f.file)->i_mode) &&
+ !S_ISBLK(file_inode(f.file)->i_mode)))
goto out;
ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
/* Expand the leading edge downwards */
while (ractl->_index > new_index) {
unsigned long index = ractl->_index - 1;
- struct page *page = xa_load(&mapping->i_pages, index);
+ struct folio *folio = xa_load(&mapping->i_pages, index);
- if (page && !xa_is_value(page))
- return; /* Page apparently present */
+ if (folio && !xa_is_value(folio))
+ return; /* Folio apparently present */
- page = __page_cache_alloc(gfp_mask);
- if (!page)
+ folio = filemap_alloc_folio(gfp_mask, 0);
+ if (!folio)
return;
- if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
- put_page(page);
+ if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
+ folio_put(folio);
return;
}
-
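+ /*
+ * The first workingset refault while expanding marks this readahead
+ * as a memory stall; read_pages() will leave the stall state.
+ */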
+ if (unlikely(folio_test_workingset(folio)) &&
+ !ractl->_workingset) {
+ ractl->_workingset = true;
+ psi_memstall_enter(&ractl->_pflags);
+ }
ractl->_nr_pages++;
- ractl->_index = page->index;
+ ractl->_index = folio->index;
}
new_len += new_start - readahead_pos(ractl);
/* Expand the trailing edge upwards */
while (ractl->_nr_pages < new_nr_pages) {
unsigned long index = ractl->_index + ractl->_nr_pages;
- struct page *page = xa_load(&mapping->i_pages, index);
+ struct folio *folio = xa_load(&mapping->i_pages, index);
- if (page && !xa_is_value(page))
- return; /* Page apparently present */
+ if (folio && !xa_is_value(folio))
+ return; /* Folio apparently present */
- page = __page_cache_alloc(gfp_mask);
- if (!page)
+ folio = filemap_alloc_folio(gfp_mask, 0);
+ if (!folio)
return;
- if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
- put_page(page);
+ if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
+ folio_put(folio);
return;
}
+ if (unlikely(folio_test_workingset(folio)) &&
+ !ractl->_workingset) {
+ ractl->_workingset = true;
+ psi_memstall_enter(&ractl->_pflags);
+ }
ractl->_nr_pages++;
if (ra) {
ra->size++;