diff --git a/mm/readahead.c b/mm/readahead.c
index 3c9a8dd..c5b0457 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -158,10 +158,8 @@ out:
 }
 
 /**
- * page_cache_readahead_unbounded - Start unchecked readahead.
- * @mapping: File address space.
- * @file: This instance of the open file; used for authentication.
- * @index: First page index to read.
+ * page_cache_ra_unbounded - Start unchecked readahead.
+ * @ractl: Readahead control.
  * @nr_to_read: The number of pages to read.
  * @lookahead_size: Where to start the next readahead.
  *
@@ -173,17 +171,13 @@ out:
  * Context: File is referenced by caller.  Mutexes may be held by caller.
  * May sleep, but will not reenter filesystem to reclaim memory.
  */
-void page_cache_readahead_unbounded(struct address_space *mapping,
-               struct file *file, pgoff_t index, unsigned long nr_to_read,
-               unsigned long lookahead_size)
+void page_cache_ra_unbounded(struct readahead_control *ractl,
+               unsigned long nr_to_read, unsigned long lookahead_size)
 {
+       struct address_space *mapping = ractl->mapping;
+       unsigned long index = readahead_index(ractl);
        LIST_HEAD(page_pool);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);
-       struct readahead_control rac = {
-               .mapping = mapping,
-               .file = file,
-               ._index = index,
-       };
        unsigned long i;
 
        /*
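
Callers of page_cache_ra_unbounded() now describe the request with a
readahead_control instead of separate mapping/file/index arguments. A
minimal sketch of issuing a request under the new API, assuming the
DEFINE_READAHEAD() helper from <linux/pagemap.h> as it looked around this
series (later kernels grew an extra file_ra_state argument):

	/* Sketch only: read 64 pages from @index, marking the last 16
	 * so the next access triggers asynchronous readahead. */
	static void example_ra(struct file *file,
			struct address_space *mapping, pgoff_t index)
	{
		DEFINE_READAHEAD(ractl, file, mapping, index);

		page_cache_ra_unbounded(&ractl, 64, 16);
	}
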
@@ -204,7 +198,7 @@ void page_cache_readahead_unbounded(struct address_space *mapping,
        for (i = 0; i < nr_to_read; i++) {
                struct page *page = xa_load(&mapping->i_pages, index + i);
 
-               BUG_ON(index + i != rac._index + rac._nr_pages);
+               BUG_ON(index + i != ractl->_index + ractl->_nr_pages);
 
                if (page && !xa_is_value(page)) {
                        /*
@@ -215,7 +209,7 @@ void page_cache_readahead_unbounded(struct address_space *mapping,
                         * have a stable reference to this page, and it's
                         * not worth getting one just for that.
                         */
-                       read_pages(&rac, &page_pool, true);
+                       read_pages(ractl, &page_pool, true);
                        continue;
                }
 
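
read_pages() is static to this file; what matters at these call sites is
the invariant it restores. A rough sketch of that contract (not the
kernel's actual body, which also unlocks and releases unconsumed pages):

	static void read_pages_sketch(struct readahead_control *rac,
			struct list_head *pages, bool skip_page)
	{
		/* Submit everything batched so far... */
		if (readahead_count(rac))
			rac->mapping->a_ops->readahead(rac); /* or ->readpages() */

		/* ...then leave an empty window positioned past the batch,
		 * keeping _index + _nr_pages == the next index to consider. */
		rac->_index += rac->_nr_pages;
		rac->_nr_pages = 0;
		if (skip_page)
			rac->_index++;	/* step over the page we could not add */
	}

This is what keeps the BUG_ON() above satisfied across the continue.
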
@@ -228,12 +222,12 @@ void page_cache_readahead_unbounded(struct address_space *mapping,
                } else if (add_to_page_cache_lru(page, mapping, index + i,
                                        gfp_mask) < 0) {
                        put_page(page);
-                       read_pages(&rac, &page_pool, true);
+                       read_pages(ractl, &page_pool, true);
                        continue;
                }
                if (i == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
-               rac._nr_pages++;
+               ractl->_nr_pages++;
        }
 
        /*
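
Worked example for the marker placement above: with nr_to_read == 32 and
lookahead_size == 8, PageReadahead is set on the page at i == 24, so the
reader hits the marker while 8 pages of the current window remain and the
next asynchronous window can be started before the stream runs dry.
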
@@ -241,22 +235,22 @@ void page_cache_readahead_unbounded(struct address_space *mapping,
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
-       read_pages(&rac, &page_pool, false);
+       read_pages(ractl, &page_pool, false);
        memalloc_nofs_restore(nofs);
 }
-EXPORT_SYMBOL_GPL(page_cache_readahead_unbounded);
+EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
+ * do_page_cache_ra() actually reads a chunk of disk.  It allocates
  * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  */
-void __do_page_cache_readahead(struct address_space *mapping,
-               struct file *file, pgoff_t index, unsigned long nr_to_read,
-               unsigned long lookahead_size)
+void do_page_cache_ra(struct readahead_control *ractl,
+               unsigned long nr_to_read, unsigned long lookahead_size)
 {
-       struct inode *inode = mapping->host;
+       struct inode *inode = ractl->mapping->host;
+       unsigned long index = readahead_index(ractl);
        loff_t isize = i_size_read(inode);
        pgoff_t end_index;      /* The last page we want to read */
 
@@ -270,20 +264,19 @@ void __do_page_cache_readahead(struct address_space *mapping,
        if (nr_to_read > end_index - index)
                nr_to_read = end_index - index + 1;
 
-       page_cache_readahead_unbounded(mapping, file, index, nr_to_read,
-                       lookahead_size);
+       page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
 }
 
 /*
  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
  * memory at once.
  */
-void force_page_cache_readahead(struct address_space *mapping,
-               struct file *filp, pgoff_t index, unsigned long nr_to_read)
+void force_page_cache_ra(struct readahead_control *ractl,
+               struct file_ra_state *ra, unsigned long nr_to_read)
 {
+       struct address_space *mapping = ractl->mapping;
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
-       struct file_ra_state *ra = &filp->f_ra;
-       unsigned long max_pages;
+       unsigned long max_pages, index;
 
        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
                        !mapping->a_ops->readahead))
@@ -293,14 +286,16 @@ void force_page_cache_readahead(struct address_space *mapping,
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
+       index = readahead_index(ractl);
        max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
-       nr_to_read = min(nr_to_read, max_pages);
+       nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
        while (nr_to_read) {
                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
-               __do_page_cache_readahead(mapping, filp, index, this_chunk, 0);
+               ractl->_index = index;
+               do_page_cache_ra(ractl, this_chunk, 0);
 
                index += this_chunk;
                nr_to_read -= this_chunk;
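
With 4KiB pages, (2 * 1024 * 1024) / PAGE_SIZE works out to 512 pages per
chunk, so a 3MiB request (768 pages) from, say, POSIX_FADV_WILLNEED is
submitted as two reads of 512 and 256 pages, with ractl->_index
repositioned to the start of each chunk before calling do_page_cache_ra().
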
@@ -437,14 +432,14 @@ static int try_context_readahead(struct address_space *mapping,
 /*
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
-static void ondemand_readahead(struct address_space *mapping,
-               struct file_ra_state *ra, struct file *filp,
-               bool hit_readahead_marker, pgoff_t index,
+static void ondemand_readahead(struct readahead_control *ractl,
+               struct file_ra_state *ra, bool hit_readahead_marker,
                unsigned long req_size)
 {
-       struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
+       struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
        unsigned long max_pages = ra->ra_pages;
        unsigned long add_pages;
+       unsigned long index = readahead_index(ractl);
        pgoff_t prev_index;
 
        /*
@@ -482,7 +477,8 @@ static void ondemand_readahead(struct address_space *mapping,
                pgoff_t start;
 
                rcu_read_lock();
-               start = page_cache_next_miss(mapping, index + 1, max_pages);
+               start = page_cache_next_miss(ractl->mapping, index + 1,
+                               max_pages);
                rcu_read_unlock();
 
                if (!start || start - index > max_pages)
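
page_cache_next_miss() returns the index of the first hole in the page
cache at or after index + 1, scanning at most max_pages slots. If the
scan wraps to zero or the hole lies beyond max_pages, the readahead
marker is treated as stale and the function bails out; otherwise the code
below this hunk sizes the next window from start - index, the run of
pages already cached ahead of us.
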
@@ -515,14 +511,15 @@ static void ondemand_readahead(struct address_space *mapping,
         * Query the page cache and look for the traces (cached history pages)
         * that a sequential stream would leave behind.
         */
-       if (try_context_readahead(mapping, ra, index, req_size, max_pages))
+       if (try_context_readahead(ractl->mapping, ra, index, req_size,
+                       max_pages))
                goto readit;
 
        /*
         * standalone, small random read
         * Read as is, and do not pollute the readahead state.
         */
-       __do_page_cache_readahead(mapping, filp, index, req_size, 0);
+       do_page_cache_ra(ractl, req_size, 0);
        return;
 
 initial_readahead:
@@ -548,63 +545,42 @@ readit:
                }
        }
 
-       ra_submit(ra, mapping, filp);
+       ractl->_index = ra->start;
+       do_page_cache_ra(ractl, ra->size, ra->async_size);
 }
 
-/**
- * page_cache_sync_readahead - generic file readahead
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @index: Index of first page to be read.
- * @req_count: Total number of pages being read by the caller.
- *
- * page_cache_sync_readahead() should be called when a cache miss happened:
- * it will submit the read.  The readahead logic may decide to piggyback more
- * pages onto the read request if access patterns suggest it will improve
- * performance.
- */
-void page_cache_sync_readahead(struct address_space *mapping,
-                              struct file_ra_state *ra, struct file *filp,
-                              pgoff_t index, unsigned long req_count)
+void page_cache_sync_ra(struct readahead_control *ractl,
+               struct file_ra_state *ra, unsigned long req_count)
 {
-       /* no read-ahead */
-       if (!ra->ra_pages)
-               return;
+       bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
 
-       if (blk_cgroup_congested())
-               return;
+       /*
+        * Even if read-ahead is disabled, issue this request as read-ahead
+        * as we'll need it to satisfy the requested range. The forced
+        * read-ahead will do the right thing and limit the read to just the
+        * requested range, which we'll set to 1 page for this case.
+        */
+       if (!ra->ra_pages || blk_cgroup_congested()) {
+               if (!ractl->file)
+                       return;
+               req_count = 1;
+               do_forced_ra = true;
+       }
 
        /* be dumb */
-       if (filp && (filp->f_mode & FMODE_RANDOM)) {
-               force_page_cache_readahead(mapping, filp, index, req_count);
+       if (do_forced_ra) {
+               force_page_cache_ra(ractl, ra, req_count);
                return;
        }
 
        /* do read-ahead */
-       ondemand_readahead(mapping, ra, filp, false, index, req_count);
+       ondemand_readahead(ractl, ra, false, req_count);
 }
-EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
-/**
- * page_cache_async_readahead - file readahead for marked pages
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @page: The page at @index which triggered the readahead call.
- * @index: Index of first page to be read.
- * @req_count: Total number of pages being read by the caller.
- *
- * page_cache_async_readahead() should be called when a page is used which
- * is marked as PageReadahead; this is a marker to suggest that the application
- * has used up enough of the readahead window that we should start pulling in
- * more pages.
- */
-void
-page_cache_async_readahead(struct address_space *mapping,
-                          struct file_ra_state *ra, struct file *filp,
-                          struct page *page, pgoff_t index,
-                          unsigned long req_count)
+void page_cache_async_ra(struct readahead_control *ractl,
+               struct file_ra_state *ra, struct page *page,
+               unsigned long req_count)
 {
        /* no read-ahead */
        if (!ra->ra_pages)
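
Existing callers keep the old page_cache_sync_readahead() signature via a
static inline wrapper in <linux/pagemap.h>, added alongside this change.
Its shape, as best as can be reconstructed here (treat as a sketch):

	static inline void page_cache_sync_readahead(struct address_space *mapping,
			struct file_ra_state *ra, struct file *file,
			pgoff_t index, unsigned long req_count)
	{
		DEFINE_READAHEAD(ractl, file, mapping, index);
		page_cache_sync_ra(&ractl, ra, req_count);
	}
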
@@ -621,16 +597,16 @@ page_cache_async_readahead(struct address_space *mapping,
        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
-       if (inode_read_congested(mapping->host))
+       if (inode_read_congested(ractl->mapping->host))
                return;
 
        if (blk_cgroup_congested())
                return;
 
        /* do read-ahead */
-       ondemand_readahead(mapping, ra, filp, true, index, req_count);
+       ondemand_readahead(ractl, ra, true, req_count);
 }
-EXPORT_SYMBOL_GPL(page_cache_async_readahead);
+EXPORT_SYMBOL_GPL(page_cache_async_ra);
 
 ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 {
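
page_cache_async_ra() gets the matching wrapper, again a sketch of the
static inline added with this series:

	static inline void page_cache_async_readahead(struct address_space *mapping,
			struct file_ra_state *ra, struct file *file,
			struct page *page, pgoff_t index,
			unsigned long req_count)
	{
		DEFINE_READAHEAD(ractl, file, mapping, index);
		page_cache_async_ra(&ractl, ra, page, req_count);
	}
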