Merge tag 'kconfig-v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy...
diff --git a/mm/readahead.c b/mm/readahead.c
index c5b0457..d589f14 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -198,8 +198,6 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
        for (i = 0; i < nr_to_read; i++) {
                struct page *page = xa_load(&mapping->i_pages, index + i);
 
-               BUG_ON(index + i != ractl->_index + ractl->_nr_pages);
-
                if (page && !xa_is_value(page)) {
                        /*
                         * Page already present?  Kick off the current batch
@@ -210,6 +208,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
                         * not worth getting one just for that.
                         */
                        read_pages(ractl, &page_pool, true);
+                       i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }
 
@@ -223,6 +222,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
                                        gfp_mask) < 0) {
                        put_page(page);
                        read_pages(ractl, &page_pool, true);
+                       i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }
                if (i == nr_to_read - lookahead_size)
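The hunks above drop the BUG_ON() and instead re-derive the loop index after each read_pages() call. With readahead_expand() (added at the end of this patch) a ->readahead implementation can change ractl->_index and ractl->_nr_pages underneath this loop, so the fixed invariant index + i == ractl->_index + ractl->_nr_pages can no longer be asserted; the loop resynchronises i from the control structure whenever it submits a batch. A minimal userspace model of that arithmetic, assuming (as in this kernel version) that read_pages(..., true) consumes the whole batch and leaves _nr_pages == 0 with _index pointing just past the skipped page:

#include <assert.h>

/* Stand-in for the two struct readahead_control fields used here. */
struct ra_model { unsigned long _index, _nr_pages; };

int main(void)
{
	unsigned long index = 100;	/* start of this readahead window */
	/* Pages 100..106 were submitted, page 107 was already present and
	 * skipped, so read_pages() left _index == 108 and _nr_pages == 0. */
	struct ra_model ractl = { ._index = 108, ._nr_pages = 0 };

	/* Matches: i = ractl->_index + ractl->_nr_pages - index - 1; */
	unsigned long i = ractl._index + ractl._nr_pages - index - 1;

	/* After the loop's i++, the next xa_load() is at page 108, i.e. _index. */
	assert(index + (i + 1) == ractl._index);
	return 0;
}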
@@ -272,9 +272,10 @@ void do_page_cache_ra(struct readahead_control *ractl,
  * memory at once.
  */
 void force_page_cache_ra(struct readahead_control *ractl,
-               struct file_ra_state *ra, unsigned long nr_to_read)
+               unsigned long nr_to_read)
 {
        struct address_space *mapping = ractl->mapping;
+       struct file_ra_state *ra = ractl->ra;
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages, index;
 
@@ -433,10 +434,10 @@ static int try_context_readahead(struct address_space *mapping,
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static void ondemand_readahead(struct readahead_control *ractl,
-               struct file_ra_state *ra, bool hit_readahead_marker,
-               unsigned long req_size)
+               bool hit_readahead_marker, unsigned long req_size)
 {
        struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
+       struct file_ra_state *ra = ractl->ra;
        unsigned long max_pages = ra->ra_pages;
        unsigned long add_pages;
        unsigned long index = readahead_index(ractl);
@@ -550,7 +551,7 @@ readit:
 }
 
 void page_cache_sync_ra(struct readahead_control *ractl,
-               struct file_ra_state *ra, unsigned long req_count)
+               unsigned long req_count)
 {
        bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
 
@@ -560,7 +561,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
         * read-ahead will do the right thing and limit the read to just the
         * requested range, which we'll set to 1 page for this case.
         */
-       if (!ra->ra_pages || blk_cgroup_congested()) {
+       if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
                if (!ractl->file)
                        return;
                req_count = 1;
@@ -569,21 +570,20 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 
        /* be dumb */
        if (do_forced_ra) {
-               force_page_cache_ra(ractl, ra, req_count);
+               force_page_cache_ra(ractl, req_count);
                return;
        }
 
        /* do read-ahead */
-       ondemand_readahead(ractl, ra, false, req_count);
+       ondemand_readahead(ractl, false, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
 void page_cache_async_ra(struct readahead_control *ractl,
-               struct file_ra_state *ra, struct page *page,
-               unsigned long req_count)
+               struct page *page, unsigned long req_count)
 {
        /* no read-ahead */
-       if (!ra->ra_pages)
+       if (!ractl->ra->ra_pages)
                return;
 
        /*
@@ -604,7 +604,7 @@ void page_cache_async_ra(struct readahead_control *ractl,
                return;
 
        /* do read-ahead */
-       ondemand_readahead(ractl, ra, true, req_count);
+       ondemand_readahead(ractl, true, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_ra);
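The signature changes above remove the separate struct file_ra_state argument: the readahead state now travels inside the control structure as ractl->ra, and callers supply it when they build the ractl. A minimal sketch of the new calling convention, assuming a caller that drives synchronous readahead from a file's f_ra state (the wrapper name below is illustrative, not part of this patch; in-tree callers typically build the ractl through a helper macro rather than an open-coded initializer):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Illustrative helper: start synchronous readahead of @count pages at
 * @index of @file's mapping. */
static void example_sync_readahead(struct file *file, pgoff_t index,
				   unsigned long count)
{
	struct readahead_control ractl = {
		.file		= file,
		.mapping	= file->f_mapping,
		.ra		= &file->f_ra,	/* was a separate argument before this patch */
		._index		= index,
	};

	page_cache_sync_ra(&ractl, count);
}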
 
@@ -638,3 +638,78 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
 {
        return ksys_readahead(fd, offset, count);
 }
+
+/**
+ * readahead_expand - Expand a readahead request
+ * @ractl: The request to be expanded
+ * @new_start: The revised start
+ * @new_len: The revised size of the request
+ *
+ * Attempt to expand a readahead request outwards from the current size to the
+ * specified size by inserting locked pages before and after the current
+ * window, widening it to cover the new range.  This may involve the insertion
+ * of THPs, in which case the window may get expanded even beyond what was
+ * requested.
+ *
+ * The algorithm will stop if it encounters a conflicting page already in the
+ * pagecache and leave a smaller expansion than requested.
+ *
+ * The caller must check for this by examining the revised @ractl object for a
+ * different expansion than was requested.
+ */
+void readahead_expand(struct readahead_control *ractl,
+                     loff_t new_start, size_t new_len)
+{
+       struct address_space *mapping = ractl->mapping;
+       struct file_ra_state *ra = ractl->ra;
+       pgoff_t new_index, new_nr_pages;
+       gfp_t gfp_mask = readahead_gfp_mask(mapping);
+
+       new_index = new_start / PAGE_SIZE;
+
+       /* Expand the leading edge downwards */
+       while (ractl->_index > new_index) {
+               unsigned long index = ractl->_index - 1;
+               struct page *page = xa_load(&mapping->i_pages, index);
+
+               if (page && !xa_is_value(page))
+                       return; /* Page apparently present */
+
+               page = __page_cache_alloc(gfp_mask);
+               if (!page)
+                       return;
+               if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
+                       put_page(page);
+                       return;
+               }
+
+               ractl->_nr_pages++;
+               ractl->_index = page->index;
+       }
+
+       new_len += new_start - readahead_pos(ractl);
+       new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);
+
+       /* Expand the trailing edge upwards */
+       while (ractl->_nr_pages < new_nr_pages) {
+               unsigned long index = ractl->_index + ractl->_nr_pages;
+               struct page *page = xa_load(&mapping->i_pages, index);
+
+               if (page && !xa_is_value(page))
+                       return; /* Page apparently present */
+
+               page = __page_cache_alloc(gfp_mask);
+               if (!page)
+                       return;
+               if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
+                       put_page(page);
+                       return;
+               }
+               ractl->_nr_pages++;
+               if (ra) {
+                       ra->size++;
+                       ra->async_size++;
+               }
+       }
+}
+EXPORT_SYMBOL(readahead_expand);
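The kind of caller this interface is aimed at is a ->readahead implementation whose backing store works in units larger than a page, which wants the core to widen the window to its own alignment before issuing I/O. A hypothetical sketch of such a caller, not taken from this patch: example_fs_readahead(), example_issue_read() and MY_GRANULE_SIZE are made-up names, the rounding policy is only illustrative, and readahead_pos()/readahead_length() are the existing byte-offset helpers from linux/pagemap.h.

#include <linux/fs.h>
#include <linux/pagemap.h>

#define MY_GRANULE_SIZE		(256 * 1024)	/* made-up backing-store granule */

/* Placeholder for whatever would actually submit the I/O. */
static void example_issue_read(struct readahead_control *ractl)
{
}

static void example_fs_readahead(struct readahead_control *ractl)
{
	loff_t start = readahead_pos(ractl);
	size_t len = readahead_length(ractl);
	loff_t new_start = round_down(start, MY_GRANULE_SIZE);
	size_t new_len = round_up(start + len, MY_GRANULE_SIZE) - new_start;

	/*
	 * Ask the core to widen the window to granule boundaries.  It may
	 * stop short (page already cached, allocation failure), so the
	 * revised window must be re-read from @ractl before issuing I/O.
	 */
	readahead_expand(ractl, new_start, new_len);

	example_issue_read(ractl);
}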