diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a002452..c77b7c3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -761,17 +761,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);
 
-#define VM_READAHEAD_PAGES     (SZ_128K / PAGE_SIZE)
-
-void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
-               struct file *, pgoff_t index, unsigned long req_count);
-void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
-               struct file *, struct page *, pgoff_t index,
-               unsigned long req_count);
-void page_cache_readahead_unbounded(struct address_space *, struct file *,
-               pgoff_t index, unsigned long nr_to_read,
-               unsigned long lookahead_count);
-
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
  * the page is new, so we can just run __SetPageLocked() against it.
@@ -812,6 +801,67 @@ struct readahead_control {
        unsigned int _batch_count;
 };
 
+#define DEFINE_READAHEAD(rac, f, m, i)                                 \
+       struct readahead_control rac = {                                \
+               .file = f,                                              \
+               .mapping = m,                                           \
+               ._index = i,                                            \
+       }
+
+#define VM_READAHEAD_PAGES     (SZ_128K / PAGE_SIZE)
+
+void page_cache_ra_unbounded(struct readahead_control *,
+               unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
+               unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
+               struct page *, unsigned long req_count);
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read.  The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+static inline
+void page_cache_sync_readahead(struct address_space *mapping,
+               struct file_ra_state *ra, struct file *file, pgoff_t index,
+               unsigned long req_count)
+{
+       DEFINE_READAHEAD(ractl, file, mapping, index);
+       page_cache_sync_ra(&ractl, ra, req_count);
+}
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @page: The page at @index which triggered the readahead call.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * is marked as PageReadahead; this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages.
+ */
+static inline
+void page_cache_async_readahead(struct address_space *mapping,
+               struct file_ra_state *ra, struct file *file,
+               struct page *page, pgoff_t index, unsigned long req_count)
+{
+       DEFINE_READAHEAD(ractl, file, mapping, index);
+       page_cache_async_ra(&ractl, ra, page, req_count);
+}
+
 /**
  * readahead_page - Get the next page to read.
  * @rac: The current readahead request.
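What the patch does: the old extern declarations are removed and replaced by
static inline wrappers that keep the legacy argument lists but funnel all
state through a stack-allocated struct readahead_control (via the new
DEFINE_READAHEAD macro) before calling the new page_cache_sync_ra() /
page_cache_async_ra() entry points. Existing callers compile unchanged while
the readahead core migrates to the readahead_control interface.

Below is a minimal sketch of a caller driving both wrappers. It loosely
mirrors the pattern used by the generic filemap read path in mm/filemap.c,
but demo_get_page() and its exact flow are invented for illustration, not
part of the patch:

	#include <linux/fs.h>
	#include <linux/pagemap.h>

	/* Hypothetical helper: look up @index, kicking readahead as needed. */
	static struct page *demo_get_page(struct file *file, pgoff_t index,
					  unsigned long nr_pages)
	{
		struct address_space *mapping = file->f_mapping;
		struct file_ra_state *ra = &file->f_ra;
		struct page *page;

		page = find_get_page(mapping, index);
		if (!page) {
			/* Cache miss: submit a synchronous readahead at @index. */
			page_cache_sync_readahead(mapping, ra, file, index,
						  nr_pages);
			page = find_get_page(mapping, index);
		} else if (PageReadahead(page)) {
			/*
			 * Hit a PageReadahead marker: the window is running
			 * low, so top it up asynchronously.
			 */
			page_cache_async_readahead(mapping, ra, file, page,
						   index, nr_pages);
		}
		return page;	/* may still be NULL if readahead found nothing */
	}

Design note: because each wrapper expands DEFINE_READAHEAD() on the caller's
stack, no allocation or locking is added; the wrapper is pure argument
marshalling, so converting the remaining callers to pass a readahead_control
directly can happen one call site at a time.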