diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c3afd32..c77b7c3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -29,6 +29,7 @@ enum mapping_flags {
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
+       AS_THP_SUPPORT = 6,     /* THPs supported */
 };
 
 /**
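The new flag does nothing until a filesystem opts in, and this diff adds no setter helper. As a point of reference, other AS_* flags on mapping->flags are manipulated with set_bit()/test_bit(), so a hypothetical opt-in helper (name assumed, not part of this diff) would look like:

static inline void mapping_set_thp_support(struct address_space *mapping)
{
	/*
	 * Counterpart to mapping_thp_support() added below; set_bit()
	 * matches how other AS_* mapping flags are set.  Hypothetical
	 * sketch only.
	 */
	set_bit(AS_THP_SUPPORT, &mapping->flags);
}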
@@ -120,6 +121,40 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
        m->gfp_mask = mask;
 }
 
+static inline bool mapping_thp_support(struct address_space *mapping)
+{
+       return test_bit(AS_THP_SUPPORT, &mapping->flags);
+}
+
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+       return atomic_read(&mapping->nr_thps);
+#else
+       return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+       if (!mapping_thp_support(mapping))
+               atomic_inc(&mapping->nr_thps);
+#else
+       WARN_ON_ONCE(1);
+#endif
+}
+
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+       if (!mapping_thp_support(mapping))
+               atomic_dec(&mapping->nr_thps);
+#else
+       WARN_ON_ONCE(1);
+#endif
+}
+
 void release_pages(struct page **pages, int nr);
 
 /*
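Note the inverted test in the inc/dec helpers above: nr_thps counts THPs only in mappings that do not advertise AS_THP_SUPPORT, since a THP-aware filesystem needs no such bookkeeping. Under CONFIG_READ_ONLY_THP_FOR_FS these pages come from khugepaged collapsing read-only file pages, so a writer has to know they are there. A hedged sketch of a reader-side check (the function name and -EBUSY policy are illustrative, not from this diff):

static int example_check_for_thps(struct inode *inode)
{
	/*
	 * Read-only THPs are present in this non-THP-aware mapping;
	 * they would have to be split before ordinary writes.
	 */
	if (filemap_nr_thps(inode->i_mapping))
		return -EBUSY;	/* placeholder policy */
	return 0;
}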
@@ -726,17 +761,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);
 
-#define VM_READAHEAD_PAGES     (SZ_128K / PAGE_SIZE)
-
-void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
-               struct file *, pgoff_t index, unsigned long req_count);
-void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
-               struct file *, struct page *, pgoff_t index,
-               unsigned long req_count);
-void page_cache_readahead_unbounded(struct address_space *, struct file *,
-               pgoff_t index, unsigned long nr_to_read,
-               unsigned long lookahead_count);
-
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
  * the page is new, so we can just run __SetPageLocked() against it.
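These declarations are not simply dropped: the hunk below reinstates page_cache_sync_readahead() and page_cache_async_readahead() as static inline wrappers over a new readahead_control-based API, and moves VM_READAHEAD_PAGES along with them.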
@@ -777,6 +801,67 @@ struct readahead_control {
        unsigned int _batch_count;
 };
 
+#define DEFINE_READAHEAD(rac, f, m, i)                                 \
+       struct readahead_control rac = {                                \
+               .file = f,                                              \
+               .mapping = m,                                           \
+               ._index = i,                                            \
+       }
+
+#define VM_READAHEAD_PAGES     (SZ_128K / PAGE_SIZE)
+
+void page_cache_ra_unbounded(struct readahead_control *,
+               unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
+               unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
+               struct page *, unsigned long req_count);
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_sync_readahead() should be called when a cache miss has occurred:
+ * it will submit the read.  The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+static inline
+void page_cache_sync_readahead(struct address_space *mapping,
+               struct file_ra_state *ra, struct file *file, pgoff_t index,
+               unsigned long req_count)
+{
+       DEFINE_READAHEAD(ractl, file, mapping, index);
+       page_cache_sync_ra(&ractl, ra, req_count);
+}
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @page: The page at @index which triggered the readahead call.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_async_readahead() should be called when a page marked as
+ * PageReadahead is used; the mark suggests that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages.
+ */
+static inline
+void page_cache_async_readahead(struct address_space *mapping,
+               struct file_ra_state *ra, struct file *file,
+               struct page *page, pgoff_t index, unsigned long req_count)
+{
+       DEFINE_READAHEAD(ractl, file, mapping, index);
+       page_cache_async_ra(&ractl, ra, page, req_count);
+}
+
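Taken together, a typical consumer of the two wrappers is a buffered read path: on a page-cache miss it starts synchronous readahead, and on hitting a page marked PageReadahead it tops up the window asynchronously. A minimal sketch under that assumption (the function itself is hypothetical and error handling is elided; only the page_cache_*_readahead() calls come from this diff):

static void example_readahead(struct file *file, pgoff_t index,
			      unsigned long nr_pages)
{
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		/* Miss: submit synchronous readahead around @index. */
		page_cache_sync_readahead(mapping, ra, file, index, nr_pages);
	} else if (PageReadahead(page)) {
		/* Marker hit: extend the readahead window. */
		page_cache_async_readahead(mapping, ra, file, page,
					   index, nr_pages);
	}
	if (page)
		put_page(page);
}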
 /**
  * readahead_page - Get the next page to read.
  * @rac: The current readahead request.