struct page *get_dump_page(unsigned long addr);
bool folio_mark_dirty(struct folio *folio);
+bool folio_mark_dirty_lock(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);

diff --git a/mm/folio-compat.c b/mm/folio-compat.c
}
EXPORT_SYMBOL(set_page_dirty);
+int set_page_dirty_lock(struct page *page)
+{
+ return folio_mark_dirty_lock(page_folio(page));
+}
+EXPORT_SYMBOL(set_page_dirty_lock);
+
bool clear_page_dirty_for_io(struct page *page)
{
return folio_clear_dirty_for_io(page_folio(page));

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
EXPORT_SYMBOL(folio_mark_dirty);
/*
- * set_page_dirty() is racy if the caller has no reference against
- * page->mapping->host, and if the page is unlocked. This is because another
- * CPU could truncate the page off the mapping and then free the mapping.
+ * folio_mark_dirty() is racy if the caller has no reference against
+ * folio->mapping->host, and if the folio is unlocked. This is because another
+ * CPU could truncate the folio off the mapping and then free the mapping.
*
- * Usually, the page _is_ locked, or the caller is a user-space process which
+ * Usually, the folio _is_ locked, or the caller is a user-space process which
* holds a reference on the inode by having an open file.
*
- * In other cases, the page should be locked before running set_page_dirty().
+ * In other cases, the folio should be locked before running folio_mark_dirty().
*/
-int set_page_dirty_lock(struct page *page)
+bool folio_mark_dirty_lock(struct folio *folio)
{
- int ret;
+ bool ret;
- lock_page(page);
- ret = set_page_dirty(page);
- unlock_page(page);
+ folio_lock(folio);
+ ret = folio_mark_dirty(folio);
+ folio_unlock(folio);
return ret;
}
-EXPORT_SYMBOL(set_page_dirty_lock);
+EXPORT_SYMBOL(folio_mark_dirty_lock);
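/*
 * Illustrative sketch, not part of the patch: the caller pattern the
 * comment above describes.  A hypothetical driver (the function name here
 * is made up) has pinned user pages via pin_user_pages() and therefore
 * holds neither the folio lock nor a reference on the inode, so it takes
 * the folio lock through folio_mark_dirty_lock() before dirtying; callers
 * releasing whole arrays of pinned pages would typically use
 * unpin_user_pages_dirty_lock() instead.
 */
#include <linux/mm.h>

static void example_release_pinned_page(struct page *page, bool make_dirty)
{
	struct folio *folio = page_folio(page);

	/* Locks the folio, marks it dirty, and unlocks it again. */
	if (make_dirty)
		folio_mark_dirty_lock(folio);
	unpin_user_page(page);
}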
/*
* This cancels just the dirty bit on the kernel page itself, it does NOT