diff --git a/fs/buffer.c b/fs/buffer.c
index b8d2837..64fe82e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -123,14 +123,6 @@ void __wait_on_buffer(struct buffer_head * bh)
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
-static void
-__clear_page_buffers(struct page *page)
-{
-       ClearPagePrivate(page);
-       set_page_private(page, 0);
-       put_page(page);
-}
-
 static void buffer_io_error(struct buffer_head *bh, char *msg)
 {
        if (!test_bit(BH_Quiet, &bh->b_state))
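
The removed __clear_page_buffers() helper is subsumed by the generic
detach_page_private(), used in the drop_buffers() hunk near the end of
this diff. A sketch of the generic helper, assuming the
include/linux/pagemap.h definition this series relies on:

        static inline void *detach_page_private(struct page *page)
        {
                void *data = (void *)page_private(page);

                if (!PagePrivate(page))
                        return NULL;
                ClearPagePrivate(page);
                set_page_private(page, 0);
                put_page(page);   /* drop the reference taken at attach time */

                return data;
        }

Unlike the local helper, it returns the old ->private pointer, which
drop_buffers() simply ignores.
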
@@ -274,8 +266,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
-       local_irq_save(flags);
-       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+       spin_lock_irqsave(&first->b_uptodate_lock, flags);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
@@ -288,8 +279,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
-       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 
        /*
         * If none of the buffers had errors and they are all
@@ -301,8 +291,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
        return;
 
 still_busy:
-       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        return;
 }
 
@@ -371,8 +360,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
        }
 
        first = page_buffers(page);
-       local_irq_save(flags);
-       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+       spin_lock_irqsave(&first->b_uptodate_lock, flags);
 
        clear_buffer_async_write(bh);
        unlock_buffer(bh);
@@ -384,14 +372,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                }
                tmp = tmp->b_this_page;
        }
-       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        end_page_writeback(page);
        return;
 
 still_busy:
-       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        return;
 }
 EXPORT_SYMBOL(end_buffer_async_write);
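
Both async completion handlers above used to open-code an interrupt-safe
bit spinlock: local_irq_save() followed by bit_spin_lock() on the
BH_Uptodate_Lock bit of b_state. A bit spinlock always spins with
preemption (and here interrupts) disabled, so it cannot be turned into a
sleeping lock on PREEMPT_RT; a regular spinlock_t can. The conversion
collapses each two-step sequence into a single call:

        /* before: lock state hidden in a bit of b_state */
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        /* ... walk the page's buffers ... */
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /* after: a dedicated per-buffer_head lock */
        spin_lock_irqsave(&first->b_uptodate_lock, flags);
        /* ... walk the page's buffers ... */
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
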
@@ -912,7 +898,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
-       attach_page_buffers(page, head);
+       attach_page_private(page, head);
 }
 
 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
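
attach_page_buffers() was a buffer_head-only wrapper; attach_page_private()
is its generic pagemap.h replacement, and the same one-line substitution
repeats in create_empty_buffers() and attach_nobh_buffers() below. Roughly,
assuming the include/linux/pagemap.h definition from this series, it
performs the same three steps the old wrapper did:

        static inline void attach_page_private(struct page *page, void *data)
        {
                get_page(page);   /* hold the page while ->private is set */
                set_page_private(page, (unsigned long)data);
                SetPagePrivate(page);
        }
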
@@ -973,7 +959,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        struct page *page;
        struct buffer_head *bh;
        sector_t end_block;
-       int ret = 0;            /* Will call free_more_memory() */
+       int ret = 0;
        gfp_t gfp_mask;
 
        gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
@@ -1160,12 +1146,19 @@ EXPORT_SYMBOL(mark_buffer_dirty);
 
 void mark_buffer_write_io_error(struct buffer_head *bh)
 {
+       struct super_block *sb;
+
        set_buffer_write_io_error(bh);
        /* FIXME: do we need to set this in both places? */
        if (bh->b_page && bh->b_page->mapping)
                mapping_set_error(bh->b_page->mapping, -EIO);
        if (bh->b_assoc_map)
                mapping_set_error(bh->b_assoc_map, -EIO);
+       rcu_read_lock();
+       sb = READ_ONCE(bh->b_bdev->bd_super);
+       if (sb)
+               errseq_set(&sb->s_wb_err, -EIO);
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(mark_buffer_write_io_error);
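
The added hunk also records the write error in the backing super_block's
errseq_t, so a later syncfs() on the filesystem can report the failure even
when the error was only ever seen by the block device's mapping. The RCU
read section covers the bh->b_bdev->bd_super dereference, since the
superblock of a concurrently unmounted filesystem is freed only after an
RCU grace period. A hedged sketch of how a consumer checks the sequence,
using the errseq API from lib/errseq.c (the surrounding function is
hypothetical):

        errseq_t since = errseq_sample(&sb->s_wb_err);
        int err;

        /* ... writeback runs; mark_buffer_write_io_error() may fire ... */

        err = errseq_check_and_advance(&sb->s_wb_err, &since);
        if (err)        /* -EIO if an error was recorded since the sample */
                return err;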
 
@@ -1377,6 +1370,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 }
 EXPORT_SYMBOL(__breadahead);
 
+void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
+                     gfp_t gfp)
+{
+       struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
+       if (likely(bh)) {
+               ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
+               brelse(bh);
+       }
+}
+EXPORT_SYMBOL(__breadahead_gfp);
+
 /**
  *  __bread_gfp() - reads a specified block and returns the bh
  *  @bdev: the block_device to read from
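
__breadahead_gfp() is identical to __breadahead() above except that the
caller supplies the allocation mask that __getblk_gfp() folds into
grow_dev_page(). The intended style of use is a thin wrapper such as
sb_breadahead_unmovable(); a sketch, assuming the buffer_head.h helper
added alongside this export:

        static inline void
        sb_breadahead_unmovable(struct super_block *sb, sector_t block)
        {
                /* gfp of 0 omits the __GFP_MOVABLE that plain __breadahead()
                 * passes, so long-lived metadata buffers do not land in
                 * movable pageblocks and block page migration. */
                __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
        }
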
@@ -1575,7 +1579,7 @@ void create_empty_buffers(struct page *page,
                        bh = bh->b_this_page;
                } while (bh != head);
        }
-       attach_page_buffers(page, head);
+       attach_page_private(page, head);
        spin_unlock(&page->mapping->private_lock);
 }
 EXPORT_SYMBOL(create_empty_buffers);
@@ -2562,7 +2566,7 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
                        bh->b_this_page = head;
                bh = bh->b_this_page;
        } while (bh != head);
-       attach_page_buffers(page, head);
+       attach_page_private(page, head);
        spin_unlock(&page->mapping->private_lock);
 }
 
@@ -3019,49 +3023,6 @@ static void end_bio_bh_io_sync(struct bio *bio)
        bio_put(bio);
 }
 
-/*
- * This allows us to do IO even on the odd last sectors
- * of a device, even if the block size is some multiple
- * of the physical sector size.
- *
- * We'll just truncate the bio to the size of the device,
- * and clear the end of the buffer head manually.
- *
- * Truly out-of-range accesses will turn into actual IO
- * errors, this only handles the "we need to be able to
- * do IO at the final sector" case.
- */
-void guard_bio_eod(struct bio *bio)
-{
-       sector_t maxsector;
-       struct hd_struct *part;
-
-       rcu_read_lock();
-       part = __disk_get_part(bio->bi_disk, bio->bi_partno);
-       if (part)
-               maxsector = part_nr_sects_read(part);
-       else
-               maxsector = get_capacity(bio->bi_disk);
-       rcu_read_unlock();
-
-       if (!maxsector)
-               return;
-
-       /*
-        * If the *whole* IO is past the end of the device,
-        * let it through, and the IO layer will turn it into
-        * an EIO.
-        */
-       if (unlikely(bio->bi_iter.bi_sector >= maxsector))
-               return;
-
-       maxsector -= bio->bi_iter.bi_sector;
-       if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
-               return;
-
-       bio_truncate(bio, maxsector << 9);
-}
-
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
                         enum rw_hint write_hint, struct writeback_control *wbc)
 {
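
guard_bio_eod() leaves fs/buffer.c here; in mainline the function moved
essentially unchanged into the generic block layer (block/bio.c), so call
sites in this file keep working through the relocated definition. The
truncation arithmetic it performs deserves a worked example:

        /* Worked example: device capacity 1000 sectors, read bio at
         * sector 996 with bi_size = 4096 bytes (8 sectors).
         *
         *   maxsector = 1000 - 996 = 4 sectors remain past bi_sector;
         *   8 > 4, so bio_truncate(bio, 4 << 9) shrinks the bio to
         *   2048 bytes and, for reads, zeroes the truncated tail of
         *   the buffer instead of failing the whole I/O.
         */
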
@@ -3265,7 +3226,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
                bh = next;
        } while (bh != head);
        *buffers_to_free = head;
-       __clear_page_buffers(page);
+       detach_page_private(page);
        return 1;
 failed:
        return 0;
@@ -3385,6 +3346,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
+               spin_lock_init(&ret->b_uptodate_lock);
                preempt_disable();
                __this_cpu_inc(bh_accounting.nr);
                recalc_bh_state();
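
kmem_cache_zalloc() zero-fills the buffer_head, which was sufficient while
the lock was just a bit in b_state, but the new spinlock_t needs an
explicit spin_lock_init(): it sets up the lockdep class and whatever the
architecture or PREEMPT_RT lock variant requires, and relying on
zero-initialization would be broken. The header side of the change,
roughly (a sketch of the include/linux/buffer_head.h edit this hunk
depends on):

        struct buffer_head {
                unsigned long b_state;          /* buffer state bitmap */
                /* ... existing fields unchanged ... */
                spinlock_t b_uptodate_lock;     /* replaces the BH_Uptodate_Lock
                                                   bit formerly in b_state */
        };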