diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1534ed7..f0b0bae 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -336,16 +336,6 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
                /* there isn't chance to merge the splitted bio */
                split->bi_opf |= REQ_NOMERGE;
 
-               /*
-                * Since we're recursing into make_request here, ensure
-                * that we mark this bio as already having entered the queue.
-                * If not, and the queue is going away, we can get stuck
-                * forever on waiting for the queue reference to drop. But
-                * that will never happen, as we're already holding a
-                * reference to it.
-                */
-               bio_set_flag(*bio, BIO_QUEUE_ENTERED);
-
                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
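
For illustration, a minimal sketch (not part of this patch; the helper name and GFP choice are assumptions) of the split-and-chain pattern used by __blk_queue_split() above: the front of the bio is carved off into "split", chained to the remainder so the original completion waits for both halves, and the remainder is resubmitted while the caller carries on with the front.

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *split_and_resubmit(struct bio *bio, int sectors,
				      struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	if (split) {
		/* a bio already split to the queue limits won't merge again */
		split->bi_opf |= REQ_NOMERGE;
		bio_chain(split, bio);		/* remainder completes after both */
		generic_make_request(bio);	/* resubmit the remainder */
		return split;			/* caller continues with the front */
	}
	return bio;
}
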
@@ -519,44 +509,20 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
  */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-                 struct scatterlist *sglist)
+int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
+               struct scatterlist *sglist, struct scatterlist **last_sg)
 {
-       struct scatterlist *sg = NULL;
        int nsegs = 0;
 
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
-               nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
+               nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
-               nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
+               nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
        else if (rq->bio)
-               nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
-
-       if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
-           (blk_rq_bytes(rq) & q->dma_pad_mask)) {
-               unsigned int pad_len =
-                       (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
-
-               sg->length += pad_len;
-               rq->extra_len += pad_len;
-       }
+               nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
 
-       if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-               if (op_is_write(req_op(rq)))
-                       memset(q->dma_drain_buffer, 0, q->dma_drain_size);
-
-               sg_unmark_end(sg);
-               sg = sg_next(sg);
-               sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
-                           q->dma_drain_size,
-                           ((unsigned long)q->dma_drain_buffer) &
-                           (PAGE_SIZE - 1));
-               nsegs++;
-               rq->extra_len += q->dma_drain_size;
-       }
-
-       if (sg)
-               sg_mark_end(sg);
+       if (*last_sg)
+               sg_mark_end(*last_sg);
 
        /*
         * Something must have been wrong if the figured number of
@@ -566,7 +532,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 
        return nsegs;
 }
-EXPORT_SYMBOL(blk_rq_map_sg);
+EXPORT_SYMBOL(__blk_rq_map_sg);
 
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
                unsigned int nr_phys_segs)
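
With the last-element output parameter introduced above, the old blk_rq_map_sg() interface can presumably be kept as a thin wrapper along these lines (a sketch, not part of this hunk; the real wrapper would live in a shared header):

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
				struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}

Drivers that still need to append padding or drain entries (handling that this patch drops from the core, see the removed RQF_COPY_USER and dma_drain blocks) can call __blk_rq_map_sg() directly and extend the table from *last_sg.
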
@@ -596,6 +562,8 @@ int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
+       if (!bio_crypt_ctx_back_mergeable(req, bio))
+               return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(req->q, req);
@@ -612,6 +580,8 @@ int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
+       if (!bio_crypt_ctx_front_mergeable(req, bio))
+               return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(req->q, req);
@@ -661,6 +631,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;
 
+       if (!bio_crypt_ctx_merge_rq(req, next))
+               return 0;
+
        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
@@ -696,20 +669,17 @@ void blk_rq_set_mixed_merge(struct request *rq)
        rq->rq_flags |= RQF_MIXED_MERGE;
 }
 
-static void blk_account_io_merge(struct request *req)
+static void blk_account_io_merge_request(struct request *req)
 {
        if (blk_do_io_stat(req)) {
-               struct hd_struct *part;
-
                part_stat_lock();
-               part = req->part;
-
-               part_dec_in_flight(req->q, part, rq_data_dir(req));
-
-               hd_struct_put(part);
+               part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
                part_stat_unlock();
+
+               hd_struct_put(req->part);
        }
 }
+
 /*
  * Two cases of handling DISCARD merge:
  * If max_discard_segments > 1, the driver takes every bio
@@ -821,7 +791,7 @@ static struct request *attempt_merge(struct request_queue *q,
        /*
         * 'next' is going away, so update stats accordingly
         */
-       blk_account_io_merge(next);
+       blk_account_io_merge_request(next);
 
        /*
         * ownership of bio passed from next to req, return 'next' for
@@ -885,6 +855,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;
 
+       /* Only merge if the crypt contexts are compatible */
+       if (!bio_crypt_rq_ctx_compatible(rq, bio))
+               return false;
+
        /* must be using the same buffer */
        if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
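
The new bio_crypt_* checks in the merge paths above all gate merging on compatible inline-encryption contexts. A conceptual sketch, with deliberately simplified and hypothetical field names (not the actual struct bio_crypt_ctx layout): two I/Os may only merge when they use the same key and their data-unit numbers stay contiguous across the merge point.

#include <stdbool.h>
#include <stdint.h>

struct crypt_ctx {			/* hypothetical, simplified */
	const void *key;		/* identity of the encryption key */
	uint64_t    dun;		/* data unit number of the first block */
};

static bool crypt_back_mergeable(const struct crypt_ctx *req_ctx,
				 uint64_t req_data_units,
				 const struct crypt_ctx *bio_ctx)
{
	if (!req_ctx && !bio_ctx)	/* neither side encrypted: OK */
		return true;
	if (!req_ctx || !bio_ctx)	/* mixing encrypted/unencrypted: no */
		return false;
	if (req_ctx->key != bio_ctx->key)
		return false;		/* different keys never merge */
	/* appended bio must continue exactly where the request ends */
	return bio_ctx->dun == req_ctx->dun + req_data_units;
}
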