block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk &&
            !rq->waiting && !rq->special)
                return 1;

        return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

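/*
 * Look up a registered elevator type by name.  Returns the matching
 * entry on elv_list, or NULL if none is registered; no module
 * reference is taken here (see elevator_get()).
 */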
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e = NULL;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(__e->elevator_name, name)) {
                        e = __e;
                        break;
                }
        }

        return e;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

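/*
 * Like elevator_find(), but also pins the elevator's module so it
 * cannot be unloaded while in use.  Balanced by elevator_put().
 */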
static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

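/*
 * Bind an allocated elevator_queue to the request queue and run the
 * scheduler's init hook, if any.  Returns 0 on success or the error
 * code from the init hook.
 */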
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
                           struct elevator_queue *eq)
{
        int ret = 0;

        memset(eq, 0, sizeof(*eq));
        eq->ops = &e->ops;
        eq->elevator_type = e;

        q->elevator = eq;

        if (eq->ops->elevator_init_fn)
                ret = eq->ops->elevator_init_fn(q, eq);

        return ret;
}

static char chosen_elevator[16];

static void elevator_setup_default(void)
{
        struct elevator_type *e;

        /*
         * If default has not been set, use the compiled-in selection.
         */
        if (!chosen_elevator[0])
                strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);

        /*
         * If the given scheduler is not available, fall back to no-op.
         */
        if ((e = elevator_find(chosen_elevator)))
                elevator_put(e);
        else
                strcpy(chosen_elevator, "noop");
}

static int __init elevator_setup(char *str)
{
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 0;
}

__setup("elevator=", elevator_setup);

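/*
 * Set up the io scheduler for a newly allocated request queue.  If
 * @name is NULL the boot-time/compiled-in default is used.  Returns
 * -EINVAL for an unknown scheduler and -ENOMEM if the elevator_queue
 * cannot be allocated.
 */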
int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        elevator_setup_default();

        if (!name)
                name = chosen_elevator;

        e = elevator_get(name);
        if (!e)
                return -EINVAL;

        eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
        if (!eq) {
                elevator_put(e);
                return -ENOMEM;
        }

        ret = elevator_attach(q, e, eq);
        if (ret) {
                kfree(eq);
                elevator_put(e);
        }

        return ret;
}

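/*
 * Tear down an elevator: run its exit hook, drop the module reference
 * taken when it was attached, and free the elevator_queue itself.
 */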
void elevator_exit(elevator_t *e)
{
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);

        elevator_put(e->elevator_type);
        e->elevator_type = NULL;
        kfree(e);
}

/*
 * Insert rq into the dispatch queue of q, sorted by sector and bounded
 * by the current scheduling boundary.  Queue lock must be held on
 * entry.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;
        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

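/*
 * Ask the io scheduler whether @bio can be merged into an existing
 * request.  The cached last_merge hint is tried first; on a hit the
 * matching request is returned through @req along with the merge type.
 */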
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        int ret;

        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                             struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
        q->nr_sorted--;

        q->last_merge = rq;
}

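/*
 * Put a request the driver has already started back on the queue.  The
 * in_flight accounting done at dequeue time is undone, the scheduler is
 * told the request is no longer active, and it is re-inserted at the
 * front via ELEVATOR_INSERT_REQUEUE.
 */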
void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->flags &= ~REQ_STARTED;

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
}

static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

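/*
 * Core insertion path.  @where selects front, back, sorted or requeue
 * placement; barriers are forced to back insertion and update the
 * scheduling boundary.  The queue lock must be held; callers without it
 * use elv_add_request() below.
 */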
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        struct list_head *pos;
        unsigned ordseq;

        if (q->ordcolor)
                rq->flags |= REQ_ORDERED_COLOR;

        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is scheduling boundary, update end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in a hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->flags |= REQ_SORTED;
                q->nr_sorted++;
                if (q->last_merge == NULL && rq_mergeable(rq))
                        q->last_merge = rq;
                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->flags |= REQ_SOFTBARRIER;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

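/*
 * Hand the next request to the driver: mark it started, notify the io
 * scheduler and run the prepare hook.  Returns NULL when nothing can be
 * dispatched right now (empty queue or BLKPREP_DEFER).
 */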
struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->flags |= REQ_STARTED;
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                                                                ret);
                        break;
                }
        }

        return rq;
}

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and when it is freed is accounted as io in progress on the
         * driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

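/*
 * A queue counts as empty only when both the dispatch list and the io
 * scheduler's internal queues have nothing pending.
 */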
int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

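/*
 * The next three helpers just forward to the active scheduler when it
 * implements the hook: per-request private data setup and teardown, and
 * the may_queue policy check.
 */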
int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                    gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw, bio);

        return ELV_MQUEUE_MAY;
}

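/*
 * Called when the driver is done with a request: drop in_flight, kick a
 * pending ordered-flush drain if this was the last in-flight request,
 * and let the scheduler account the completion.
 */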
void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);

                q->in_flight--;

                /*
                 * Check if the queue is waiting for fs requests to be
                 * drained for flush sequence.
                 */
                if (q->ordseq && q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }

                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }
}

int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;

        e->kobj.parent = kobject_get(&q->kobj);
        if (!e->kobj.parent)
                return -EBUSY;

        snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        e->kobj.ktype = e->elevator_type->elevator_ktype;

        return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                elevator_t *e = q->elevator;
                kobject_unregister(&e->kobj);
                kobject_put(&q->kobj);
        }
}

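/*
 * Register or unregister an io scheduler type.  elv_unregister() also
 * clears any per-task io contexts that still reference the scheduler's
 * data before the module can go away.
 */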
int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        if (elevator_find(e->elevator_name))
                BUG();
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator))
                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate every thread in the system and remove its io contexts.
         */
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                struct io_context *ioc = p->io_context;
                if (ioc && ioc->cic) {
                        ioc->cic->exit(ioc->cic);
                        ioc->cic->dtor(ioc->cic);
                        ioc->cic = NULL;
                }
                if (ioc && ioc->aic) {
                        ioc->aic->exit(ioc->aic);
                        ioc->aic->dtor(ioc->aic);
                        ioc->aic = NULL;
                }
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;

        /*
         * Allocate new elevator
         */
        e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
        if (!e)
                goto error;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        spin_unlock_irq(q->queue_lock);

        /*
         * unregister old elevator data
         */
        elv_unregister_queue(q);
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        if (elevator_attach(q, new_e, e))
                goto fail;

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        e = NULL;
fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        kfree(e);
error:
        elevator_put(new_e);
        printk(KERN_ERR "elevator: switch to %s failed\n", new_e->elevator_name);
}

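/*
 * sysfs interface: writing a scheduler name to the queue's "scheduler"
 * attribute triggers a live switch, and reading it lists the registered
 * schedulers with the active one in square brackets.
 */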
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        elevator_switch(q, e);
        return count;
}

ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(q->queue_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(q->queue_lock);

        len += sprintf(name+len, "\n");
        return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);