/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

#define pr_pic_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__)

static void pic_irq_request(struct kvm *kvm, int level);

static void pic_lock(struct kvm_pic *s)
        __acquires(&s->lock)
{
        spin_lock(&s->lock);
}

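/*
 * Drop the PIC lock; if an interrupt was raised while it was held, kick one
 * vCPU that can accept PIC interrupts so it re-evaluates pending events.
 */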
static void pic_unlock(struct kvm_pic *s)
        __releases(&s->lock)
{
        bool wakeup = s->wakeup_needed;
        struct kvm_vcpu *vcpu;
        unsigned long i;

        s->wakeup_needed = false;

        spin_unlock(&s->lock);

        if (wakeup) {
                kvm_for_each_vcpu(i, vcpu, s->kvm) {
                        if (kvm_apic_accept_pic_intr(vcpu)) {
                                kvm_make_request(KVM_REQ_EVENT, vcpu);
                                kvm_vcpu_kick(vcpu);
                                return;
                        }
                }
        }
}

static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
        s->isr &= ~(1 << irq);
        if (s != &s->pics_state->pics[0])
                irq += 8;
        /*
         * We drop the lock while calling the ack notifiers because ack
         * notifier callbacks for assigned devices call back into the PIC
         * recursively. Other interrupts may be delivered to the PIC while
         * the lock is dropped, but that is safe because the PIC state has
         * already been updated at this stage.
         */
        pic_unlock(s->pics_state);
        kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
        pic_lock(s->pics_state);
}

/*
 * Set the IRQ level. If an edge is detected, the corresponding IRR bit is
 * set to 1.
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
        int mask, ret = 1;
        mask = 1 << irq;
        if (s->elcr & mask)     /* level triggered */
                if (level) {
                        ret = !(s->irr & mask);
                        s->irr |= mask;
                        s->last_irr |= mask;
                } else {
                        s->irr &= ~mask;
                        s->last_irr &= ~mask;
                }
        else    /* edge triggered */
                if (level) {
                        if ((s->last_irr & mask) == 0) {
                                ret = !(s->irr & mask);
                                s->irr |= mask;
                        }
                        s->last_irr |= mask;
                } else
                        s->last_irr &= ~mask;

        return (s->imr & mask) ? -1 : ret;
}

/*
 * Return the highest priority found in mask (highest = smallest number).
 * Return 8 if no IRQ is pending.
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
        int priority;
        if (mask == 0)
                return 8;
        priority = 0;
        while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
                priority++;
        return priority;
}

/*
 * Return the IRQ the PIC wants to deliver, or -1 if none.
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
        int mask, cur_priority, priority;

        mask = s->irr & ~s->imr;
        priority = get_priority(s, mask);
        if (priority == 8)
                return -1;
        /*
         * Compute the current priority. If special fully nested mode is
         * enabled on the master, the IRQ coming from the slave is not taken
         * into account for the priority computation.
         */
        mask = s->isr;
        if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
                mask &= ~(1 << 2);
        cur_priority = get_priority(s, mask);
        if (priority < cur_priority)
                /*
                 * higher priority found: an irq should be generated
                 */
                return (priority + s->priority_add) & 7;
        else
                return -1;
}

/*
 * Raise an IRQ to the CPU if necessary. Must be called every time the
 * active IRQ may change.
 */
static void pic_update_irq(struct kvm_pic *s)
{
        int irq2, irq;

        irq2 = pic_get_irq(&s->pics[1]);
        if (irq2 >= 0) {
                /*
                 * If an IRQ is requested by the slave PIC, signal the master
                 * PIC through the cascade line (IRQ 2).
                 */
                pic_set_irq1(&s->pics[0], 2, 1);
                pic_set_irq1(&s->pics[0], 2, 0);
        }
        irq = pic_get_irq(&s->pics[0]);
        pic_irq_request(s->kvm, irq >= 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
        pic_lock(s);
        pic_update_irq(s);
        pic_unlock(s);
}

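/*
 * Set the level of an IRQ line for the given interrupt source, update the
 * PIC output and trace the resulting state. Returns the pic_set_irq1() value.
 */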
int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
{
        int ret, irq_level;

        BUG_ON(irq < 0 || irq >= PIC_NUM_PINS);

        pic_lock(s);
        irq_level = __kvm_irq_line_state(&s->irq_states[irq],
                                         irq_source_id, level);
        ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level);
        pic_update_irq(s);
        trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
                              s->pics[irq >> 3].imr, ret == 0);
        pic_unlock(s);

        return ret;
}

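/*
 * Clear the pending state contributed by irq_source_id on every PIC pin.
 */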
void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
{
        int i;

        pic_lock(s);
        for (i = 0; i < PIC_NUM_PINS; i++)
                __clear_bit(irq_source_id, &s->irq_states[i]);
        pic_unlock(s);
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
        s->isr |= 1 << irq;
        /*
         * We don't clear a level sensitive interrupt here
         */
        if (!(s->elcr & (1 << irq)))
                s->irr &= ~(1 << irq);

        if (s->auto_eoi) {
                if (s->rotate_on_auto_eoi)
                        s->priority_add = (irq + 1) & 7;
                pic_clear_isr(s, irq);
        }
}

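/*
 * Return the vector of the highest-priority pending interrupt and acknowledge
 * it, following the master/slave cascade; a spurious interrupt reads as IRQ 7
 * of the corresponding controller.
 */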
int kvm_pic_read_irq(struct kvm *kvm)
{
        int irq, irq2, intno;
        struct kvm_pic *s = kvm->arch.vpic;

        s->output = 0;

        pic_lock(s);
        irq = pic_get_irq(&s->pics[0]);
        if (irq >= 0) {
                pic_intack(&s->pics[0], irq);
                if (irq == 2) {
                        irq2 = pic_get_irq(&s->pics[1]);
                        if (irq2 >= 0)
                                pic_intack(&s->pics[1], irq2);
                        else
                                /*
                                 * spurious IRQ on slave controller
                                 */
                                irq2 = 7;
                        intno = s->pics[1].irq_base + irq2;
                        irq = irq2 + 8;
                } else
                        intno = s->pics[0].irq_base + irq;
        } else {
                /*
                 * spurious IRQ on master controller
                 */
                irq = 7;
                intno = s->pics[0].irq_base + irq;
        }
        pic_update_irq(s);
        pic_unlock(s);

        return intno;
}

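/*
 * Reset one PIC to its post-ICW1 init state. Latched edge-triggered IRQs are
 * cleared from the ISR only when at least one vCPU accepts PIC interrupts.
 */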
static void kvm_pic_reset(struct kvm_kpic_state *s)
{
        int irq;
        unsigned long i;
        struct kvm_vcpu *vcpu;
        u8 edge_irr = s->irr & ~s->elcr;
        bool found = false;

        s->last_irr = 0;
        s->irr &= s->elcr;
        s->imr = 0;
        s->priority_add = 0;
        s->special_mask = 0;
        s->read_reg_select = 0;
        if (!s->init4) {
                s->special_fully_nested_mode = 0;
                s->auto_eoi = 0;
        }
        s->init_state = 1;

        kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
                if (kvm_apic_accept_pic_intr(vcpu)) {
                        found = true;
                        break;
                }

        if (!found)
                return;

        for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
                if (edge_irr & (1 << irq))
                        pic_clear_isr(s, irq);
}

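/*
 * Handle a guest write to a PIC command port (ICW1/OCW2/OCW3) or data port
 * (ICW2-ICW4/OCW1).
 */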
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        int priority, cmd, irq;

        addr &= 1;
        if (addr == 0) {
                if (val & 0x10) {
                        s->init4 = val & 1;
                        if (val & 0x02)
                                pr_pic_unimpl("single mode not supported");
                        if (val & 0x08)
                                pr_pic_unimpl(
                                                "level sensitive irq not supported");
                        kvm_pic_reset(s);
                } else if (val & 0x08) {
                        if (val & 0x04)
                                s->poll = 1;
                        if (val & 0x02)
                                s->read_reg_select = val & 1;
                        if (val & 0x40)
                                s->special_mask = (val >> 5) & 1;
                } else {
                        cmd = val >> 5;
                        switch (cmd) {
                        case 0:
                        case 4:
                                s->rotate_on_auto_eoi = cmd >> 2;
                                break;
                        case 1: /* end of interrupt */
                        case 5:
                                priority = get_priority(s, s->isr);
                                if (priority != 8) {
                                        irq = (priority + s->priority_add) & 7;
                                        if (cmd == 5)
                                                s->priority_add = (irq + 1) & 7;
                                        pic_clear_isr(s, irq);
                                        pic_update_irq(s->pics_state);
                                }
                                break;
                        case 3:
                                irq = val & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        case 6:
                                s->priority_add = (val + 1) & 7;
                                pic_update_irq(s->pics_state);
                                break;
                        case 7:
                                irq = val & 7;
                                s->priority_add = (irq + 1) & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        default:
                                break;  /* no operation */
                        }
                }
        } else
                switch (s->init_state) {
                case 0: { /* normal mode */
                        u8 imr_diff = s->imr ^ val,
                                off = (s == &s->pics_state->pics[0]) ? 0 : 8;
                        s->imr = val;
                        for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
                                if (imr_diff & (1 << irq))
                                        kvm_fire_mask_notifiers(
                                                s->pics_state->kvm,
                                                SELECT_PIC(irq + off),
                                                irq + off,
                                                !!(s->imr & (1 << irq)));
                        pic_update_irq(s->pics_state);
                        break;
                }
                case 1:
                        s->irq_base = val & 0xf8;
                        s->init_state = 2;
                        break;
                case 2:
                        if (s->init4)
                                s->init_state = 3;
                        else
                                s->init_state = 0;
                        break;
                case 3:
                        s->special_fully_nested_mode = (val >> 4) & 1;
                        s->auto_eoi = (val >> 1) & 1;
                        s->init_state = 0;
                        break;
                }
}

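/*
 * Poll-mode read (OCW3 poll command): return the highest-priority pending IRQ
 * and acknowledge it, or 0x07 if none is pending.
 */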
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
        int ret;

        ret = pic_get_irq(s);
        if (ret >= 0) {
                if (addr1 >> 7) {
                        s->pics_state->pics[0].isr &= ~(1 << 2);
                        s->pics_state->pics[0].irr &= ~(1 << 2);
                }
                s->irr &= ~(1 << ret);
                pic_clear_isr(s, ret);
                if (addr1 >> 7 || ret != 2)
                        pic_update_irq(s->pics_state);
        } else {
                ret = 0x07;
                pic_update_irq(s->pics_state);
        }

        return ret;
}

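/*
 * Handle a guest read from a PIC port: return the poll result if poll mode
 * was armed, otherwise IRR or ISR from the command port and IMR from the
 * data port.
 */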
static u32 pic_ioport_read(void *opaque, u32 addr)
{
        struct kvm_kpic_state *s = opaque;
        int ret;

        if (s->poll) {
                ret = pic_poll_read(s, addr);
                s->poll = 0;
        } else
                if ((addr & 1) == 0)
                        if (s->read_reg_select)
                                ret = s->isr;
                        else
                                ret = s->irr;
                else
                        ret = s->imr;
        return ret;
}

static void elcr_ioport_write(void *opaque, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque)
{
        struct kvm_kpic_state *s = opaque;
        return s->elcr;
}

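/*
 * Dispatch a byte-sized guest write to the master PIC (0x20/0x21), the slave
 * PIC (0xa0/0xa1) or the ELCR registers (0x4d0/0x4d1).
 */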
static int picdev_write(struct kvm_pic *s,
                         gpa_t addr, int len, const void *val)
{
        unsigned char data = *(unsigned char *)val;

        if (len != 1) {
                pr_pic_unimpl("non byte write\n");
                return 0;
        }
        switch (addr) {
        case 0x20:
        case 0x21:
                pic_lock(s);
                pic_ioport_write(&s->pics[0], addr, data);
                pic_unlock(s);
                break;
        case 0xa0:
        case 0xa1:
                pic_lock(s);
                pic_ioport_write(&s->pics[1], addr, data);
                pic_unlock(s);
                break;
        case 0x4d0:
        case 0x4d1:
                pic_lock(s);
                elcr_ioport_write(&s->pics[addr & 1], data);
                pic_unlock(s);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

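/*
 * Dispatch a byte-sized guest read; ports 0x20/0x21 select the master PIC,
 * 0xa0/0xa1 the slave (addr >> 7), and 0x4d0/0x4d1 the ELCR registers.
 */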
static int picdev_read(struct kvm_pic *s,
                       gpa_t addr, int len, void *val)
{
        unsigned char *data = (unsigned char *)val;

        if (len != 1) {
                memset(val, 0, len);
                pr_pic_unimpl("non byte read\n");
                return 0;
        }
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                pic_lock(s);
                *data = pic_ioport_read(&s->pics[addr >> 7], addr);
                pic_unlock(s);
                break;
        case 0x4d0:
        case 0x4d1:
                pic_lock(s);
                *data = elcr_ioport_read(&s->pics[addr & 1]);
                pic_unlock(s);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_master),
                            addr, len, val);
}

static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_master),
                            addr, len, val);
}

static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
                            addr, len, val);
}

static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
                            addr, len, val);
}

static int picdev_elcr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_elcr),
                            addr, len, val);
}

static int picdev_elcr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_elcr),
                            addr, len, val);
}

/*
 * Callback invoked when the IRQ output status of PIC0 (the master) changes.
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
        struct kvm_pic *s = kvm->arch.vpic;

        if (!s->output)
                s->wakeup_needed = true;
        s->output = level;
}

static const struct kvm_io_device_ops picdev_master_ops = {
        .read     = picdev_master_read,
        .write    = picdev_master_write,
};

static const struct kvm_io_device_ops picdev_slave_ops = {
        .read     = picdev_slave_read,
        .write    = picdev_slave_write,
};

static const struct kvm_io_device_ops picdev_elcr_ops = {
        .read     = picdev_elcr_read,
        .write    = picdev_elcr_write,
};

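/*
 * Allocate the virtual PIC and register its master, slave and ELCR I/O
 * devices on the PIO bus.
 */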
int kvm_pic_init(struct kvm *kvm)
{
        struct kvm_pic *s;
        int ret;

        s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL_ACCOUNT);
        if (!s)
                return -ENOMEM;
        spin_lock_init(&s->lock);
        s->kvm = kvm;
        s->pics[0].elcr_mask = 0xf8;
        s->pics[1].elcr_mask = 0xde;
        s->pics[0].pics_state = s;
        s->pics[1].pics_state = s;

        /*
         * Initialize PIO device
         */
        kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
        kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
        kvm_iodevice_init(&s->dev_elcr, &picdev_elcr_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
                                      &s->dev_master);
        if (ret < 0)
                goto fail_unlock;

        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave);
        if (ret < 0)
                goto fail_unreg_2;

        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_elcr);
        if (ret < 0)
                goto fail_unreg_1;

        mutex_unlock(&kvm->slots_lock);

        kvm->arch.vpic = s;

        return 0;

fail_unreg_1:
        kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave);

fail_unreg_2:
        kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master);

fail_unlock:
        mutex_unlock(&kvm->slots_lock);

        kfree(s);

        return ret;
}

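/*
 * Unregister the PIC's I/O devices and free the virtual PIC, if one exists.
 */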
void kvm_pic_destroy(struct kvm *kvm)
{
        struct kvm_pic *vpic = kvm->arch.vpic;

        if (!vpic)
                return;

        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_elcr);
        mutex_unlock(&kvm->slots_lock);

        kvm->arch.vpic = NULL;
        kfree(vpic);
}