ARM: multi_v5_defconfig: make DaVinci part of the ARM v5 multiplatform build
[linux-2.6-microblaze.git] / virt / kvm / arm / mmio.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5  */
6
7 #include <linux/kvm_host.h>
8 #include <asm/kvm_mmio.h>
9 #include <asm/kvm_emulate.h>
10 #include <trace/events/kvm.h>
11
12 #include "trace.h"
13
14 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
15 {
16         void *datap = NULL;
17         union {
18                 u8      byte;
19                 u16     hword;
20                 u32     word;
21                 u64     dword;
22         } tmp;
23
24         switch (len) {
25         case 1:
26                 tmp.byte        = data;
27                 datap           = &tmp.byte;
28                 break;
29         case 2:
30                 tmp.hword       = data;
31                 datap           = &tmp.hword;
32                 break;
33         case 4:
34                 tmp.word        = data;
35                 datap           = &tmp.word;
36                 break;
37         case 8:
38                 tmp.dword       = data;
39                 datap           = &tmp.dword;
40                 break;
41         }
42
43         memcpy(buf, datap, len);
44 }
45
46 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
47 {
48         unsigned long data = 0;
49         union {
50                 u16     hword;
51                 u32     word;
52                 u64     dword;
53         } tmp;
54
55         switch (len) {
56         case 1:
57                 data = *(u8 *)buf;
58                 break;
59         case 2:
60                 memcpy(&tmp.hword, buf, len);
61                 data = tmp.hword;
62                 break;
63         case 4:
64                 memcpy(&tmp.word, buf, len);
65                 data = tmp.word;
66                 break;
67         case 8:
68                 memcpy(&tmp.dword, buf, len);
69                 data = tmp.dword;
70                 break;
71         }
72
73         return data;
74 }
75
76 /**
77  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
78  *                           or in-kernel IO emulation
79  *
80  * @vcpu: The VCPU pointer
81  * @run:  The VCPU run struct containing the mmio data
82  */
83 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
84 {
85         unsigned long data;
86         unsigned int len;
87         int mask;
88
89         if (!run->mmio.is_write) {
90                 len = run->mmio.len;
91                 if (len > sizeof(unsigned long))
92                         return -EINVAL;
93
94                 data = kvm_mmio_read_buf(run->mmio.data, len);
95
96                 if (vcpu->arch.mmio_decode.sign_extend &&
97                     len < sizeof(unsigned long)) {
98                         mask = 1U << ((len * 8) - 1);
99                         data = (data ^ mask) - mask;
100                 }
101
102                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
103                                &data);
104                 data = vcpu_data_host_to_guest(vcpu, data, len);
105                 vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
106         }
107
108         /*
109          * The MMIO instruction is emulated and should not be re-executed
110          * in the guest.
111          */
112         kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
113
114         return 0;
115 }
116
117 static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
118 {
119         unsigned long rt;
120         int access_size;
121         bool sign_extend;
122
123         if (kvm_vcpu_dabt_iss1tw(vcpu)) {
124                 /* page table accesses IO mem: tell guest to fix its TTBR */
125                 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
126                 return 1;
127         }
128
129         access_size = kvm_vcpu_dabt_get_as(vcpu);
130         if (unlikely(access_size < 0))
131                 return access_size;
132
133         *is_write = kvm_vcpu_dabt_iswrite(vcpu);
134         sign_extend = kvm_vcpu_dabt_issext(vcpu);
135         rt = kvm_vcpu_dabt_get_rd(vcpu);
136
137         *len = access_size;
138         vcpu->arch.mmio_decode.sign_extend = sign_extend;
139         vcpu->arch.mmio_decode.rt = rt;
140
141         return 0;
142 }
143
/*
 * io_mem_abort - handle a data abort on emulated MMIO
 * @vcpu:      the faulting vCPU
 * @run:       the kvm_run struct, filled in for a possible userspace exit
 * @fault_ipa: guest physical address of the access
 *
 * Decodes the access from the fault syndrome, then tries in-kernel
 * emulation via the KVM I/O bus; if nothing claims the address, the
 * populated @run is handed to userspace.  Returns 1 when handled in
 * the kernel, 0 to exit to userspace with KVM_EXIT_MMIO, or a
 * negative error.
 */
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	unsigned long data;
	unsigned long rt;
	int ret;
	bool is_write;
	int len;
	u8 data_buf[8];

	/*
	 * Prepare MMIO operation. First decode the syndrome data we get
	 * from the CPU. Then try if some in-kernel emulation feels
	 * responsible, otherwise let user space do its magic.
	 */
	if (kvm_vcpu_dabt_isvalid(vcpu)) {
		ret = decode_hsr(vcpu, &is_write, &len);
		if (ret)
			return ret;
	} else {
		/* No valid syndrome: we cannot decode the instruction here. */
		kvm_err("load/store instruction decoding not implemented\n");
		return -ENOSYS;
	}

	/* Source/destination register, stashed by decode_hsr(). */
	rt = vcpu->arch.mmio_decode.rt;

	if (is_write) {
		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
					       len);

		/* Trace before the bus access so the attempt is recorded. */
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
		kvm_mmio_write_buf(data_buf, len, data);

		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
			       fault_ipa, NULL);

		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}

	/* Now prepare kvm_run for the potential return to userland. */
	run->mmio.is_write	= is_write;
	run->mmio.phys_addr	= fault_ipa;
	run->mmio.len		= len;

	if (!ret) {
		/* We handled the access successfully in the kernel. */
		if (!is_write)
			memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		/* Completes the load into rt and skips the instruction. */
		kvm_handle_mmio_return(vcpu, run);
		return 1;
	}

	/* Unhandled in-kernel: for a write, hand the bytes to userspace. */
	if (is_write)
		memcpy(run->mmio.data, data_buf, len);
	vcpu->stat.mmio_exit_user++;
	run->exit_reason	= KVM_EXIT_MMIO;
	return 0;
}