2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * Eddie Dong <eddie.dong@intel.com>
25 * Jike Song <jike.song@intel.com>
28 * Zhi Wang <zhi.a.wang@intel.com>
29 * Min He <min.he@intel.com>
30 * Bing Niu <bing.niu@intel.com>
/* Indices of the virtual PCI BARs whose state (size, tracked) is kept
 * per vGPU: BAR0 = GTT/MMIO, BAR2 = aperture, plus a PIO BAR.
 */
38 INTEL_GVT_PCI_BAR_GTTMMIO = 0,
39 INTEL_GVT_PCI_BAR_APERTURE,
40 INTEL_GVT_PCI_BAR_PIO,
41 INTEL_GVT_PCI_BAR_MAX,
44 /* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one
45 * byte) byte by byte in standard pci configuration space. (not the full
/*
 * One mask byte per config-space byte, covering the standard header up to
 * PCI_INTERRUPT_LINE + 3.  A 1 bit means the guest may modify that bit;
 * the high byte of PCI_STATUS is the single byte treated as RW1C
 * (write-1-to-clear) by vgpu_pci_cfg_mem_write().
 */
48 static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
49 [PCI_COMMAND] = 0xff, 0x07,
50 [PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */
51 [PCI_CACHE_LINE_SIZE] = 0xff,
52 [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
53 [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
54 [PCI_INTERRUPT_LINE] = 0xff,
58 * vgpu_pci_cfg_mem_write - write virtual cfg space memory
60 * Use this function to write virtual cfg space memory.
61 * For standard cfg space, only RW bits can be changed,
62 * and we emulate the RW1C behavior of the PCI_STATUS register.
64 static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
65 u8 *src, unsigned int bytes)
67 u8 *cfg_base = vgpu_cfg_space(vgpu)
/* Bytes within the standard header: filter each byte through the
 * per-byte writable-bits bitmap so read-only bits are preserved.
 */
71 for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
72 mask = pci_cfg_space_rw_bmp[off + i];
73 old = cfg_base[off + i];
77 * The PCI_STATUS high byte has RW1C bits, here
78 * emulates clear by writing 1 for these bits.
79 * Writing a 0b to RW1C bits has no effect.
81 if (off + i == PCI_STATUS + 1)
82 new = (~new & old) & mask;
/* Keep read-only bits from the old value, merge in writable bits. */
84 cfg_base[off + i] = (old & ~mask) | new;
87 /* For other configuration space directly copy as it is. */
89 memcpy(cfg_base + off + i, src + i, bytes - i);
93 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
96 * Zero on success, negative error code if failed.
98 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
99 void *p_data, unsigned int bytes)
/* Reject accesses wider than one dword or past the emulated cfg space. */
101 if (WARN_ON(bytes > 4))
104 if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
/* Reads are served directly from the vGPU's shadow cfg-space copy. */
107 memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
/*
 * map_aperture - map or unmap the host aperture for this vGPU
 * @map: true to establish the mapping, false to tear it down
 *
 * Memremaps the host aperture range, reads the guest-programmed BAR2 base
 * (32- or 64-bit wide depending on PCI_BASE_ADDRESS_MEM_TYPE_64) and asks
 * the hypervisor to map the guest frames onto the host aperture frames.
 * The per-BAR 'tracked' flag makes repeated calls with the same @map a
 * no-op.  Returns 0 on success, negative error code on failure.
 */
111 static int map_aperture(struct intel_vgpu *vgpu, bool map)
113 phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
114 unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
/* Already in the requested state: nothing to do. */
119 if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
123 vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
125 if (!vgpu->gm.aperture_va)
/* Unmap path: release the kernel mapping of the aperture. */
128 memunmap(vgpu->gm.aperture_va);
129 vgpu->gm.aperture_va = NULL;
/* Guest-programmed BAR2 base; width depends on the 64-bit type flag. */
132 val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
133 if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
134 val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
136 val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
138 first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
140 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
141 aperture_pa >> PAGE_SHIFT,
142 aperture_sz >> PAGE_SHIFT,
/* Hypervisor mapping failed: undo the memremap done above. */
145 memunmap(vgpu->gm.aperture_va);
146 vgpu->gm.aperture_va = NULL;
150 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
/*
 * trap_gttmmio - enable or disable trapping of the GTT/MMIO BAR range
 * @trap: true to install the trap area, false to remove it
 *
 * Computes the guest GTT/MMIO range from the guest-programmed BAR0 base
 * (32- or 64-bit wide per PCI_BASE_ADDRESS_MEM_TYPE_64) plus the tracked
 * BAR size, and tells the hypervisor to (un)trap it.  The 'tracked' flag
 * makes repeated calls with the same @trap a no-op.
 */
154 static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
/* Already in the requested state: nothing to do. */
160 if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
163 val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
164 if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
165 start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
167 start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
/* Mask off the low BAR flag bits to get the memory base address. */
169 start &= ~GENMASK(3, 0);
170 end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
172 ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
176 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
/*
 * emulate_pci_command_write - emulate a guest write to PCI_COMMAND
 *
 * Applies the write through the RW-bitmap filter, then reacts to a
 * toggle of PCI_COMMAND_MEMORY: memory decode turned off tears down the
 * GTT/MMIO trap and the aperture mapping; turned on it re-establishes
 * both.  If the memory-enable bit did not change, nothing else is done.
 */
180 static int emulate_pci_command_write(struct intel_vgpu *vgpu,
181 unsigned int offset, void *p_data, unsigned int bytes)
183 u8 old = vgpu_cfg_space(vgpu)[offset];
184 u8 new = *(u8 *)p_data;
185 u8 changed = old ^ new;
188 vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
/* Only the memory-enable transition needs extra handling. */
189 if (!(changed & PCI_COMMAND_MEMORY))
192 if (old & PCI_COMMAND_MEMORY) {
/* Memory decode was on and is being turned off. */
193 ret = trap_gttmmio(vgpu, false);
196 ret = map_aperture(vgpu, false);
/* Memory decode is being turned on. */
200 ret = trap_gttmmio(vgpu, true);
203 ret = map_aperture(vgpu, true);
/*
 * emulate_pci_bar_write - emulate a guest write to a BAR register
 *
 * Handles the two BAR-write cases: the all-1s sizing probe, where the
 * size mask derived from the tracked BAR size is returned and any
 * existing trap/mapping is dropped, and a normal base-address update,
 * where the old trap/mapping is removed, the new base stored, and the
 * trap/mapping re-established only if PCI_COMMAND_MEMORY is set.
 * @offset must be dword-aligned; 'lo' distinguishes the low dword of a
 * 64-bit BAR pair from the high dword.
 */
211 static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
212 void *p_data, unsigned int bytes)
214 u32 new = *(u32 *)(p_data);
215 bool lo = IS_ALIGNED(offset, 8);
219 vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
220 struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
223 * Power-up software can determine how much address
224 * space the device requires by writing a value of
225 * all 1's to the register and then reading the value
226 * back. The device will return 0's in all don't-care
229 if (new == 0xffffffff) {
231 case PCI_BASE_ADDRESS_0:
232 case PCI_BASE_ADDRESS_1:
/* Sizing probe on the GTT/MMIO BAR: report its size mask. */
233 size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1);
234 intel_vgpu_write_pci_bar(vgpu, offset,
235 size >> (lo ? 0 : 32), lo);
237 * Untrap the BAR, since guest hasn't configured a
240 ret = trap_gttmmio(vgpu, false);
242 case PCI_BASE_ADDRESS_2:
243 case PCI_BASE_ADDRESS_3:
/* Sizing probe on the aperture BAR: report its size mask. */
244 size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
245 intel_vgpu_write_pci_bar(vgpu, offset,
246 size >> (lo ? 0 : 32), lo);
247 ret = map_aperture(vgpu, false);
250 /* Unimplemented BARs */
251 intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
255 case PCI_BASE_ADDRESS_0:
256 case PCI_BASE_ADDRESS_1:
258 * Untrap the old BAR first, since guest has
259 * re-configured the BAR
261 trap_gttmmio(vgpu, false);
262 intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
/* Re-arm the trap only if memory decode is currently enabled. */
263 ret = trap_gttmmio(vgpu, mmio_enabled);
265 case PCI_BASE_ADDRESS_2:
266 case PCI_BASE_ADDRESS_3:
/* Drop the old aperture mapping before storing the new base. */
267 map_aperture(vgpu, false);
268 intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
269 ret = map_aperture(vgpu, mmio_enabled);
/* Other BARs: just record the value, no trap/map side effects. */
272 intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
279 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
282 * Zero on success, negative error code if failed.
284 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
285 void *p_data, unsigned int bytes)
/* Reject accesses wider than one dword or past the emulated cfg space. */
289 if (WARN_ON(bytes > 4))
292 if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
295 /* First check if it's PCI_COMMAND */
296 if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
297 if (WARN_ON(bytes > 2))
299 return emulate_pci_command_write(vgpu, offset, p_data, bytes);
/* Dispatch on the dword-aligned register the write falls into. */
302 switch (rounddown(offset, 4)) {
303 case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
304 if (WARN_ON(!IS_ALIGNED(offset, 4)))
306 return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
308 case INTEL_GVT_PCI_SWSCI:
309 if (WARN_ON(!IS_ALIGNED(offset, 4)))
/* SWSCI write: forward the request to the OpRegion emulation. */
311 ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
316 case INTEL_GVT_PCI_OPREGION:
317 if (WARN_ON(!IS_ALIGNED(offset, 4)))
/* Guest is programming the OpRegion base: (re)initialize it. */
319 ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
323 vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
/* Anything else goes through the plain RW-bitmap-filtered write. */
326 vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
333 * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
336 * @primary: is the vGPU presented as primary
339 void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
342 struct intel_gvt *gvt = vgpu->gvt;
343 const struct intel_gvt_device_info *info = &gvt->device_info;
/* Start from the host's firmware-captured cfg space as a template. */
346 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
347 info->cfg_space_size);
/* A non-primary vGPU is presented as a non-VGA display class device. */
350 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
351 INTEL_GVT_PCI_CLASS_VGA_OTHER;
352 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
353 INTEL_GVT_PCI_CLASS_VGA_OTHER;
356 /* Show guest that there isn't any stolen memory.*/
357 gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
358 *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
/* Pre-seed BAR2 with the host aperture base. */
360 intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
361 gvt_aperture_pa_base(gvt), true);
/* Start with IO/memory/bus-master decode disabled for the guest. */
363 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
365 | PCI_COMMAND_MASTER);
367 * Clear the bar upper 32bit and let guest to assign the new value
369 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
370 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
371 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
372 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
/* Record the physical BAR sizes so sizing probes can be answered. */
374 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
375 pci_resource_len(gvt->dev_priv->drm.pdev, 0);
376 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
377 pci_resource_len(gvt->dev_priv->drm.pdev, 2);
381 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
386 void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
388 u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
/* Recover the 'primary' flag from the class code set at init time. */
389 bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
390 INTEL_GVT_PCI_CLASS_VGA_OTHER;
/* If memory decode was on, tear down the trap and aperture mapping. */
392 if (cmd & PCI_COMMAND_MEMORY) {
393 trap_gttmmio(vgpu, false);
394 map_aperture(vgpu, false);
398 * Currently we only do such reset when vGPU is not
399 * owned by any VM, so we simply restore entire cfg
400 * space to default value.
402 intel_vgpu_init_cfg_space(vgpu, primary);