 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 * Kevin Tian <kevin.tian@intel.com>
 * Eddie Dong <eddie.dong@intel.com>
 *
 * Niu Bing <bing.niu@intel.com>
 * Zhi Wang <zhi.a.wang@intel.com>
#include <uapi/linux/pci_regs.h>

#include "hypercall.h"
#include "interrupt.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "page_track.h"
#define GVT_MAX_VGPU 8

struct intel_gvt_host {
	const struct intel_gvt_mpt *mpt;

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	unsigned long msi_cap_offset;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
struct intel_vgpu_mmio {

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
		       INTEL_GVT_EVENT_MAX);

struct intel_vgpu_opregion {
	u32 gfn[INTEL_GVT_OPREGION_PAGES];

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
struct vgpu_sched_ctl {

	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,

struct intel_vgpu_submission_ops {
	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct intel_context *shadow[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	u64 i915_context_pml4;
	u64 i915_context_pdps[GEN8_3LVL_PDPES];
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
		u64 ring_context_gpa;
	} last_ctx[I915_NUM_ENGINES];
	struct intel_gvt *gvt;
	struct mutex vgpu_lock;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	unsigned int resetting_eng;
	/* Both sched_data and sched_ctl can be seen as part of the global gvt
	 * scheduler structure, so the two fields below are protected by
	 * sched_lock, not vgpu_lock.
	 */
	struct vgpu_sched_ctl sched_ctl;
	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];
	/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */

	struct dentry *debugfs;

	/* Hypervisor-specific device state. */

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;
static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)

/* validating GM health status */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
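/*
 * Illustrative usage (added sketch, not part of the original header):
 * emulation paths typically treat an -EBADRQC/-EFAULT return value as a
 * sign of a misbehaving guest and drop the vGPU into failsafe mode, e.g.:
 *
 *	if (vgpu_is_vm_unhealthy(ret))
 *		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 */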
struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;

/* Special MMIO blocks. */
struct gvt_mmio_block {

#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg requires save & restore during host PM suspend/resume */
#define F_PM_SAVE	(1 << 5)
/* This reg could be accessed by an unaligned address */
#define F_UNALIGN	(1 << 6)
/* This reg is in GVT's mmio save-restore list and in hardware
 * logical context image
 */
#define F_SR_IN_CTX	(1 << 7)
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH	(1 << 8)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
struct intel_gvt_firmware {
	bool firmware_loaded;

#define NR_MAX_INTEL_VGPU_TYPES 20

struct intel_vgpu_type {
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	enum intel_vgpu_edid resolution;
	/* GVT scope lock, protects GVT itself and all resources currently
	 * not yet protected by dedicated locks (vgpu and scheduler lock).
	 */

	/* scheduler scope lock, protects gvt and vgpu schedule related data */
	struct mutex sched_lock;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	/* service_request is always used with atomic bit operations, so there
	 * is no need to take a gvt-wide lock around it.
	 */
	unsigned long service_request;
	struct engine_mmio *mmio;
	int ctx_mmio_count[I915_NUM_ENGINES];
	u32 *tlb_mmio_offset_list;
	u32 tlb_mmio_offset_list_cnt;
	u32 *mocs_mmio_offset_list;
	u32 mocs_mmio_offset_list_cnt;

	bool is_reg_whitelist_updated;

	struct dentry *debugfs_root;
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)

	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
	/* Scheduling triggered by timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling triggered by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
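/*
 * Illustrative sketch (not part of the original header): because
 * service_request is only touched with atomic bit operations, the consumer
 * side can test and clear a request bit without taking any gvt-wide lock.
 * The helper name below is hypothetical.
 */
static inline bool example_test_and_clear_request(struct intel_gvt *gvt,
						  int service)
{
	return test_and_clear_bit(service, (void *)&gvt->service_request);
}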
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)

#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  gvt_to_ggtt(gvt)->mappable_end
#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start

#define gvt_ggtt_gm_sz(gvt)	gvt_to_ggtt(gvt)->vm.total
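/*
 * Added note (not in the original header): gvt_ggtt_sz() below is the size in
 * bytes of the GGTT PTE array, i.e. one 8-byte entry per GTT page:
 * (vm.total >> PAGE_SHIFT) entries * 8 bytes.
 */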
#define gvt_ggtt_sz(gvt)	(gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
/* ring context size, i.e. the first 0x50 (80) dwords = 320 bytes */
#define RING_CTX_SIZE 320

struct intel_vgpu_creation_params {
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);
/* Macros for easily accessing vGPU virtual/shadow registers.
   Explicitly separate use for typed MMIO reg or real offset. */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
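/*
 * Illustrative sketch (not part of the original header): the vgpu_vreg*()
 * macros expand to lvalues inside the vGPU's virtual MMIO image, so they can
 * be read and written directly. The helper name below is hypothetical.
 */
static inline void example_vgpu_vreg_set_bit(struct intel_vgpu *vgpu,
					     u32 offset, u32 bit)
{
	vgpu_vreg(vgpu, offset) |= (1U << bit);
}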
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32 bits aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	/*
	 * only update bits 31 - 4,
	 * leave bits 3 - 0 unchanged.
	 */
	*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
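	/*
	 * Worked example (added for clarity, not in the original header):
	 * if the BAR dword currently reads 0x0000000c (attribute bits 3-0)
	 * and the guest writes 0xffffffff while sizing the BAR, the masking
	 * above yields
	 *
	 *	(0xffffffff & GENMASK(31, 4)) | (0x0000000c & GENMASK(3, 0))
	 *		= 0xfffffff0 | 0x0000000c
	 *		= 0xfffffffc
	 *
	 * so the read-only attribute bits stay intact.
	 */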
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	 (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);
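/*
 * Illustrative sketch (not part of the original header): a typical
 * guest-to-host graphics memory address translation first validates that the
 * guest address lies in the vGPU's aperture or hidden GM range, then converts
 * it with intel_gvt_ggtt_gmadr_g2h(). The wrapper name is hypothetical.
 */
static inline int example_gmadr_g2h_checked(struct intel_vgpu *vgpu,
					    u64 g_addr, u64 *h_addr)
{
	if (!vgpu_gmadr_is_valid(vgpu, g_addr))
		return -EINVAL;

	return intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, h_addr);
}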
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* We are a 64-bit BAR. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				 unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				 unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				  unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
	void (*vgpu_release)(struct intel_vgpu *vgpu);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
	bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
	intel_runtime_pm_get(gt->uncore->rpm);
}

static inline void mmio_hw_access_post(struct intel_gt *gt)
{
	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}
/**
 * intel_gvt_mmio_set_accessed - mark an MMIO as accessed
 * @offset: register offset
 */
static inline void intel_gvt_mmio_set_accessed(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}
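/*
 * Added note (not in the original header): mmio_attribute is indexed by
 * dword offset, e.g. register offset 0x2358 maps to slot 0x2358 >> 2 = 0x8d6.
 */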
/**
 * intel_gvt_mmio_is_cmd_accessible - check if an MMIO could be accessed by command
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO can be accessed by GPU commands
 */
static inline bool intel_gvt_mmio_is_cmd_accessible(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}
/**
 * intel_gvt_mmio_set_cmd_accessible -
 *				mark an MMIO as accessible by GPU commands
 * @offset: register offset
 */
static inline void intel_gvt_mmio_set_cmd_accessible(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}
/**
 * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed with an unaligned address
 * @offset: register offset
 */
static inline bool intel_gvt_mmio_is_unalign(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}
/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has a mode mask in its higher 16 bits, false otherwise.
 */
static inline bool intel_gvt_mmio_has_mode_mask(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
/**
 * intel_gvt_mmio_is_sr_in_ctx -
 *		check if an MMIO has the F_SR_IN_CTX flag set
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has the F_SR_IN_CTX flag set, false otherwise.
 */
static inline bool intel_gvt_mmio_is_sr_in_ctx(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}
/**
 * intel_gvt_mmio_set_sr_in_ctx -
 *		mark an MMIO as being in GVT's mmio save-restore list and also
 *		in the hardware logical context image
 * @offset: register offset
 */
static inline void intel_gvt_mmio_set_sr_in_ctx(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}

void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
/**
 * intel_gvt_mmio_set_cmd_write_patch -
 *				mark an MMIO whose command write needs to be patched
 * @offset: register offset
 */
static inline void intel_gvt_mmio_set_cmd_write_patch(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
}
/**
 * intel_gvt_mmio_is_cmd_write_patch - check if an MMIO's cmd write needs to
 *					be patched
 * @offset: register offset
 *
 * Returns:
 * True if a GPU command write to the MMIO should be patched
 */
static inline bool intel_gvt_mmio_is_cmd_write_patch(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);

int intel_gvt_pm_resume(struct intel_gvt *gvt);