Merge branch 'misc.namei' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index f004c01..0277838 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -25,6 +25,46 @@ static inline u64 kvm_get_parange(u64 mmfr0)
 
 typedef u64 kvm_pte_t;
 
+#define KVM_PTE_VALID                  BIT(0)
+
+#define KVM_PTE_ADDR_MASK              GENMASK(47, PAGE_SHIFT)
+#define KVM_PTE_ADDR_51_48             GENMASK(15, 12)
+
+static inline bool kvm_pte_valid(kvm_pte_t pte)
+{
+       return pte & KVM_PTE_VALID;
+}
+
+static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
+{
+       u64 pa = pte & KVM_PTE_ADDR_MASK;
+
+       if (PAGE_SHIFT == 16)
+               pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
+
+       return pa;
+}
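For illustration, a caller that has already fetched a PTE could decode it with the two helpers above; the wrapper below is a hypothetical sketch, not part of the patch.

	/* Hypothetical helper: turn a fetched stage-2 PTE into a physical address. */
	static u64 example_pte_to_pa(kvm_pte_t pte)
	{
		if (!kvm_pte_valid(pte))
			return 0;	/* invalid entries carry no output address */

		/*
		 * Bits [47:PAGE_SHIFT] come straight from the entry; with 64KiB
		 * pages (PAGE_SHIFT == 16), kvm_pte_to_phys() also folds PA bits
		 * [51:48] in from PTE bits [15:12].
		 */
		return kvm_pte_to_phys(pte);
	}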
+
+static inline u64 kvm_granule_shift(u32 level)
+{
+       /* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
+       return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
+}
+
+static inline u64 kvm_granule_size(u32 level)
+{
+       return BIT(kvm_granule_shift(level));
+}
+
+static inline bool kvm_level_supports_block_mapping(u32 level)
+{
+       /*
+        * Reject invalid block mappings and don't bother with 4TB mappings for
+        * 52-bit PAs.
+        */
+       return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
+}
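As a worked example of the helpers above (assuming the usual ARM64_HW_PGTABLE_LEVEL_SHIFT(n) = (PAGE_SHIFT - 3) * (4 - n) + 3 from asm/pgtable-hwdef.h), the granule sizes come out as:

	/*
	 * 4KiB pages  (PAGE_SHIFT = 12): level 3 -> 4KiB,  level 2 -> 2MiB,
	 *                                level 1 -> 1GiB,  level 0 -> 512GiB
	 * 64KiB pages (PAGE_SHIFT = 16): level 3 -> 64KiB, level 2 -> 512MiB,
	 *                                level 1 -> 4TiB
	 *
	 * kvm_level_supports_block_mapping() therefore accepts levels 1-3 with
	 * 4KiB pages but only levels 2-3 otherwise, which rules out the 4TiB
	 * level-1 blocks mentioned in the comment.
	 */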
+
 /**
  * struct kvm_pgtable_mm_ops - Memory management callbacks.
  * @zalloc_page:               Allocate a single zeroed memory page.
@@ -75,31 +115,16 @@ enum kvm_pgtable_stage2_flags {
        KVM_PGTABLE_S2_IDMAP                    = BIT(1),
 };
 
-/**
- * struct kvm_pgtable - KVM page-table.
- * @ia_bits:           Maximum input address size, in bits.
- * @start_level:       Level at which the page-table walk starts.
- * @pgd:               Pointer to the first top-level entry of the page-table.
- * @mm_ops:            Memory management callbacks.
- * @mmu:               Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
- */
-struct kvm_pgtable {
-       u32                                     ia_bits;
-       u32                                     start_level;
-       kvm_pte_t                               *pgd;
-       struct kvm_pgtable_mm_ops               *mm_ops;
-
-       /* Stage-2 only */
-       struct kvm_s2_mmu                       *mmu;
-       enum kvm_pgtable_stage2_flags           flags;
-};
-
 /**
  * enum kvm_pgtable_prot - Page-table permissions and attributes.
  * @KVM_PGTABLE_PROT_X:                Execute permission.
  * @KVM_PGTABLE_PROT_W:                Write permission.
  * @KVM_PGTABLE_PROT_R:                Read permission.
  * @KVM_PGTABLE_PROT_DEVICE:   Device attributes.
+ * @KVM_PGTABLE_PROT_SW0:      Software bit 0.
+ * @KVM_PGTABLE_PROT_SW1:      Software bit 1.
+ * @KVM_PGTABLE_PROT_SW2:      Software bit 2.
+ * @KVM_PGTABLE_PROT_SW3:      Software bit 3.
  */
 enum kvm_pgtable_prot {
        KVM_PGTABLE_PROT_X                      = BIT(0),
@@ -107,21 +132,48 @@ enum kvm_pgtable_prot {
        KVM_PGTABLE_PROT_R                      = BIT(2),
 
        KVM_PGTABLE_PROT_DEVICE                 = BIT(3),
+
+       KVM_PGTABLE_PROT_SW0                    = BIT(55),
+       KVM_PGTABLE_PROT_SW1                    = BIT(56),
+       KVM_PGTABLE_PROT_SW2                    = BIT(57),
+       KVM_PGTABLE_PROT_SW3                    = BIT(58),
 };

 
-#define PAGE_HYP               (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
+#define KVM_PGTABLE_PROT_RW    (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
+#define KVM_PGTABLE_PROT_RWX   (KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)
+
+#define PKVM_HOST_MEM_PROT     KVM_PGTABLE_PROT_RWX
+#define PKVM_HOST_MMIO_PROT    KVM_PGTABLE_PROT_RW
+
+#define PAGE_HYP               KVM_PGTABLE_PROT_RW
 #define PAGE_HYP_EXEC          (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
 #define PAGE_HYP_RO            (KVM_PGTABLE_PROT_R)
 #define PAGE_HYP_DEVICE                (PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)
 
+typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
+                                          enum kvm_pgtable_prot prot);
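For illustration, a callback matching this typedef might look like the sketch below; the function name and the policy it applies are made up for the example and are not part of the patch.

	/*
	 * Hypothetical kvm_pgtable_force_pte_cb_t: returning true asks the map
	 * path to install page-level mappings for [addr, end) rather than a
	 * block mapping.
	 */
	static bool example_force_pte_cb(u64 addr, u64 end,
					 enum kvm_pgtable_prot prot)
	{
		/* e.g. never use block mappings for device memory */
		return prot & KVM_PGTABLE_PROT_DEVICE;
	}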
+
 /**
- * struct kvm_mem_range - Range of Intermediate Physical Addresses
- * @start:     Start of the range.
- * @end:       End of the range.
+ * struct kvm_pgtable - KVM page-table.
+ * @ia_bits:           Maximum input address size, in bits.
+ * @start_level:       Level at which the page-table walk starts.
+ * @pgd:               Pointer to the first top-level entry of the page-table.
+ * @mm_ops:            Memory management callbacks.
+ * @mmu:               Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
+ * @flags:             Stage-2 page-table flags.
+ * @force_pte_cb:      Function that returns true if page level mappings must
+ *                     be used instead of block mappings.
  */
-struct kvm_mem_range {
-       u64 start;
-       u64 end;
+struct kvm_pgtable {
+       u32                                     ia_bits;
+       u32                                     start_level;
+       kvm_pte_t                               *pgd;
+       struct kvm_pgtable_mm_ops               *mm_ops;
+
+       /* Stage-2 only */
+       struct kvm_s2_mmu                       *mmu;
+       enum kvm_pgtable_stage2_flags           flags;
+       kvm_pgtable_force_pte_cb_t              force_pte_cb;
 };
 
 /**
@@ -216,21 +268,24 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
 
 /**
- * kvm_pgtable_stage2_init_flags() - Initialise a guest stage-2 page-table.
+ * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
  * @pgt:       Uninitialised page-table structure to initialise.
  * @arch:      Arch-specific KVM structure representing the guest virtual
  *             machine.
  * @mm_ops:    Memory management callbacks.
  * @flags:     Stage-2 configuration flags.
+ * @force_pte_cb: Function that returns true if page level mappings must
+ *             be used instead of block mappings.
  *
  * Return: 0 on success, negative error code on failure.
  */
-int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
-                                 struct kvm_pgtable_mm_ops *mm_ops,
-                                 enum kvm_pgtable_stage2_flags flags);
+int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+                             struct kvm_pgtable_mm_ops *mm_ops,
+                             enum kvm_pgtable_stage2_flags flags,
+                             kvm_pgtable_force_pte_cb_t force_pte_cb);
 
 #define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
-       kvm_pgtable_stage2_init_flags(pgt, arch, mm_ops, 0)
+       __kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL)
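A sketch of how the two entry points relate; "pgt", "kvm" and "mm_ops" stand in for caller-owned objects, and example_force_pte_cb is the hypothetical callback sketched earlier.

	/* Default behaviour: no stage-2 flags, block mappings allowed everywhere. */
	err = kvm_pgtable_stage2_init(&pgt, &kvm->arch, &mm_ops);

	/* Extended form: identity-mapped stage-2 with a mapping-size policy. */
	err = __kvm_pgtable_stage2_init(&pgt, &kvm->arch, &mm_ops,
					KVM_PGTABLE_S2_IDMAP,
					example_force_pte_cb);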
 
 /**
  * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
@@ -374,7 +429,8 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
  * If there is a valid, leaf page-table entry used to translate @addr, then
  * relax the permissions in that entry according to the read, write and
  * execute permissions specified by @prot. No permissions are removed, and
- * TLB invalidation is performed after updating the entry.
+ * TLB invalidation is performed after updating the entry. Software bits cannot
+ * be set or cleared using kvm_pgtable_stage2_relax_perms().
  *
  * Return: 0 on success, negative error code on failure.
  */
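To make the new restriction concrete: a caller can only ever add R/W/X through this path. The call below is a sketch (the full prototype is declared outside this hunk); "pgt" and "fault_ipa" are placeholders.

	/*
	 * Grant read/write/execute at the faulting IPA; software bits in the
	 * requested prot are simply ignored by this call.
	 */
	ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa,
					     KVM_PGTABLE_PROT_RWX);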
@@ -433,22 +489,42 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
                     struct kvm_pgtable_walker *walker);
 
 /**
- * kvm_pgtable_stage2_find_range() - Find a range of Intermediate Physical
- *                                  Addresses with compatible permission
- *                                  attributes.
- * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:      Address that must be covered by the range.
- * @prot:      Protection attributes that the range must be compatible with.
- * @range:     Range structure used to limit the search space at call time and
- *             that will hold the result.
+ * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
+ *                         with its level.
+ * @pgt:       Page-table structure initialised by kvm_pgtable_*_init()
+ *             or a similar initialiser.
+ * @addr:      Input address for the start of the walk.
+ * @ptep:      Pointer to storage for the retrieved PTE.
+ * @level:     Pointer to storage for the level of the retrieved PTE.
+ *
+ * The offset of @addr within a page is ignored.
  *
- * The offset of @addr within a page is ignored. An IPA is compatible with @prot
- * iff its corresponding stage-2 page-table entry has default ownership and, if
- * valid, is mapped with protection attributes identical to @prot.
+ * The walker walks the page-table entries corresponding to the input
+ * address, retrieving the leaf entry for this address. Invalid entries
+ * are treated as leaf entries.
  *
  * Return: 0 on success, negative error code on failure.
  */
-int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
-                                 enum kvm_pgtable_prot prot,
-                                 struct kvm_mem_range *range);
+int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
+                        kvm_pte_t *ptep, u32 *level);
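A minimal usage sketch, assuming "pgt" is an initialised table and "addr" an input address of interest:

	kvm_pte_t pte;
	u32 level;

	if (!kvm_pgtable_get_leaf(&pgt, addr, &pte, &level) &&
	    kvm_pte_valid(pte)) {
		/*
		 * addr is translated by a leaf at "level": a mapping of
		 * kvm_granule_size(level) bytes whose output address is
		 * kvm_pte_to_phys(pte).
		 */
	}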
+
+/**
+ * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
+ *                                stage-2 Page-Table Entry.
+ * @pte:       Page-table entry
+ *
+ * Return: protection attributes of the page-table entry in the enum
+ *        kvm_pgtable_prot format.
+ */
+enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
+
+/**
+ * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
+ *                             Page-Table Entry.
+ * @pte:       Page-table entry
+ *
+ * Return: protection attributes of the page-table entry in the enum
+ *        kvm_pgtable_prot format.
+ */
+enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
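A short sketch of how these helpers might be combined with kvm_pgtable_get_leaf(); "pte" is a stage-2 leaf retrieved as in the example above, and the assumption that software bits show up in the returned value follows from their presence in enum kvm_pgtable_prot.

	enum kvm_pgtable_prot prot = kvm_pgtable_stage2_pte_prot(pte);
	bool writable = prot & KVM_PGTABLE_PROT_W;
	bool sw0_set  = prot & KVM_PGTABLE_PROT_SW0;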
 #endif /* __ARM64_KVM_PGTABLE_H__ */