// SPDX-License-Identifier: GPL-2.0
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 */

#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>

#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>

union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
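
/*
 * The designation type (dt) selects where a DAT walk starts: with
 * ASCE_TYPE_REGION1 all five table levels are traversed, while with
 * ASCE_TYPE_SEGMENT the walk starts directly at the segment table.
 * The table length (tl) is measured in units of 4096 bytes (512 entries),
 * which is why only the two leftmost bits of an index (e.g. vaddr.rfx01
 * below) are compared against it.
 */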

union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};

union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};

struct region3_table_entry_fc0 {
	unsigned long sto: 52;/* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};

struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	 : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long iep: 1; /* Instruction-Execution-Protection */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};

struct segment_entry_fc0 {
	unsigned long pto: 53;/* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 3;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	 : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long iep: 1; /* Instruction-Execution-Protection */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};

enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};

union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z  : 1; /* Zero Bit */
		unsigned long i  : 1; /* Page-Invalid Bit */
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long iep: 1; /* Instruction-Execution-Protection */
		unsigned long	 : 8;
	};
};

/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};
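
/*
 * The index fields mirror the layout of a 64-bit virtual address:
 *	rfx = gva >> 53, rsx = (gva >> 42) & 0x7ff, rtx = (gva >> 31) & 0x7ff,
 *	sx = (gva >> 20) & 0x7ff, px = (gva >> 12) & 0xff, bx = gva & 0xfff.
 * The rfx01/rsx01/rtx01/sx01 fields overlay the two most significant bits
 * of each index; translation compares them against the table length fields.
 */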

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members exist so that
 * the value of a region, segment or page table entry can simply be
 * assigned to them.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
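
/*
 * guest_translate() initializes raddr with the virtual address and then
 * overwrites only the frame part: assigning e.g. a segment-frame absolute
 * address to sfaa replaces the top 44 bits while the 20-bit offset within
 * the 1MB segment (from the original address) is preserved.
 */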

union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p        : 1;
		u32 alesn    : 8;
		u32 alen     : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32     : 1;
		u32 alo : 24;
		u32 all : 7;
	};
};

struct ale {
	unsigned long i      : 1; /* ALEN-Invalid Bit */
	unsigned long        : 5;
	unsigned long fo     : 1; /* Fetch-Only Bit */
	unsigned long p      : 1; /* Private Bit */
	unsigned long alesn  : 8; /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long        : 32;
	unsigned long        : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long        : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};

struct aste {
	unsigned long i      : 1; /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long        : 1;
	unsigned long b      : 1; /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long        : 2;
	unsigned long ca     : 1; /* Controlled-ASN Bit */
	unsigned long ra     : 1; /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};

int ipte_lock_held(struct kvm *kvm)
{
	if (sclp.has_siif) {
		int rc;

		read_lock(&kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(kvm)->kh != 0;
		read_unlock(&kvm->arch.sca_lock);
		return rc;
	}
	return kvm->arch.ipte_lock_count != 0;
}
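
/*
 * The IPTE lock comes in two flavors. Without the siif facility, the lock
 * is tracked host-side: ipte_lock_count serializes KVM-internal users via
 * ipte_mutex, and the k bit in the shared ipte control block is set for
 * the duration. With siif, hardware honors the interlock while the guest
 * runs, so the lock is taken directly in the ipte control block via
 * cmpxchg: k is set, the kh hold count is incremented, and the code backs
 * off while the guest itself holds the lock (kg).
 */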

static void ipte_lock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count++;
	if (kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

static void ipte_unlock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count--;
	if (kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
	wake_up(&kvm->arch.ipte_wq);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

static void ipte_lock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
}

static void ipte_unlock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&kvm->arch.ipte_wq);
}

void ipte_lock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_lock_siif(kvm);
	else
		ipte_lock_simple(kvm);
}

void ipte_unlock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_unlock_siif(kvm);
	else
		ipte_unlock_simple(kvm);
}

static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
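
/*
 * In summary, ar_translation() performs the architected access-register
 * translation chain: ALET (from the access register) -> access-list
 * designation (from CR5 or CR2) -> access-list entry -> ASN-second-table
 * entry -> ASCE, with sequence-number and authority checks along the way.
 * The special ALETs 0 and 1 short-circuit to the primary (CR1) and
 * secondary (CR7) ASCE respectively.
 */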

struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2; /* Access Exception Fetch/Store Indication */
	unsigned long	   : 2;
	unsigned long b56  : 1;
	unsigned long	   : 3;
	unsigned long b60  : 1;
	unsigned long b61  : 1;
	unsigned long as   : 2; /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};

enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
	PROT_TYPE_IEP  = 4,
	/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
	PROT_TYPE_DUMMY,
};

static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
			    enum gacc_mode mode, enum prot_type prot, bool terminate)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_DUMMY:
			/* We should never get here, acts like termination */
			WARN_ON_ONCE(1);
			break;
		case PROT_TYPE_IEP:
			tec->b61 = 1;
			fallthrough;
		case PROT_TYPE_LA:
			tec->b56 = 1;
			break;
		case PROT_TYPE_KEYC:
			tec->b60 = 1;
			break;
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			fallthrough;
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			break;
		}
		if (terminate) {
			tec->b56 = 0;
			tec->b60 = 0;
			tec->b61 = 0;
		}
		fallthrough;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		fallthrough;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}

static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
		     enum gacc_mode mode, enum prot_type prot)
{
	return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false);
}

static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, u8 ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.dat) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
		psw.as = PSW_BITS_AS_PRIMARY;

	switch (psw.as) {
	case PSW_BITS_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_BITS_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_BITS_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_BITS_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}

/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 * @prot: returns the type for protection exceptions
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode, enum prot_type *prot)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	int iep_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2, iep;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
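	/*
	 * Facility 8 is EDAT-1 (1MB segment frames), facility 78 is EDAT-2
	 * (2GB region frames) and facility 130 is instruction-execution
	 * protection; each is honored only if the guest has also enabled
	 * the corresponding control in CR0.
	 */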
	if (asce.r)
		goto real_address;
	ptr = asce.origin * PAGE_SIZE;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			iep_protection = rtte.fc1.iep;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			iep_protection = ste.fc1.iep;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	iep_protection = pte.iep;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection) {
		*prot = PROT_TYPE_DAT;
		return PGM_PROTECTION;
	}
	if (mode == GACC_IFETCH && iep_protection && iep) {
		*prot = PROT_TYPE_IEP;
		return PGM_PROTECTION;
	}
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}

static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}
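
/*
 * The mask works because 0x11ff sets exactly bits 0-8 (values 0..511) and
 * bit 12 (value 4096): (ga & ~0x11ff) == 0 holds iff all other bits are
 * zero, i.e. iff ga lies in 0..511 or 4096..4607.
 */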

static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).dat && asce.p)
		return 0;
	return 1;
}

static int vm_check_access_key(struct kvm *kvm, u8 access_key,
			       enum gacc_mode mode, gpa_t gpa)
{
	u8 storage_key, access_control;
	bool fetch_protected;
	unsigned long hva;
	int r;

	if (access_key == 0)
		return 0;

	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;

	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	if (access_control == access_key)
		return 0;
	fetch_protected = storage_key & _PAGE_FP_BIT;
	if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected)
		return 0;
	return PGM_PROTECTION;
}

static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
					   union asce asce)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long override;

	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* check if fetch protection override enabled */
		override = vcpu->arch.sie_block->gcr[0];
		override &= CR0_FETCH_PROTECTION_OVERRIDE;
		/* not applicable if subject to DAT && private space */
		override = override && !(psw_bits(*psw).dat && asce.p);
		return override;
	}
	return false;
}

static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
{
	return ga < 2048 && ga + len <= 2048;
}
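
/*
 * Fetch-protection override covers only effective addresses 0..2047, so
 * it is applied only when the whole access lies below 2048.
 */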

static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
{
	/* check if storage protection override enabled */
	return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
}

static bool storage_prot_override_applies(u8 access_control)
{
	/* matches special storage protection override key (9) -> allow */
	return access_control == PAGE_SPO_ACC;
}

static int vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key,
				 enum gacc_mode mode, union asce asce, gpa_t gpa,
				 unsigned long ga, unsigned int len)
{
	u8 storage_key, access_control;
	unsigned long hva;
	int r;

	/* access key 0 matches any storage key -> allow */
	if (access_key == 0)
		return 0;
	/*
	 * caller needs to ensure that gfn is accessible, so we can
	 * assume that this cannot fail
	 */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	/* access key matches storage key -> allow */
	if (access_control == access_key)
		return 0;
	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* it is a fetch and fetch protection is off -> allow */
		if (!(storage_key & _PAGE_FP_BIT))
			return 0;
		if (fetch_prot_override_applicable(vcpu, mode, asce) &&
		    fetch_prot_override_applies(ga, len))
			return 0;
	}
	if (storage_prot_override_applicable(vcpu) &&
	    storage_prot_override_applies(access_control))
		return 0;
	return PGM_PROTECTION;
}
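
/*
 * The order of the checks above mirrors the architected storage-key
 * protection rules: key 0 and a matching key always allow the access;
 * fetches are further allowed if fetch protection is off for the page or
 * if fetch-protection override applies; finally, storage-protection
 * override allows accesses to pages with access-control key 9.
 */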

/**
 * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
 * covering a logical range
 * @vcpu: virtual cpu
 * @ga: guest address, start of range
 * @ar: access register
 * @gpas: output argument, may be NULL
 * @len: length of range in bytes
 * @asce: address-space-control element to use for translation
 * @mode: access mode
 * @access_key: access key to match the range's storage keys against
 *
 * Translate a logical range to a series of guest absolute addresses,
 * such that the concatenation of page fragments starting at each gpa make up
 * the whole range.
 * The translation is performed as if done by the cpu for the given @asce, @ar,
 * @mode and state of the @vcpu.
 * If the translation causes an exception, its program interruption code is
 * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
 * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
 * a correct exception into the guest.
 * The resulting gpas are stored into @gpas, unless it is NULL.
 *
 * Note: All fragments except the first one start at the beginning of a page.
 *	 When deriving the boundaries of a fragment from a gpa, all but the last
 *	 fragment end at the end of the page.
 *
 * Return:
 * * 0		- success
 * * <0		- translation could not be performed, for example if guest
 *		  memory could not be accessed
 * * >0		- an access exception occurred. In this case the returned value
 *		  is the program interruption code and the contents of pgm may
 *		  be used to inject an exception into the guest.
 */
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			       unsigned long *gpas, unsigned long len,
			       const union asce asce, enum gacc_mode mode,
			       u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned int offset = offset_in_page(ga);
	unsigned int fragment_len;
	int lap_enabled, rc = 0;
	enum prot_type prot;
	unsigned long gpa;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		if (psw_bits(*psw).dat) {
			rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
			if (rc < 0)
				return rc;
		} else {
			gpa = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
				rc = PGM_ADDRESSING;
				prot = PROT_TYPE_DUMMY;
			}
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, prot);
		rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga,
					   fragment_len);
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_KEYC);
		if (gpas)
			*gpas++ = gpa;
		offset = 0;
		ga += fragment_len;
		len -= fragment_len;
	}
	return 0;
}

static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			     void *data, unsigned int len)
{
	const unsigned int offset = offset_in_page(gpa);
	const gfn_t gfn = gpa_to_gfn(gpa);
	int rc;

	if (mode == GACC_STORE)
		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
	else
		rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
	return rc;
}

static int
access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			   void *data, unsigned int len, u8 access_key)
{
	struct kvm_memory_slot *slot;
	bool writable;
	gfn_t gfn;
	hva_t hva;
	int rc;

	gfn = gpa >> PAGE_SHIFT;
	slot = gfn_to_memslot(kvm, gfn);
	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);

	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that cannot occur
	 * since those are unsupported.
	 * Don't try to actually handle that case.
	 */
	if (!writable && mode == GACC_STORE)
		return -EOPNOTSUPP;
	hva += offset_in_page(gpa);
	if (mode == GACC_STORE)
		rc = copy_to_user_key((void __user *)hva, data, len, access_key);
	else
		rc = copy_from_user_key(data, (void __user *)hva, len, access_key);
	if (rc)
		return PGM_PROTECTION;
	if (mode == GACC_STORE)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	return 0;
}
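
/*
 * copy_to_user_key()/copy_from_user_key() are key-checked variants of the
 * usual uaccess copies: the storage-key check is performed by the copy
 * instruction itself, which is why a failure here is reported to the
 * guest as a (key-controlled) protection exception rather than being
 * re-checked in software.
 */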

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key)
{
	int offset = offset_in_page(gpa);
	int fragment_len;
	int rc = 0;

	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key);
		if (rc)
			return rc;
		offset = 0;
		len -= fragment_len;
		data += fragment_len;
		gpa += fragment_len;
	}
	return rc;
}

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long nr_pages, idx;
	unsigned long gpa_array[2];
	unsigned int fragment_len;
	unsigned long *gpas;
	enum prot_type prot;
	int need_ipte_lock;
	union asce asce;
	bool try_storage_prot_override;
	bool try_fetch_prot_override;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	gpas = gpa_array;
	if (nr_pages > ARRAY_SIZE(gpa_array))
		gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
	if (!gpas)
		return -ENOMEM;
	try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
	try_storage_prot_override = storage_prot_override_applicable(vcpu);
	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu->kvm);
	/*
	 * Since we do the access further down ultimately via a move instruction
	 * that does key checking and returns an error in case of a protection
	 * violation, we don't need to do the check during address translation.
	 * Skip it by passing access key 0, which matches any storage key,
	 * obviating the need for any further checks. As a result the check is
	 * handled entirely in hardware on access, we only need to take care to
	 * forego key protection checking if fetch protection override applies or
	 * retry with the special key 9 in case of storage protection override.
	 */
	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
	if (rc)
		goto out_unlock;
	for (idx = 0; idx < nr_pages; idx++) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
		if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
			rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
					       data, fragment_len);
		} else {
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, access_key);
		}
		if (rc == PGM_PROTECTION && try_storage_prot_override)
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, PAGE_SPO_ACC);
		if (rc)
			break;
		len -= fragment_len;
		data += fragment_len;
		ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
	}
	if (rc > 0) {
		bool terminate = (mode == GACC_STORE) && (idx > 0);

		if (rc == PGM_PROTECTION)
			prot = PROT_TYPE_KEYC;
		else
			prot = PROT_TYPE_DUMMY;
		rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
	}
out_unlock:
	if (need_ipte_lock)
		ipte_unlock(vcpu->kvm);
	if (nr_pages > ARRAY_SIZE(gpa_array))
		vfree(gpas);
	return rc;
}

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned int fragment_len;
	unsigned long gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
		rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
		len -= fragment_len;
		gra += fragment_len;
		data += fragment_len;
	}
	return rc;
}

/**
 * cmpxchg_guest_abs_with_key() - Perform cmpxchg on guest absolute address.
 * @kvm: Virtual machine instance.
 * @gpa: Absolute guest address of the location to be changed.
 * @len: Operand length of the cmpxchg, required: 1 <= len <= 16. Providing a
 *       non-power of two will result in failure.
 * @old_addr: Pointer to old value. If the location at @gpa contains this value,
 *            the exchange will succeed. After calling cmpxchg_guest_abs_with_key()
 *            *@old_addr contains the value at @gpa before the attempt to
 *            exchange the value.
 * @new: The value to place at @gpa.
 * @access_key: The access key to use for the guest access.
 * @success: output value indicating if an exchange occurred.
 *
 * Atomically exchange the value at @gpa by @new, if it contains *@old_addr.
 * Honors storage keys.
 *
 * Return: * 0: successful exchange
 *	   * >0: a program interruption code indicating the reason cmpxchg could
 *		 not be attempted
 *	   * -EINVAL: address misaligned or len not power of two
 *	   * -EAGAIN: transient failure (len 1 or 2)
 *	   * -EOPNOTSUPP: read-only memslot (should never occur)
 */
int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len,
			       __uint128_t *old_addr, __uint128_t new,
			       u8 access_key, bool *success)
{
	gfn_t gfn = gpa_to_gfn(gpa);
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	bool writable;
	hva_t hva;
	int ret;

	if (!IS_ALIGNED(gpa, len))
		return -EINVAL;

	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that cannot occur
	 * since those are unsupported.
	 * Don't try to actually handle that case.
	 */
	if (!writable)
		return -EOPNOTSUPP;

	hva += offset_in_page(gpa);
	/*
	 * The cmpxchg_user_key macro depends on the type of "old", so we need
	 * a case for each valid length and get some code duplication as long
	 * as we don't introduce a new macro.
	 */
	switch (len) {
	case 1: {
		u8 old;

		ret = cmpxchg_user_key((u8 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 2: {
		u16 old;

		ret = cmpxchg_user_key((u16 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 4: {
		u32 old;

		ret = cmpxchg_user_key((u32 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 8: {
		u64 old;

		ret = cmpxchg_user_key((u64 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 16: {
		__uint128_t old;

		ret = cmpxchg_user_key((__uint128_t __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	default:
		return -EINVAL;
	}
	if (*success)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	/*
	 * Assume that the fault is caused by protection, either key protection
	 * or user page write protection.
	 */
	if (ret == -EFAULT)
		ret = PGM_PROTECTION;
	return ret;
}
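
/*
 * Typical caller pattern (sketch): retry the exchange with the value read
 * back into *old_addr until it succeeds or fails for a reason other than
 * contention:
 *
 *	do {
 *		ret = cmpxchg_guest_abs_with_key(kvm, gpa, len, &old,
 *						 new, key, &success);
 *	} while (!ret && !success && expected_still_matches(old));
 *
 * expected_still_matches() stands in for whatever recomputation the
 * caller performs with the updated old value; -EAGAIN (lengths 1 and 2
 * are emulated via a wider compare-and-swap) likewise warrants a retry.
 */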

/**
 * guest_translate_address_with_key - translate guest logical into guest absolute address
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @gpa: Guest physical address
 * @mode: Translation access mode
 * @access_key: access key to match the storage key with
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key)
{
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode,
				   access_key);
}

/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @length: Length of test range
 * @mode: Translation access mode
 * @access_key: access key to match the storage keys with
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key)
{
	union asce asce;
	int rc = 0;

	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	ipte_lock(vcpu->kvm);
	rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
				 access_key);
	ipte_unlock(vcpu->kvm);

	return rc;
}

/**
 * check_gpa_range - test a range of guest physical addresses for accessibility
 * @kvm: virtual machine instance
 * @gpa: guest physical address
 * @length: length of test range
 * @mode: access mode to test, relevant for storage keys
 * @access_key: access key to match the storage keys with
 */
int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key)
{
	unsigned int fragment_len;
	int rc = 0;

	while (length && !rc) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
		rc = vm_check_access_key(kvm, access_key, mode, gpa);
		length -= fragment_len;
		gpa += fragment_len;
	}
	return rc;
}

/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @vcpu: virtual cpu
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}

/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the beginning of the page table for the given address if
 *	 successful (return value 0), or to the first invalid DAT entry in
 *	 case of exceptions (return value > 0)
 * @dat_protection: referenced memory is write protected
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.origin * PAGE_SIZE;
	if (asce.r) {
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			ptr += vaddr.rfx * _REGION1_SIZE;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		*pgt = ptr + vaddr.rfx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			ptr += vaddr.rsx * _REGION2_SIZE;
			rste.val = ptr;
			goto shadow_r3t;
		}
		*pgt = ptr + vaddr.rsx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			ptr += vaddr.rtx * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		*pgt = ptr + vaddr.rtx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			ptr += vaddr.sx * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		*pgt = ptr + vaddr.sx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}

/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @datptr: will contain the address of the faulting DAT table entry, or of
 *	    the valid leaf, plus some flags
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr, unsigned long *datptr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt = 0;
	int dat_protection, fake;
	int rc;

	mmap_read_lock(sg->mm);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu->kvm);

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		pte.val = pgt + vaddr.px * PAGE_SIZE;
		goto shadow_page;
	}

	switch (rc) {
	case PGM_SEGMENT_TRANSLATION:
	case PGM_REGION_THIRD_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_FIRST_TRANS:
		pgt |= PEI_NOT_PTE;
		break;
	case 0:
		pgt += vaddr.px * 8;
		rc = gmap_read_table(sg->parent, pgt, &pte.val);
	}
	if (datptr)
		*datptr = pgt | dat_protection * PEI_DAT_PROT;
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && pte.z)
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu->kvm);
	mmap_read_unlock(sg->mm);
	return rc;
}
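
/*
 * On return, *datptr encodes the last parent table entry the walk touched:
 * PEI_NOT_PTE flags that translation stopped above the page table level,
 * and PEI_DAT_PROT carries the accumulated DAT-protection state, letting
 * the caller distinguish write protection from other fault reasons.
 */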