KVM: arm64: nv: Move TLBI range decoding to a helper
author		Marc Zyngier <maz@kernel.org>
Wed, 14 May 2025 10:34:48 +0000 (11:34 +0100)
committer	Marc Zyngier <maz@kernel.org>
Mon, 19 May 2025 06:59:46 +0000 (07:59 +0100)
As we are about to expand our TLB invalidation capabilities to support
recursive virtualisation, move the decoding of a TLBI by range into
a helper that returns the base, the range and the ASID.
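For reference, a range TLBI payload follows the FEAT_TLBIRANGE layout: TG
in bits [47:46] selects the translation granule, SCALE in [45:44] and NUM
in [43:39] encode the number of pages as (NUM + 1) << (5 * SCALE + 1) (the
kernel's __TLBI_RANGE_PAGES()), BaseADDR sits in [36:0] and the ASID, when
relevant, in [63:48]. A worked example with made-up field values:

	/* Illustrative payload: TG=1 (4KiB), SCALE=2, NUM=4, BaseADDR=0x1000 */
	u64 val = (1ULL << 46) | (2ULL << 44) | (4ULL << 39) | 0x1000;

	/* shift = 12 (4KiB granule)                          */
	/* base  = 0x1000 << 12           = 0x1000000         */
	/* pages = (4 + 1) << (5 * 2 + 1) = 10240             */
	/* range = 10240 << 12            = 0x2800000 (40MiB) */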

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250514103501.2225951-6-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_nested.h
arch/arm64/kvm/sys_regs.c

diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 4ba3780..9d56fd9 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -231,6 +231,38 @@ static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
                shift;                                                  \
        })
 
+static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid)
+{
+       u64 base, tg, num, scale;
+       int shift;
+
+       tg      = FIELD_GET(GENMASK(47, 46), val);
+
+       switch(tg) {
+       case 1:
+               shift = 12;
+               break;
+       case 2:
+               shift = 14;
+               break;
+       case 3:
+       default:                /* IMPDEF: handle tg==0 as 64k */
+               shift = 16;
+               break;
+       }
+
+       base    = (val & GENMASK(36, 0)) << shift;
+
+       if (asid)
+               *asid = FIELD_GET(TLBIR_ASID_MASK, val);
+
+       scale   = FIELD_GET(GENMASK(45, 44), val);
+       num     = FIELD_GET(GENMASK(43, 39), val);
+       *range  = __TLBI_RANGE_PAGES(num, scale) << shift;
+
+       return base;
+}
+
 static inline unsigned int ps_to_output_size(unsigned int ps)
 {
        switch (ps) {
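Outside the kernel tree, a self-contained userspace sketch of the same
decode is handy for sanity-checking payload values. The GENMASK(),
FIELD_GET() and __TLBI_RANGE_PAGES() stand-ins below are local
approximations for illustration, not the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		((~0ULL >> (63 - (h))) & (~0ULL << (l)))
#define FIELD_GET(mask, val)	(((val) & (mask)) / ((mask) & -(mask)))
#define __TLBI_RANGE_PAGES(num, scale)	(((num) + 1ULL) << (5 * (scale) + 1))
#define TLBIR_ASID_MASK		GENMASK(63, 48)

static uint64_t decode_range_tlbi(uint64_t val, uint64_t *range, uint16_t *asid)
{
	uint64_t tg = FIELD_GET(GENMASK(47, 46), val);
	/* tg==0 is IMPDEF; treat it as 64k, like the kernel helper */
	int shift = (tg == 1) ? 12 : (tg == 2) ? 14 : 16;

	if (asid)
		*asid = FIELD_GET(TLBIR_ASID_MASK, val);

	*range = __TLBI_RANGE_PAGES(FIELD_GET(GENMASK(43, 39), val),
				    FIELD_GET(GENMASK(45, 44), val)) << shift;
	return (val & GENMASK(36, 0)) << shift;
}

int main(void)
{
	/* TG=1 (4KiB), SCALE=2, NUM=4, BaseADDR=0x1000 */
	uint64_t val = (1ULL << 46) | (2ULL << 44) | (4ULL << 39) | 0x1000;
	uint64_t base, range;
	uint16_t asid;

	base = decode_range_tlbi(val, &range, &asid);
	printf("base=%#llx range=%#llx asid=%u\n",
	       (unsigned long long)base, (unsigned long long)range,
	       (unsigned int)asid);
	return 0;
}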
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 005ad28..26e02e1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -3546,8 +3546,7 @@ static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
        u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
        u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
-       u64 base, range, tg, num, scale;
-       int shift;
+       u64 base, range;
 
        if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
                return undef_access(vcpu, p, r);
@@ -3557,26 +3556,7 @@ static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
         * of the guest's S2 (different base granule size, for example), we
         * decide to ignore TTL and only use the described range.
         */
-       tg      = FIELD_GET(GENMASK(47, 46), p->regval);
-       scale   = FIELD_GET(GENMASK(45, 44), p->regval);
-       num     = FIELD_GET(GENMASK(43, 39), p->regval);
-       base    = p->regval & GENMASK(36, 0);
-
-       switch(tg) {
-       case 1:
-               shift = 12;
-               break;
-       case 2:
-               shift = 14;
-               break;
-       case 3:
-       default:                /* IMPDEF: handle tg==0 as 64k */
-               shift = 16;
-               break;
-       }
-
-       base <<= shift;
-       range = __TLBI_RANGE_PAGES(num, scale) << shift;
+       base = decode_range_tlbi(p->regval, &range, NULL);
 
        kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
                                   &(union tlbi_info) {
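Looking ahead to the recursive-NV user, VA-based range ops (RVAE1 and
friends) also carry an ASID in bits [63:48]; a hypothetical caller (names
and plumbing assumed, not part of this patch) would pass a real pointer
instead of NULL:

	u64 base, range;
	u16 asid;

	/* Hypothetical RVAE1-style handler fragment: also extract the ASID */
	base = decode_range_tlbi(p->regval, &range, &asid);
	/* ...invalidate [base, base + range) for this ASID... */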