static __always_inline void arch_exit_to_user_mode(void)
{
if (test_thread_flag(TIF_FPU))
- __load_fpu_regs();
+ __load_user_fpu_regs();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
debug_user_asce(1);
}

static inline bool cpu_has_vx(void)
{
return likely(test_facility(129));
}
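/*
 * Note: the hunk above is the consumer of the lazy-restore contract.
 * TIF_FPU set means the user-space FPU/vector state lives in
 * current->thread.ufpu rather than in the registers, and must be
 * reloaded before returning to user space. A minimal sketch of that
 * contract (not the kernel's exact code; helper names hypothetical):
 */
static void sketch_clobber_user_fpu(void)
{
	save_user_fpu_regs();	/* state -> current->thread.ufpu, sets TIF_FPU */
	/* kernel code may now clobber FPU/vector registers */
}

static void sketch_return_to_user(void)
{
	if (test_thread_flag(TIF_FPU))
		__load_user_fpu_regs();	/* reloads ufpu, clears TIF_FPU */
}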
-void save_fpu_regs(void);
-void load_fpu_regs(void);
-void __load_fpu_regs(void);
+void save_user_fpu_regs(void);
+void load_user_fpu_regs(void);
+void __load_user_fpu_regs(void);
enum {
KERNEL_FPC_BIT = 0,
state->mask = S390_lowcore.fpu_flags;
if (!test_thread_flag(TIF_FPU)) {
/* Save user space FPU state and register contents */
- save_fpu_regs();
+ save_user_fpu_regs();
} else if (state->mask & flags) {
/* Save FPU/vector register in-use by the kernel */
__kernel_fpu_begin(state, flags);
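/*
 * For reference, in-kernel FPU/vector users bracket their code with
 * kernel_fpu_begin()/kernel_fpu_end(); the flags name the register
 * banks the caller will touch so only those are saved and restored.
 * A hedged usage sketch (sketch_vector_user() is hypothetical):
 */
static void sketch_vector_user(void)
{
	struct kernel_fpu state;

	kernel_fpu_begin(&state, KERNEL_VXR);
	/* vector registers V0-V31 may be used here */
	kernel_fpu_end(&state, KERNEL_VXR);
}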
struct gs_cb *gs_cb; /* Current guarded storage cb */
struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
struct pgm_tdb trap_tdb; /* Transaction abort diagnose block */
- struct fpu fpu; /* FP and VX register save area */
+ struct fpu ufpu; /* User FP and VX register save area */
};
/* Flag to disable transactions. */
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
- .fpu.regs = (void *) init_task.thread.fpu.fprs, \
+ .ufpu.regs = (void *)init_task.thread.ufpu.fprs, \
.last_break = 1, \
}
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
- save_fpu_regs();
+ save_user_fpu_regs();
}
/* Load registers after signal return */
user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
sizeof(user_sregs.regs.acrs));
- fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
+ fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.ufpu);
if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
return -EFAULT;
return 0;
regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
sizeof(current->thread.acrs));
- fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
+ fpregs_load((_s390_fp_regs *)&user_sregs.fpregs, &current->thread.ufpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0;
/* Save vector registers to signal stack */
if (cpu_has_vx()) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
- vxrs[i] = current->thread.fpu.vxrs[i].low;
+ vxrs[i] = current->thread.ufpu.vxrs[i].low;
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
sizeof(sregs_ext->vxrs_low)) ||
__copy_to_user(&sregs_ext->vxrs_high,
- current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+ current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
if (cpu_has_vx()) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
- __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+ __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
&sregs_ext->vxrs_high,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
for (i = 0; i < __NUM_VXRS_LOW; i++)
- current->thread.fpu.vxrs[i].low = vxrs[i];
+ current->thread.ufpu.vxrs[i].low = vxrs[i];
}
return 0;
}
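/*
 * The low/high split above mirrors the hardware layout: with the
 * vector facility the 16 floating-point registers are the high halves
 * of vector registers V0-V15, so a signal frame only needs the low
 * halves of V0-V15 plus all of V16-V31 in addition to the classic FP
 * registers. The kernel's convert_vx_to_fp() helper has roughly this
 * shape (sketch, not the exact source):
 */
static inline void sketch_convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
{
	int i;

	for (i = 0; i < __NUM_FPRS; i++)
		fprs[i].ui = vxrs[i].high;	/* fpr i == high half of vr i */
}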
if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
goto badframe;
set_current_blocked(&set);
- save_fpu_regs();
+ save_user_fpu_regs();
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_ext32(regs, &frame->sregs_ext))
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
- save_fpu_regs();
+ save_user_fpu_regs();
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
}
EXPORT_SYMBOL(__kernel_fpu_end);
-void __load_fpu_regs(void)
+void __load_user_fpu_regs(void)
{
- struct fpu *state = &current->thread.fpu;
- void *regs = current->thread.fpu.regs;
+ struct fpu *state = &current->thread.ufpu;
+ void *regs = current->thread.ufpu.regs;
fpu_lfpc_safe(&state->fpc);
if (likely(cpu_has_vx()))
clear_thread_flag(TIF_FPU);
}
-void load_fpu_regs(void)
+void load_user_fpu_regs(void)
{
raw_local_irq_disable();
- __load_fpu_regs();
+ __load_user_fpu_regs();
raw_local_irq_enable();
}
-EXPORT_SYMBOL(load_fpu_regs);
+EXPORT_SYMBOL(load_user_fpu_regs);
-void save_fpu_regs(void)
+void save_user_fpu_regs(void)
{
unsigned long flags;
struct fpu *state;
if (test_thread_flag(TIF_FPU))
goto out;
- state = &current->thread.fpu;
- regs = current->thread.fpu.regs;
+ state = &current->thread.ufpu;
+ regs = current->thread.ufpu.regs;
fpu_stfpc(&state->fpc);
if (likely(cpu_has_vx()))
out:
local_irq_restore(flags);
}
-EXPORT_SYMBOL(save_fpu_regs);
+EXPORT_SYMBOL(save_user_fpu_regs);
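/*
 * The double-underscore variant assumes interrupts are already
 * disabled; the plain wrappers only add the irq bracket, as
 * load_user_fpu_regs() shows above. A caller that already runs with
 * interrupts off would use the __ variant directly (sketch,
 * hypothetical caller):
 */
static void sketch_irqs_off_path(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (test_thread_flag(TIF_FPU))
		__load_user_fpu_regs();	/* no nested irq toggling */
	local_irq_restore(flags);
}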
idx -= PERF_REG_S390_FP0;
if (cpu_has_vx())
- fp = *(freg_t *)(current->thread.fpu.vxrs + idx);
+ fp = *(freg_t *)(current->thread.ufpu.vxrs + idx);
else
- fp = current->thread.fpu.fprs[idx];
+ fp = current->thread.ufpu.fprs[idx];
return fp.ui;
}
*/
regs_user->regs = task_pt_regs(current);
if (user_mode(regs_user->regs))
- save_fpu_regs();
+ save_user_fpu_regs();
regs_user->abi = perf_reg_abi(current);
}
* task and set the TIF_FPU flag to lazy restore the FPU register
* state when returning to user space.
*/
- save_fpu_regs();
+ save_user_fpu_regs();
*dst = *src;
- dst->thread.fpu.regs = dst->thread.fpu.fprs;
+ dst->thread.ufpu.regs = dst->thread.ufpu.fprs;
/*
* Don't transfer over the runtime instrumentation or the guarded
void execve_tail(void)
{
- current->thread.fpu.fpc = 0;
+ current->thread.ufpu.fpc = 0;
fpu_sfpc(0);
}
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
- save_fpu_regs();
+ save_user_fpu_regs();
save_access_regs(&prev->thread.acrs[0]);
save_ri_cb(prev->thread.ri_cb);
save_gs_cb(prev->thread.gs_cb);
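/*
 * Note that __switch_to() only saves the outgoing task's user FPU
 * state; nothing is loaded for the incoming task here. The reload is
 * deferred to arch_exit_to_user_mode() via TIF_FPU, so a task that is
 * scheduled without returning to user space never pays the restore.
 */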
/*
* floating point control reg. is in the thread structure
*/
- tmp = child->thread.fpu.fpc;
+ tmp = child->thread.ufpu.fpc;
tmp <<= BITS_PER_LONG - 32;
} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
- * floating point regs. are either in child->thread.fpu
- * or the child->thread.fpu.vxrs array
+ * floating point regs. are either in child->thread.ufpu
+ * or the child->thread.ufpu.vxrs array
*/
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
if (cpu_has_vx())
tmp = *(addr_t *)
- ((addr_t) child->thread.fpu.vxrs + 2*offset);
+ ((addr_t)child->thread.ufpu.vxrs + 2 * offset);
else
tmp = *(addr_t *)
- ((addr_t) child->thread.fpu.fprs + offset);
+ ((addr_t)child->thread.ufpu.fprs + offset);
} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
/*
*/
if ((unsigned int)data != 0)
return -EINVAL;
- child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
+ child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32);
} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
- * floating point regs. are either in child->thread.fpu
- * or the child->thread.fpu.vxrs array
+ * floating point regs. are either in child->thread.ufpu
+ * or the child->thread.ufpu.vxrs array
*/
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
if (cpu_has_vx())
*(addr_t *)((addr_t)
- child->thread.fpu.vxrs + 2*offset) = data;
+ child->thread.ufpu.vxrs + 2 * offset) = data;
else
*(addr_t *)((addr_t)
- child->thread.fpu.fprs + offset) = data;
+ child->thread.ufpu.fprs + offset) = data;
} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
/*
/*
* floating point control reg. is in the thread structure
*/
- tmp = child->thread.fpu.fpc;
+ tmp = child->thread.ufpu.fpc;
} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
- * floating point regs. are either in child->thread.fpu
- * or the child->thread.fpu.vxrs array
+ * floating point regs. are either in child->thread.ufpu
+ * or the child->thread.ufpu.vxrs array
*/
offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
if (cpu_has_vx())
tmp = *(__u32 *)
- ((addr_t) child->thread.fpu.vxrs + 2*offset);
+ ((addr_t)child->thread.ufpu.vxrs + 2 * offset);
else
tmp = *(__u32 *)
- ((addr_t) child->thread.fpu.fprs + offset);
+ ((addr_t)child->thread.ufpu.fprs + offset);
} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
/*
/*
* floating point control reg. is in the thread structure
*/
- child->thread.fpu.fpc = data;
+ child->thread.ufpu.fpc = data;
} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
- * floating point regs. are either in child->thread.fpu
- * or the child->thread.fpu.vxrs array
+ * floating point regs. are either in child->thread.ufpu
+ * or the child->thread.ufpu.vxrs array
*/
offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
if (cpu_has_vx())
*(__u32 *)((addr_t)
- child->thread.fpu.vxrs + 2*offset) = tmp;
+ child->thread.ufpu.vxrs + 2 * offset) = tmp;
else
*(__u32 *)((addr_t)
- child->thread.fpu.fprs + offset) = tmp;
+ child->thread.ufpu.fprs + offset) = tmp;
} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
/*
_s390_fp_regs fp_regs;
if (target == current)
- save_fpu_regs();
+ save_user_fpu_regs();
- fp_regs.fpc = target->thread.fpu.fpc;
- fpregs_store(&fp_regs, &target->thread.fpu);
+ fp_regs.fpc = target->thread.ufpu.fpc;
+ fpregs_store(&fp_regs, &target->thread.ufpu);
return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}
freg_t fprs[__NUM_FPRS];
if (target == current)
- save_fpu_regs();
+ save_user_fpu_regs();
if (cpu_has_vx())
- convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+ convert_vx_to_fp(fprs, target->thread.ufpu.vxrs);
else
- memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+ memcpy(&fprs, target->thread.ufpu.fprs, sizeof(fprs));
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
- u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
+ u32 ufpc[2] = { target->thread.ufpu.fpc, 0 };
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
0, offsetof(s390_fp_regs, fprs));
if (rc)
return rc;
if (ufpc[1] != 0)
return -EINVAL;
- target->thread.fpu.fpc = ufpc[0];
+ target->thread.ufpu.fpc = ufpc[0];
}
if (rc == 0 && count > 0)
return rc;
if (cpu_has_vx())
- convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
+ convert_fp_to_vx(target->thread.ufpu.vxrs, fprs);
else
- memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
+ memcpy(target->thread.ufpu.fprs, &fprs, sizeof(fprs));
return rc;
}
if (!cpu_has_vx())
return -ENODEV;
if (target == current)
- save_fpu_regs();
+ save_user_fpu_regs();
for (i = 0; i < __NUM_VXRS_LOW; i++)
- vxrs[i] = target->thread.fpu.vxrs[i].low;
+ vxrs[i] = target->thread.ufpu.vxrs[i].low;
return membuf_write(&to, vxrs, sizeof(vxrs));
}
if (!cpu_has_vx())
return -ENODEV;
if (target == current)
- save_fpu_regs();
+ save_user_fpu_regs();
for (i = 0; i < __NUM_VXRS_LOW; i++)
- vxrs[i] = target->thread.fpu.vxrs[i].low;
+ vxrs[i] = target->thread.ufpu.vxrs[i].low;
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
- target->thread.fpu.vxrs[i].low = vxrs[i];
+ target->thread.ufpu.vxrs[i].low = vxrs[i];
return rc;
}
if (!cpu_has_vx())
return -ENODEV;
if (target == current)
- save_fpu_regs();
- return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
+ save_user_fpu_regs();
+ return membuf_write(&to, target->thread.ufpu.vxrs + __NUM_VXRS_LOW,
__NUM_VXRS_HIGH * sizeof(__vector128));
}
if (!cpu_has_vx())
return -ENODEV;
if (target == current)
- save_fpu_regs();
+ save_user_fpu_regs();
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
+ target->thread.ufpu.vxrs + __NUM_VXRS_LOW, 0, -1);
return rc;
}
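/*
 * All regset accessors above follow one pattern: flush the live
 * registers into current->thread.ufpu first (only for target ==
 * current; a traced child is stopped with its state already saved),
 * then operate on the ufpu copy. Condensed sketch of the read side:
 */
static int sketch_vxrs_low_get(struct task_struct *target, struct membuf to)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!cpu_has_vx())
		return -ENODEV;
	if (target == current)
		save_user_fpu_regs();
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = target->thread.ufpu.vxrs[i].low;
	return membuf_write(&to, vxrs, sizeof(vxrs));
}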
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
- save_fpu_regs();
+ save_user_fpu_regs();
}
/* Load registers after signal return */
memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
sizeof(user_sregs.regs.acrs));
- fpregs_store(&user_sregs.fpregs, &current->thread.fpu);
+ fpregs_store(&user_sregs.fpregs, &current->thread.ufpu);
if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
return -EFAULT;
return 0;
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
sizeof(current->thread.acrs));
- fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
+ fpregs_load(&user_sregs.fpregs, &current->thread.ufpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0;
/* Save vector registers to signal stack */
if (cpu_has_vx()) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
- vxrs[i] = current->thread.fpu.vxrs[i].low;
+ vxrs[i] = current->thread.ufpu.vxrs[i].low;
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
sizeof(sregs_ext->vxrs_low)) ||
__copy_to_user(&sregs_ext->vxrs_high,
- current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+ current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
if (cpu_has_vx()) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
- __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+ __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
&sregs_ext->vxrs_high,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
for (i = 0; i < __NUM_VXRS_LOW; i++)
- current->thread.fpu.vxrs[i].low = vxrs[i];
+ current->thread.ufpu.vxrs[i].low = vxrs[i];
}
return 0;
}
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
goto badframe;
set_current_blocked(&set);
- save_fpu_regs();
+ save_user_fpu_regs();
if (restore_sigregs(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_ext(regs, &frame->sregs_ext))
set_current_blocked(&set);
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
- save_fpu_regs();
+ save_user_fpu_regs();
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
}
/* get vector interrupt code from fpc */
- save_fpu_regs();
- vic = (current->thread.fpu.fpc & 0xf00) >> 8;
+ save_user_fpu_regs();
+ vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
si_code = FPE_FLTINV;
static void data_exception(struct pt_regs *regs)
{
- save_fpu_regs();
- if (current->thread.fpu.fpc & FPC_DXC_MASK)
- do_fp_trap(regs, current->thread.fpu.fpc);
+ save_user_fpu_regs();
+ if (current->thread.ufpu.fpc & FPC_DXC_MASK)
+ do_fp_trap(regs, current->thread.ufpu.fpc);
else
do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
}
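/*
 * Both trap handlers need the architecturally current FPC, hence the
 * save_user_fpu_regs() flush before inspecting it. Bits 8-15 of the
 * FPC hold the data-exception code (DXC); for vector instructions its
 * high nibble is the vector interrupt code mapped to an si_code above.
 * Sketch of the extraction:
 */
static unsigned int sketch_vector_interrupt_code(unsigned int fpc)
{
	return (fpc & 0xf00) >> 8;	/* DXC high nibble */
}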
mci.val = mchk->mcic;
/* take care of lazy register loading */
- save_fpu_regs();
+ save_user_fpu_regs();
save_access_regs(vcpu->run->s.regs.acrs);
if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
}
rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
vcpu->run->s.regs.gprs, 128);
- rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
+ rc |= put_guest_lc(vcpu, current->thread.ufpu.fpc,
(u32 __user *) __LC_FP_CREG_SAVE_AREA);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
(u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
sizeof(sie_page->pv_grregs));
}
if (test_thread_flag(TIF_FPU))
- load_fpu_regs();
+ load_user_fpu_regs();
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
/* save host (userspace) fprs/vrs */
- save_fpu_regs();
- vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
- vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+ save_user_fpu_regs();
+ vcpu->arch.host_fpregs.fpc = current->thread.ufpu.fpc;
+ vcpu->arch.host_fpregs.regs = current->thread.ufpu.regs;
if (cpu_has_vx())
- current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+ current->thread.ufpu.regs = vcpu->run->s.regs.vrs;
else
- current->thread.fpu.regs = vcpu->run->s.regs.fprs;
- current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+ current->thread.ufpu.regs = vcpu->run->s.regs.fprs;
+ current->thread.ufpu.fpc = vcpu->run->s.regs.fpc;
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
/* Save guest register state */
- save_fpu_regs();
- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ save_user_fpu_regs();
+ vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
/* Restore will be done lazily at return */
- current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
- current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+ current->thread.ufpu.fpc = vcpu->arch.host_fpregs.fpc;
+ current->thread.ufpu.regs = vcpu->arch.host_fpregs.regs;
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
store_regs_fmt2(vcpu);
}
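/*
 * The KVM hunks reuse the same lazy machinery for the guest: on vcpu
 * load the host descriptor is stashed and current->thread.ufpu.regs is
 * repointed at the guest's vrs/fprs save area, so the ordinary save
 * and load paths then move guest state; on vcpu put the host pointers
 * come back and the reload happens lazily on return to user space.
 * Sketch of the load-side swap, using the field names from the hunks
 * above (not the exact KVM code):
 */
static void sketch_vcpu_load_fpu(struct kvm_vcpu *vcpu)
{
	save_user_fpu_regs();	/* host user state -> current->thread.ufpu */
	vcpu->arch.host_fpregs.fpc = current->thread.ufpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.ufpu.regs;
	if (cpu_has_vx())
		current->thread.ufpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.ufpu.regs = vcpu->run->s.regs.fprs;
	current->thread.ufpu.fpc = vcpu->run->s.regs.fpc;
}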
* switch in the run ioctl. Let's update our copies before we save
* it into the save area
*/
- save_fpu_regs();
- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ save_user_fpu_regs();
+ vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
save_access_regs(vcpu->run->s.regs.acrs);
return kvm_s390_store_status_unloaded(vcpu, addr);
vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
barrier();
if (test_thread_flag(TIF_FPU))
- load_fpu_regs();
+ load_user_fpu_regs();
if (!kvm_s390_vcpu_sie_inhibited(vcpu))
rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
barrier();