1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <asm/kvm_emulate.h>
26 #include <linux/stringify.h>
27 #include <asm/debugreg.h>
37 #define OpImplicit 1ull /* No generic decode */
38 #define OpReg 2ull /* Register */
39 #define OpMem 3ull /* Memory */
40 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
41 #define OpDI 5ull /* ES:DI/EDI/RDI */
42 #define OpMem64 6ull /* Memory, 64-bit */
43 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
44 #define OpDX 8ull /* DX register */
45 #define OpCL 9ull /* CL register (for shifts) */
46 #define OpImmByte 10ull /* 8-bit sign extended immediate */
47 #define OpOne 11ull /* Implied 1 */
48 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
49 #define OpMem16 13ull /* Memory operand (16-bit). */
50 #define OpMem32 14ull /* Memory operand (32-bit). */
51 #define OpImmU 15ull /* Immediate operand, zero extended */
52 #define OpSI 16ull /* SI/ESI/RSI */
53 #define OpImmFAddr 17ull /* Immediate far address */
54 #define OpMemFAddr 18ull /* Far address in memory */
55 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
56 #define OpES 20ull /* ES */
57 #define OpCS 21ull /* CS */
58 #define OpSS 22ull /* SS */
59 #define OpDS 23ull /* DS */
60 #define OpFS 24ull /* FS */
61 #define OpGS 25ull /* GS */
62 #define OpMem8 26ull /* 8-bit zero extended memory operand */
63 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
64 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
65 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
66 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
68 #define OpBits 5 /* Width of operand field */
69 #define OpMask ((1ull << OpBits) - 1)
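/*
 * Annotation (sketch, not in the original source): each 5-bit operand
 * code above is packed into the per-opcode u64 flags word at DstShift,
 * SrcShift and Src2Shift (Src2Shift is 31, defined below), so the
 * decoder recovers an operand type with a shift and a mask:
 *
 *	unsigned dst_type  = (ctxt->d >> DstShift)  & OpMask;
 *	unsigned src2_type = (ctxt->d >> Src2Shift) & OpMask;
 *
 * With OpBits = 5, OpMask is 0x1f, wide enough for all the codes above.
 */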
72 * Opcode effective-address decode tables.
73 * Note that we only emulate instructions that have at least one memory
74 * operand (excluding implicit stack references). We assume that stack
75 * references and instruction fetches will never occur in special memory
76 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
77 * not be handled.
80 /* Operand sizes: 8-bit operands or specified/overridden size. */
81 #define ByteOp (1<<0) /* 8-bit operands. */
82 /* Destination operand type. */
84 #define ImplicitOps (OpImplicit << DstShift)
85 #define DstReg (OpReg << DstShift)
86 #define DstMem (OpMem << DstShift)
87 #define DstAcc (OpAcc << DstShift)
88 #define DstDI (OpDI << DstShift)
89 #define DstMem64 (OpMem64 << DstShift)
90 #define DstMem16 (OpMem16 << DstShift)
91 #define DstImmUByte (OpImmUByte << DstShift)
92 #define DstDX (OpDX << DstShift)
93 #define DstAccLo (OpAccLo << DstShift)
94 #define DstMask (OpMask << DstShift)
95 /* Source operand type. */
97 #define SrcNone (OpNone << SrcShift)
98 #define SrcReg (OpReg << SrcShift)
99 #define SrcMem (OpMem << SrcShift)
100 #define SrcMem16 (OpMem16 << SrcShift)
101 #define SrcMem32 (OpMem32 << SrcShift)
102 #define SrcImm (OpImm << SrcShift)
103 #define SrcImmByte (OpImmByte << SrcShift)
104 #define SrcOne (OpOne << SrcShift)
105 #define SrcImmUByte (OpImmUByte << SrcShift)
106 #define SrcImmU (OpImmU << SrcShift)
107 #define SrcSI (OpSI << SrcShift)
108 #define SrcXLat (OpXLat << SrcShift)
109 #define SrcImmFAddr (OpImmFAddr << SrcShift)
110 #define SrcMemFAddr (OpMemFAddr << SrcShift)
111 #define SrcAcc (OpAcc << SrcShift)
112 #define SrcImmU16 (OpImmU16 << SrcShift)
113 #define SrcImm64 (OpImm64 << SrcShift)
114 #define SrcDX (OpDX << SrcShift)
115 #define SrcMem8 (OpMem8 << SrcShift)
116 #define SrcAccHi (OpAccHi << SrcShift)
117 #define SrcMask (OpMask << SrcShift)
118 #define BitOp (1<<11)
119 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
120 #define String (1<<13) /* String instruction (rep capable) */
121 #define Stack (1<<14) /* Stack instruction (push/pop) */
122 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
123 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
124 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
125 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
126 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
127 #define Escape (5<<15) /* Escape to coprocessor instruction */
128 #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
129 #define ModeDual (7<<15) /* Different instruction for 32/64 bit */
130 #define Sse (1<<18) /* SSE Vector instruction */
131 /* Generic ModRM decode. */
132 #define ModRM (1<<19)
133 /* Destination is only written; never read. */
136 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
137 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
138 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
139 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
140 #define Undefined (1<<25) /* No Such Instruction */
141 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
142 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
144 #define PageTable (1 << 29) /* instruction used to write page table */
145 #define NotImpl (1 << 30) /* instruction is not implemented */
146 /* Source 2 operand type */
147 #define Src2Shift (31)
148 #define Src2None (OpNone << Src2Shift)
149 #define Src2Mem (OpMem << Src2Shift)
150 #define Src2CL (OpCL << Src2Shift)
151 #define Src2ImmByte (OpImmByte << Src2Shift)
152 #define Src2One (OpOne << Src2Shift)
153 #define Src2Imm (OpImm << Src2Shift)
154 #define Src2ES (OpES << Src2Shift)
155 #define Src2CS (OpCS << Src2Shift)
156 #define Src2SS (OpSS << Src2Shift)
157 #define Src2DS (OpDS << Src2Shift)
158 #define Src2FS (OpFS << Src2Shift)
159 #define Src2GS (OpGS << Src2Shift)
160 #define Src2Mask (OpMask << Src2Shift)
161 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
162 #define AlignMask ((u64)7 << 41)
163 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
164 #define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
165 #define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
166 #define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
167 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
168 #define NoWrite ((u64)1 << 45) /* No writeback */
169 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
170 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
171 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
172 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
173 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
174 #define NearBranch ((u64)1 << 52) /* Near branches */
175 #define No16 ((u64)1 << 53) /* No 16 bit operand */
176 #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
177 #define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operands */
179 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
181 #define X2(x...) x, x
182 #define X3(x...) X2(x), x
183 #define X4(x...) X2(x), X2(x)
184 #define X5(x...) X4(x), x
185 #define X6(x...) X4(x), X2(x)
186 #define X7(x...) X4(x), X3(x)
187 #define X8(x...) X4(x), X4(x)
188 #define X16(x...) X8(x), X8(x)
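/*
 * Annotation: each X-macro replicates its argument list, e.g. X16(x)
 * expands to sixteen copies of x and X6(x) to X4(x), X2(x) = 6 copies;
 * they keep the opcode tables compact.
 */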
190 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
191 #define FASTOP_SIZE 8
194 * fastop functions have a special calling convention:
199 * flags: rflags (in/out)
200 * ex: rsi (in:fastop pointer, out:zero if exception)
202 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
203 * different operand sizes can be reached by calculation, rather than a jump
204 * table (which would be bigger than the code).
206 * fastop functions are declared as taking a never-defined fastop parameter,
207 * so they can't be called from C directly.
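/*
 * Annotation (sketch of the dispatch, assuming the fastop() helper
 * declared below): because the stubs are FASTOP_SIZE bytes apart in
 * b/w/l/q order, the handler for an N-byte operand can be reached by
 * pointer arithmetic rather than a table, along the lines of:
 *
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * i.e. offsets 0/1/2/3 * FASTOP_SIZE for 1/2/4/8-byte operands.
 */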
216 int (*execute)(struct x86_emulate_ctxt *ctxt);
217 const struct opcode *group;
218 const struct group_dual *gdual;
219 const struct gprefix *gprefix;
220 const struct escape *esc;
221 const struct instr_dual *idual;
222 const struct mode_dual *mdual;
223 void (*fastop)(struct fastop *fake);
225 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
229 struct opcode mod012[8];
230 struct opcode mod3[8];
234 struct opcode pfx_no;
235 struct opcode pfx_66;
236 struct opcode pfx_f2;
237 struct opcode pfx_f3;
242 struct opcode high[64];
246 struct opcode mod012;
251 struct opcode mode32;
252 struct opcode mode64;
255 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
257 enum x86_transfer_type {
259 X86_TRANSFER_CALL_JMP,
261 X86_TRANSFER_TASK_SWITCH,
264 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
266 if (!(ctxt->regs_valid & (1 << nr))) {
267 ctxt->regs_valid |= 1 << nr;
268 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
270 return ctxt->_regs[nr];
273 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
275 ctxt->regs_valid |= 1 << nr;
276 ctxt->regs_dirty |= 1 << nr;
277 return &ctxt->_regs[nr];
280 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
283 return reg_write(ctxt, nr);
286 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
290 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
291 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
294 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
296 ctxt->regs_dirty = 0;
297 ctxt->regs_valid = 0;
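/*
 * Annotation: together these helpers form a lazy GPR cache.  reg_read()
 * pulls a register from the host on first use and marks it valid;
 * reg_write() and reg_rmw() also mark it dirty; writeback_registers()
 * flushes only dirty registers via ->write_gpr(); and
 * invalidate_registers() resets both bitmaps between emulations.
 */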
301 * These EFLAGS bits are restored from saved value during emulation, and
302 * any changes are written back to the saved value after emulation.
304 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
305 X86_EFLAGS_PF|X86_EFLAGS_CF)
313 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
315 #define FOP_FUNC(name) \
316 ".align " __stringify(FASTOP_SIZE) " \n\t" \
317 ".type " name ", @function \n\t" \
320 #define FOP_RET "ret \n\t"
322 #define FOP_START(op) \
323 extern void em_##op(struct fastop *fake); \
324 asm(".pushsection .text, \"ax\" \n\t" \
325 ".global em_" #op " \n\t" \
332 FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
335 #define FOP1E(op, dst) \
336 FOP_FUNC(#op "_" #dst) \
337 "10: " #op " %" #dst " \n\t" FOP_RET
339 #define FOP1EEX(op, dst) \
340 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
342 #define FASTOP1(op) \
347 ON64(FOP1E(op##q, rax)) \
350 /* 1-operand, using src2 (for MUL/DIV r/m) */
351 #define FASTOP1SRC2(op, name) \
356 ON64(FOP1E(op, rcx)) \
359 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
360 #define FASTOP1SRC2EX(op, name) \
365 ON64(FOP1EEX(op, rcx)) \
368 #define FOP2E(op, dst, src) \
369 FOP_FUNC(#op "_" #dst "_" #src) \
370 #op " %" #src ", %" #dst " \n\t" FOP_RET
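/*
 * Annotation (approximate expansion): FOP2E(addl, eax, edx) emits
 *
 *	.align 8
 *	.type addl_eax_edx, @function
 *	addl_eax_edx: addl %edx, %eax; ret
 *
 * so FASTOP2(add) below yields one such stub per operand size, spaced
 * FASTOP_SIZE bytes apart.
 */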
372 #define FASTOP2(op) \
374 FOP2E(op##b, al, dl) \
375 FOP2E(op##w, ax, dx) \
376 FOP2E(op##l, eax, edx) \
377 ON64(FOP2E(op##q, rax, rdx)) \
380 /* 2 operand, word only */
381 #define FASTOP2W(op) \
384 FOP2E(op##w, ax, dx) \
385 FOP2E(op##l, eax, edx) \
386 ON64(FOP2E(op##q, rax, rdx)) \
389 /* 2 operand, src is CL */
390 #define FASTOP2CL(op) \
392 FOP2E(op##b, al, cl) \
393 FOP2E(op##w, ax, cl) \
394 FOP2E(op##l, eax, cl) \
395 ON64(FOP2E(op##q, rax, cl)) \
398 /* 2 operand, src and dest are reversed */
399 #define FASTOP2R(op, name) \
401 FOP2E(op##b, dl, al) \
402 FOP2E(op##w, dx, ax) \
403 FOP2E(op##l, edx, eax) \
404 ON64(FOP2E(op##q, rdx, rax)) \
407 #define FOP3E(op, dst, src, src2) \
408 FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
409 #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
411 /* 3-operand, word-only, src2=cl */
412 #define FASTOP3WCL(op) \
415 FOP3E(op##w, ax, dx, cl) \
416 FOP3E(op##l, eax, edx, cl) \
417 ON64(FOP3E(op##q, rax, rdx, cl)) \
420 /* Special case for SETcc - 1 instruction per cc */
421 #define FOP_SETCC(op) \
423 ".type " #op ", @function \n\t" \
428 asm(".global kvm_fastop_exception \n"
429 "kvm_fastop_exception: xor %esi, %esi; ret");
450 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
454 * XXX: inoutclob user must know where the argument is being expanded.
455 * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
457 #define asm_safe(insn, inoutclob...) \
461 asm volatile("1:" insn "\n" \
463 ".pushsection .fixup, \"ax\"\n" \
464 "3: movl $1, %[_fault]\n" \
467 _ASM_EXTABLE(1b, 3b) \
468 : [_fault] "+qm"(_fault) inoutclob ); \
470 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
473 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
474 enum x86_intercept intercept,
475 enum x86_intercept_stage stage)
477 struct x86_instruction_info info = {
478 .intercept = intercept,
479 .rep_prefix = ctxt->rep_prefix,
480 .modrm_mod = ctxt->modrm_mod,
481 .modrm_reg = ctxt->modrm_reg,
482 .modrm_rm = ctxt->modrm_rm,
483 .src_val = ctxt->src.val64,
484 .dst_val = ctxt->dst.val64,
485 .src_bytes = ctxt->src.bytes,
486 .dst_bytes = ctxt->dst.bytes,
487 .ad_bytes = ctxt->ad_bytes,
488 .next_rip = ctxt->eip,
491 return ctxt->ops->intercept(ctxt, &info, stage);
494 static void assign_masked(ulong *dest, ulong src, ulong mask)
496 *dest = (*dest & ~mask) | (src & mask);
499 static void assign_register(unsigned long *reg, u64 val, int bytes)
501 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
504 *(u8 *)reg = (u8)val;
507 *(u16 *)reg = (u16)val;
511 break; /* 64b: zero-extend */
518 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
520 return (1UL << (ctxt->ad_bytes << 3)) - 1;
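/*
 * Annotation: ad_bytes = 2 gives 0xffff, 4 gives 0xffffffff.  ad_bytes = 8
 * would shift by 64 (undefined in C), which is why address_mask() below
 * returns the register unmasked when ad_bytes == sizeof(unsigned long).
 */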
523 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
526 struct desc_struct ss;
528 if (ctxt->mode == X86EMUL_MODE_PROT64)
530 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
531 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
534 static int stack_size(struct x86_emulate_ctxt *ctxt)
536 return (__fls(stack_mask(ctxt)) + 1) >> 3;
539 /* Access/update address held in a register, based on addressing mode. */
540 static inline unsigned long
541 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
543 if (ctxt->ad_bytes == sizeof(unsigned long))
546 return reg & ad_mask(ctxt);
549 static inline unsigned long
550 register_address(struct x86_emulate_ctxt *ctxt, int reg)
552 return address_mask(ctxt, reg_read(ctxt, reg));
555 static void masked_increment(ulong *reg, ulong mask, int inc)
557 assign_masked(reg, *reg + inc, mask);
561 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
563 ulong *preg = reg_rmw(ctxt, reg);
565 assign_register(preg, *preg + inc, ctxt->ad_bytes);
568 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
570 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
573 static u32 desc_limit_scaled(struct desc_struct *desc)
575 u32 limit = get_desc_limit(desc);
577 return desc->g ? (limit << 12) | 0xfff : limit;
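/*
 * Annotation: with the granularity bit set, the 20-bit limit counts 4K
 * pages; e.g. g = 1, limit = 0xfffff scales to 0xffffffff (4 GiB - 1).
 */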
580 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
582 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
585 return ctxt->ops->get_cached_segment_base(ctxt, seg);
588 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
589 u32 error, bool valid)
592 ctxt->exception.vector = vec;
593 ctxt->exception.error_code = error;
594 ctxt->exception.error_code_valid = valid;
595 return X86EMUL_PROPAGATE_FAULT;
598 static int emulate_db(struct x86_emulate_ctxt *ctxt)
600 return emulate_exception(ctxt, DB_VECTOR, 0, false);
603 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
605 return emulate_exception(ctxt, GP_VECTOR, err, true);
608 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
610 return emulate_exception(ctxt, SS_VECTOR, err, true);
613 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
615 return emulate_exception(ctxt, UD_VECTOR, 0, false);
618 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
620 return emulate_exception(ctxt, TS_VECTOR, err, true);
623 static int emulate_de(struct x86_emulate_ctxt *ctxt)
625 return emulate_exception(ctxt, DE_VECTOR, 0, false);
628 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
630 return emulate_exception(ctxt, NM_VECTOR, 0, false);
633 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
636 struct desc_struct desc;
638 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
642 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
647 struct desc_struct desc;
649 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
650 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
654 * x86 defines three classes of vector instructions: explicitly
655 * aligned, explicitly unaligned, and the rest, which change behaviour
656 * depending on whether they're AVX encoded or not.
658 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
659 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
660 * 512 bytes of data must be aligned to a 16 byte boundary.
662 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
664 u64 alignment = ctxt->d & AlignMask;
666 if (likely(size < 16))
681 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
682 struct segmented_address addr,
683 unsigned *max_size, unsigned size,
684 bool write, bool fetch,
685 enum x86emul_mode mode, ulong *linear)
687 struct desc_struct desc;
694 la = seg_base(ctxt, addr.seg) + addr.ea;
697 case X86EMUL_MODE_PROT64:
699 va_bits = ctxt_virt_addr_bits(ctxt);
700 if (get_canonical(la, va_bits) != la)
703 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
704 if (size > *max_size)
708 *linear = la = (u32)la;
709 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
713 /* code segment in protected mode or read-only data segment */
714 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
715 || !(desc.type & 2)) && write)
717 /* unreadable code segment */
718 if (!fetch && (desc.type & 8) && !(desc.type & 2))
720 lim = desc_limit_scaled(&desc);
721 if (!(desc.type & 8) && (desc.type & 4)) {
722 /* expand-down segment */
725 lim = desc.d ? 0xffffffff : 0xffff;
729 if (lim == 0xffffffff)
732 *max_size = (u64)lim + 1 - addr.ea;
733 if (size > *max_size)
738 if (la & (insn_alignment(ctxt, size) - 1))
739 return emulate_gp(ctxt, 0);
740 return X86EMUL_CONTINUE;
742 if (addr.seg == VCPU_SREG_SS)
743 return emulate_ss(ctxt, 0);
745 return emulate_gp(ctxt, 0);
748 static int linearize(struct x86_emulate_ctxt *ctxt,
749 struct segmented_address addr,
750 unsigned size, bool write,
754 return __linearize(ctxt, addr, &max_size, size, write, false,
758 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
759 enum x86emul_mode mode)
764 struct segmented_address addr = { .seg = VCPU_SREG_CS,
767 if (ctxt->op_bytes != sizeof(unsigned long))
768 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
769 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
770 if (rc == X86EMUL_CONTINUE)
771 ctxt->_eip = addr.ea;
775 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
777 return assign_eip(ctxt, dst, ctxt->mode);
780 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
781 const struct desc_struct *cs_desc)
783 enum x86emul_mode mode = ctxt->mode;
787 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
791 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
793 mode = X86EMUL_MODE_PROT64;
795 mode = X86EMUL_MODE_PROT32; /* temporary value */
798 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
799 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
800 rc = assign_eip(ctxt, dst, mode);
801 if (rc == X86EMUL_CONTINUE)
806 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
808 return assign_eip_near(ctxt, ctxt->_eip + rel);
811 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
812 struct segmented_address addr,
819 rc = linearize(ctxt, addr, size, false, &linear);
820 if (rc != X86EMUL_CONTINUE)
822 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
825 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
826 struct segmented_address addr,
833 rc = linearize(ctxt, addr, size, true, &linear);
834 if (rc != X86EMUL_CONTINUE)
836 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
840 * Prefetch the remaining bytes of the instruction without crossing a page
841 * boundary if they are not in fetch_cache yet.
843 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
846 unsigned size, max_size;
847 unsigned long linear;
848 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
849 struct segmented_address addr = { .seg = VCPU_SREG_CS,
850 .ea = ctxt->eip + cur_size };
853 * We do not know exactly how many bytes will be needed, and
854 * __linearize is expensive, so fetch as much as possible. We
855 * just have to avoid going beyond the 15 byte limit, the end
856 * of the segment, or the end of the page.
858 * __linearize is called with size 0 so that it does not do any
859 * boundary check itself. Instead, we use max_size to check against op_size.
862 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
864 if (unlikely(rc != X86EMUL_CONTINUE))
867 size = min_t(unsigned, 15UL ^ cur_size, max_size);
868 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
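/*
 * Annotation: cur_size is at most 15 here, so "15UL ^ cur_size" above
 * equals 15 - cur_size; the two min_t() calls cap the fetch at the
 * architectural 15-byte instruction limit and at the page boundary.
 */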
871 * One instruction can only straddle two pages,
872 * and one has been loaded at the beginning of
873 * x86_decode_insn. So, if we still do not have
874 * enough bytes, we must have hit the 15-byte boundary.
876 if (unlikely(size < op_size))
877 return emulate_gp(ctxt, 0);
879 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
880 size, &ctxt->exception);
881 if (unlikely(rc != X86EMUL_CONTINUE))
883 ctxt->fetch.end += size;
884 return X86EMUL_CONTINUE;
887 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
890 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
892 if (unlikely(done_size < size))
893 return __do_insn_fetch_bytes(ctxt, size - done_size);
895 return X86EMUL_CONTINUE;
898 /* Fetch next part of the instruction being emulated. */
899 #define insn_fetch(_type, _ctxt) \
902 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
903 if (rc != X86EMUL_CONTINUE) \
905 ctxt->_eip += sizeof(_type); \
906 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
907 ctxt->fetch.ptr += sizeof(_type); \
911 #define insn_fetch_arr(_arr, _size, _ctxt) \
913 rc = do_insn_fetch_bytes(_ctxt, _size); \
914 if (rc != X86EMUL_CONTINUE) \
916 ctxt->_eip += (_size); \
917 memcpy(_arr, ctxt->fetch.ptr, _size); \
918 ctxt->fetch.ptr += (_size); \
922 * Given the 'reg' portion of a ModRM byte, and a register block, return a
923 * pointer into the block that addresses the relevant register.
924 * When @byteop is set and there is no REX prefix, registers 4-7 decode to
925 * the legacy high-byte registers AH, CH, DH and BH.
926 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
930 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
932 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
933 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
935 p = reg_rmw(ctxt, modrm_reg);
939 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
940 struct segmented_address addr,
941 u16 *size, unsigned long *address, int op_bytes)
948 rc = segmented_read_std(ctxt, addr, size, 2);
949 if (rc != X86EMUL_CONTINUE)
952 rc = segmented_read_std(ctxt, addr, address, op_bytes);
966 FASTOP1SRC2(mul, mul_ex);
967 FASTOP1SRC2(imul, imul_ex);
968 FASTOP1SRC2EX(div, div_ex);
969 FASTOP1SRC2EX(idiv, idiv_ex);
998 FASTOP2R(cmp, cmp_r);
1000 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1002 /* If src is zero, do not write back, but update flags */
1003 if (ctxt->src.val == 0)
1004 ctxt->dst.type = OP_NONE;
1005 return fastop(ctxt, em_bsf);
1008 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1011 /* If src is zero, do not write back, but update flags */
1011 if (ctxt->src.val == 0)
1012 ctxt->dst.type = OP_NONE;
1013 return fastop(ctxt, em_bsr);
1016 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1019 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1021 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
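/*
 * Annotation: the asm below loads the guest's arithmetic flags into
 * hardware RFLAGS (X86_EFLAGS_IF is forced on so host interrupts stay
 * enabled across the popf) and calls the 4-byte setcc stub for this
 * condition; the stub leaves its result in AL, returned via "=a"(rc).
 */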
1022 asm("push %[flags]; popf; call *%[fastop]"
1023 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
1027 static void fetch_register_operand(struct operand *op)
1029 switch (op->bytes) {
1031 op->val = *(u8 *)op->addr.reg;
1034 op->val = *(u16 *)op->addr.reg;
1037 op->val = *(u32 *)op->addr.reg;
1040 op->val = *(u64 *)op->addr.reg;
1045 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1047 ctxt->ops->get_fpu(ctxt);
1049 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1050 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1051 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1052 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1053 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1054 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1055 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1056 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1057 #ifdef CONFIG_X86_64
1058 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1059 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1060 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1061 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1062 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1063 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1064 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1065 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1069 ctxt->ops->put_fpu(ctxt);
1072 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1075 ctxt->ops->get_fpu(ctxt);
1077 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1078 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1079 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1080 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1081 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1082 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1083 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1084 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1085 #ifdef CONFIG_X86_64
1086 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1087 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1088 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1089 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1090 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1091 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1092 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1093 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1097 ctxt->ops->put_fpu(ctxt);
1100 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1102 ctxt->ops->get_fpu(ctxt);
1104 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1105 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1106 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1107 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1108 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1109 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1110 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1111 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1114 ctxt->ops->put_fpu(ctxt);
1117 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1119 ctxt->ops->get_fpu(ctxt);
1121 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1122 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1123 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1124 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1125 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1126 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1127 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1128 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1131 ctxt->ops->put_fpu(ctxt);
1134 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1136 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1137 return emulate_nm(ctxt);
1139 ctxt->ops->get_fpu(ctxt);
1140 asm volatile("fninit");
1141 ctxt->ops->put_fpu(ctxt);
1142 return X86EMUL_CONTINUE;
1145 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1149 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1150 return emulate_nm(ctxt);
1152 ctxt->ops->get_fpu(ctxt);
1153 asm volatile("fnstcw %0": "+m"(fcw));
1154 ctxt->ops->put_fpu(ctxt);
1156 ctxt->dst.val = fcw;
1158 return X86EMUL_CONTINUE;
1161 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1165 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1166 return emulate_nm(ctxt);
1168 ctxt->ops->get_fpu(ctxt);
1169 asm volatile("fnstsw %0": "+m"(fsw));
1170 ctxt->ops->put_fpu(ctxt);
1172 ctxt->dst.val = fsw;
1174 return X86EMUL_CONTINUE;
1177 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1180 unsigned reg = ctxt->modrm_reg;
1182 if (!(ctxt->d & ModRM))
1183 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1185 if (ctxt->d & Sse) {
1189 read_sse_reg(ctxt, &op->vec_val, reg);
1192 if (ctxt->d & Mmx) {
1201 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1202 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1204 fetch_register_operand(op);
1205 op->orig_val = op->val;
1208 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1210 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1211 ctxt->modrm_seg = VCPU_SREG_SS;
1214 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1218 int index_reg, base_reg, scale;
1219 int rc = X86EMUL_CONTINUE;
1222 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1223 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1224 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1226 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1227 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1228 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1229 ctxt->modrm_seg = VCPU_SREG_DS;
1231 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1233 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1234 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1236 if (ctxt->d & Sse) {
1239 op->addr.xmm = ctxt->modrm_rm;
1240 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1243 if (ctxt->d & Mmx) {
1246 op->addr.mm = ctxt->modrm_rm & 7;
1249 fetch_register_operand(op);
1255 if (ctxt->ad_bytes == 2) {
1256 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1257 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1258 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1259 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1261 /* 16-bit ModR/M decode. */
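/*
 * Annotation: the standard 16-bit r/m base table for rm = 0..7 is
 * BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP (disp16 when mod == 0), BX;
 * the BP-based forms default to the SS segment, handled below.
 */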
1262 switch (ctxt->modrm_mod) {
1264 if (ctxt->modrm_rm == 6)
1265 modrm_ea += insn_fetch(u16, ctxt);
1268 modrm_ea += insn_fetch(s8, ctxt);
1271 modrm_ea += insn_fetch(u16, ctxt);
1274 switch (ctxt->modrm_rm) {
1276 modrm_ea += bx + si;
1279 modrm_ea += bx + di;
1282 modrm_ea += bp + si;
1285 modrm_ea += bp + di;
1294 if (ctxt->modrm_mod != 0)
1301 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1302 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1303 ctxt->modrm_seg = VCPU_SREG_SS;
1304 modrm_ea = (u16)modrm_ea;
1306 /* 32/64-bit ModR/M decode. */
1307 if ((ctxt->modrm_rm & 7) == 4) {
1308 sib = insn_fetch(u8, ctxt);
1309 index_reg |= (sib >> 3) & 7;
1310 base_reg |= sib & 7;
1313 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1314 modrm_ea += insn_fetch(s32, ctxt);
1316 modrm_ea += reg_read(ctxt, base_reg);
1317 adjust_modrm_seg(ctxt, base_reg);
1318 /* Increment ESP on POP [ESP] */
1319 if ((ctxt->d & IncSP) &&
1320 base_reg == VCPU_REGS_RSP)
1321 modrm_ea += ctxt->op_bytes;
1324 modrm_ea += reg_read(ctxt, index_reg) << scale;
1325 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1326 modrm_ea += insn_fetch(s32, ctxt);
1327 if (ctxt->mode == X86EMUL_MODE_PROT64)
1328 ctxt->rip_relative = 1;
1330 base_reg = ctxt->modrm_rm;
1331 modrm_ea += reg_read(ctxt, base_reg);
1332 adjust_modrm_seg(ctxt, base_reg);
1334 switch (ctxt->modrm_mod) {
1336 modrm_ea += insn_fetch(s8, ctxt);
1339 modrm_ea += insn_fetch(s32, ctxt);
1343 op->addr.mem.ea = modrm_ea;
1344 if (ctxt->ad_bytes != 8)
1345 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1351 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1354 int rc = X86EMUL_CONTINUE;
1357 switch (ctxt->ad_bytes) {
1359 op->addr.mem.ea = insn_fetch(u16, ctxt);
1362 op->addr.mem.ea = insn_fetch(u32, ctxt);
1365 op->addr.mem.ea = insn_fetch(u64, ctxt);
1372 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1376 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1377 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1379 if (ctxt->src.bytes == 2)
1380 sv = (s16)ctxt->src.val & (s16)mask;
1381 else if (ctxt->src.bytes == 4)
1382 sv = (s32)ctxt->src.val & (s32)mask;
1384 sv = (s64)ctxt->src.val & (s64)mask;
1386 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1387 ctxt->dst.addr.mem.ea + (sv >> 3));
1390 /* only subword offset */
1391 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
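/*
 * Annotation, worked example: for a 16-bit "bt mem, reg" with
 * src.val = 35, mask = ~15 so sv = 32; the effective address advances
 * by sv >> 3 = 4 bytes and src.val becomes 35 & 15 = 3, i.e. bit 3 of
 * the word 4 bytes past the original address is tested.
 */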
1394 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1395 unsigned long addr, void *dest, unsigned size)
1398 struct read_cache *mc = &ctxt->mem_read;
1400 if (mc->pos < mc->end)
1403 WARN_ON((mc->end + size) >= sizeof(mc->data));
1405 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1407 if (rc != X86EMUL_CONTINUE)
1413 memcpy(dest, mc->data + mc->pos, size);
1415 return X86EMUL_CONTINUE;
1418 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1419 struct segmented_address addr,
1426 rc = linearize(ctxt, addr, size, false, &linear);
1427 if (rc != X86EMUL_CONTINUE)
1429 return read_emulated(ctxt, linear, data, size);
1432 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1433 struct segmented_address addr,
1440 rc = linearize(ctxt, addr, size, true, &linear);
1441 if (rc != X86EMUL_CONTINUE)
1443 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1447 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1448 struct segmented_address addr,
1449 const void *orig_data, const void *data,
1455 rc = linearize(ctxt, addr, size, true, &linear);
1456 if (rc != X86EMUL_CONTINUE)
1458 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1459 size, &ctxt->exception);
1462 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1463 unsigned int size, unsigned short port,
1466 struct read_cache *rc = &ctxt->io_read;
1468 if (rc->pos == rc->end) { /* refill pio read ahead */
1469 unsigned int in_page, n;
1470 unsigned int count = ctxt->rep_prefix ?
1471 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1472 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1473 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1474 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1475 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
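/*
 * Annotation: min3() caps the read-ahead so the buffered data never
 * crosses the page holding RDI (scanning backwards when DF is set),
 * never overflows rc->data, and never exceeds the REP count.
 */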
1478 rc->pos = rc->end = 0;
1479 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1484 if (ctxt->rep_prefix && (ctxt->d & String) &&
1485 !(ctxt->eflags & X86_EFLAGS_DF)) {
1486 ctxt->dst.data = rc->data + rc->pos;
1487 ctxt->dst.type = OP_MEM_STR;
1488 ctxt->dst.count = (rc->end - rc->pos) / size;
1491 memcpy(dest, rc->data + rc->pos, size);
1497 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1498 u16 index, struct desc_struct *desc)
1503 ctxt->ops->get_idt(ctxt, &dt);
1505 if (dt.size < index * 8 + 7)
1506 return emulate_gp(ctxt, index << 3 | 0x2);
1508 addr = dt.address + index * 8;
1509 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1513 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1514 u16 selector, struct desc_ptr *dt)
1516 const struct x86_emulate_ops *ops = ctxt->ops;
1519 if (selector & 1 << 2) {
1520 struct desc_struct desc;
1523 memset(dt, 0, sizeof(*dt));
1524 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1528 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1529 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1531 ops->get_gdt(ctxt, dt);
1534 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1535 u16 selector, ulong *desc_addr_p)
1538 u16 index = selector >> 3;
1541 get_descriptor_table_ptr(ctxt, selector, &dt);
1543 if (dt.size < index * 8 + 7)
1544 return emulate_gp(ctxt, selector & 0xfffc);
1546 addr = dt.address + index * 8;
1548 #ifdef CONFIG_X86_64
1549 if (addr >> 32 != 0) {
1552 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1553 if (!(efer & EFER_LMA))
1558 *desc_addr_p = addr;
1559 return X86EMUL_CONTINUE;
1562 /* allowed just for 8-byte segment descriptors */
1563 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1564 u16 selector, struct desc_struct *desc,
1569 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1570 if (rc != X86EMUL_CONTINUE)
1573 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1577 /* allowed just for 8-byte segment descriptors */
1578 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1579 u16 selector, struct desc_struct *desc)
1584 rc = get_descriptor_ptr(ctxt, selector, &addr);
1585 if (rc != X86EMUL_CONTINUE)
1588 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1592 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1593 u16 selector, int seg, u8 cpl,
1594 enum x86_transfer_type transfer,
1595 struct desc_struct *desc)
1597 struct desc_struct seg_desc, old_desc;
1599 unsigned err_vec = GP_VECTOR;
1601 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1607 memset(&seg_desc, 0, sizeof seg_desc);
1609 if (ctxt->mode == X86EMUL_MODE_REAL) {
1610 /* set real mode segment descriptor (keep limit etc. for
1612 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1613 set_desc_base(&seg_desc, selector << 4);
1615 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1616 /* VM86 needs a clean new segment descriptor */
1617 set_desc_base(&seg_desc, selector << 4);
1618 set_desc_limit(&seg_desc, 0xffff);
1628 /* TR should be in GDT only */
1629 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1632 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1633 if (null_selector) {
1634 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1637 if (seg == VCPU_SREG_SS) {
1638 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1642 * ctxt->ops->set_segment expects the CPL to be in
1643 * SS.DPL, so fake an expand-up 32-bit data segment.
1653 /* Skip all following checks */
1657 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1658 if (ret != X86EMUL_CONTINUE)
1661 err_code = selector & 0xfffc;
1662 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1665 /* can't load system descriptor into segment selector */
1666 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1667 if (transfer == X86_TRANSFER_CALL_JMP)
1668 return X86EMUL_UNHANDLEABLE;
1673 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1682 * segment is not a writable data segment or segment
1683 * selector's RPL != CPL or descriptor's DPL != CPL
1685 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1689 if (!(seg_desc.type & 8))
1692 if (seg_desc.type & 4) {
1698 if (rpl > cpl || dpl != cpl)
1701 /* in long-mode d/b must be clear if l is set */
1702 if (seg_desc.d && seg_desc.l) {
1705 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1706 if (efer & EFER_LMA)
1710 /* CS(RPL) <- CPL */
1711 selector = (selector & 0xfffc) | cpl;
1714 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1716 old_desc = seg_desc;
1717 seg_desc.type |= 2; /* busy */
1718 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1719 sizeof(seg_desc), &ctxt->exception);
1720 if (ret != X86EMUL_CONTINUE)
1723 case VCPU_SREG_LDTR:
1724 if (seg_desc.s || seg_desc.type != 2)
1727 default: /* DS, ES, FS, or GS */
1729 * segment is not a data or readable code segment or
1730 * ((segment is a data or nonconforming code segment)
1731 * and (both RPL and CPL > DPL))
1733 if ((seg_desc.type & 0xa) == 0x8 ||
1734 (((seg_desc.type & 0xc) != 0xc) &&
1735 (rpl > dpl && cpl > dpl)))
1741 /* mark segment as accessed */
1742 if (!(seg_desc.type & 1)) {
1744 ret = write_segment_descriptor(ctxt, selector,
1746 if (ret != X86EMUL_CONTINUE)
1749 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1750 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1751 sizeof(base3), &ctxt->exception);
1752 if (ret != X86EMUL_CONTINUE)
1754 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1755 ((u64)base3 << 32), ctxt))
1756 return emulate_gp(ctxt, 0);
1759 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1762 return X86EMUL_CONTINUE;
1764 return emulate_exception(ctxt, err_vec, err_code, true);
1767 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1768 u16 selector, int seg)
1770 u8 cpl = ctxt->ops->cpl(ctxt);
1773 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1774 * they can load it at CPL<3 (Intel's manual says only LSS can, but
1775 * it is easier to emulate and does not affect correctness).
1777 * However, the Intel manual says that putting IST=1/DPL=3 in
1778 * an interrupt gate will result in SS=3 (the AMD manual instead
1779 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1780 * and only forbid it here.
1782 if (seg == VCPU_SREG_SS && selector == 3 &&
1783 ctxt->mode == X86EMUL_MODE_PROT64)
1784 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1786 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1787 X86_TRANSFER_NONE, NULL);
1790 static void write_register_operand(struct operand *op)
1792 return assign_register(op->addr.reg, op->val, op->bytes);
1795 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1799 write_register_operand(op);
1802 if (ctxt->lock_prefix)
1803 return segmented_cmpxchg(ctxt,
1809 return segmented_write(ctxt,
1815 return segmented_write(ctxt,
1818 op->bytes * op->count);
1821 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1824 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1832 return X86EMUL_CONTINUE;
1835 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1837 struct segmented_address addr;
1839 rsp_increment(ctxt, -bytes);
1840 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1841 addr.seg = VCPU_SREG_SS;
1843 return segmented_write(ctxt, addr, data, bytes);
1846 static int em_push(struct x86_emulate_ctxt *ctxt)
1848 /* Disable writeback. */
1849 ctxt->dst.type = OP_NONE;
1850 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1853 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1854 void *dest, int len)
1857 struct segmented_address addr;
1859 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1860 addr.seg = VCPU_SREG_SS;
1861 rc = segmented_read(ctxt, addr, dest, len);
1862 if (rc != X86EMUL_CONTINUE)
1865 rsp_increment(ctxt, len);
1869 static int em_pop(struct x86_emulate_ctxt *ctxt)
1871 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1874 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1875 void *dest, int len)
1878 unsigned long val, change_mask;
1879 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1880 int cpl = ctxt->ops->cpl(ctxt);
1882 rc = emulate_pop(ctxt, &val, len);
1883 if (rc != X86EMUL_CONTINUE)
1886 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1887 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1888 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1889 X86_EFLAGS_AC | X86_EFLAGS_ID;
1891 switch(ctxt->mode) {
1892 case X86EMUL_MODE_PROT64:
1893 case X86EMUL_MODE_PROT32:
1894 case X86EMUL_MODE_PROT16:
1896 change_mask |= X86_EFLAGS_IOPL;
1898 change_mask |= X86_EFLAGS_IF;
1900 case X86EMUL_MODE_VM86:
1902 return emulate_gp(ctxt, 0);
1903 change_mask |= X86_EFLAGS_IF;
1905 default: /* real mode */
1906 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1910 *(unsigned long *)dest =
1911 (ctxt->eflags & ~change_mask) | (val & change_mask);
1916 static int em_popf(struct x86_emulate_ctxt *ctxt)
1918 ctxt->dst.type = OP_REG;
1919 ctxt->dst.addr.reg = &ctxt->eflags;
1920 ctxt->dst.bytes = ctxt->op_bytes;
1921 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1924 static int em_enter(struct x86_emulate_ctxt *ctxt)
1927 unsigned frame_size = ctxt->src.val;
1928 unsigned nesting_level = ctxt->src2.val & 31;
1932 return X86EMUL_UNHANDLEABLE;
1934 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1935 rc = push(ctxt, &rbp, stack_size(ctxt));
1936 if (rc != X86EMUL_CONTINUE)
1938 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1940 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1941 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1943 return X86EMUL_CONTINUE;
1946 static int em_leave(struct x86_emulate_ctxt *ctxt)
1948 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1950 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1953 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1955 int seg = ctxt->src2.val;
1957 ctxt->src.val = get_segment_selector(ctxt, seg);
1958 if (ctxt->op_bytes == 4) {
1959 rsp_increment(ctxt, -2);
1963 return em_push(ctxt);
1966 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1968 int seg = ctxt->src2.val;
1969 unsigned long selector;
1972 rc = emulate_pop(ctxt, &selector, 2);
1973 if (rc != X86EMUL_CONTINUE)
1976 if (ctxt->modrm_reg == VCPU_SREG_SS)
1977 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1978 if (ctxt->op_bytes > 2)
1979 rsp_increment(ctxt, ctxt->op_bytes - 2);
1981 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1985 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1987 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1988 int rc = X86EMUL_CONTINUE;
1989 int reg = VCPU_REGS_RAX;
1991 while (reg <= VCPU_REGS_RDI) {
1992 ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp : reg_read(ctxt, reg);
1996 if (rc != X86EMUL_CONTINUE)
2005 static int em_pushf(struct x86_emulate_ctxt *ctxt)
2007 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2008 return em_push(ctxt);
2011 static int em_popa(struct x86_emulate_ctxt *ctxt)
2013 int rc = X86EMUL_CONTINUE;
2014 int reg = VCPU_REGS_RDI;
2017 while (reg >= VCPU_REGS_RAX) {
2018 if (reg == VCPU_REGS_RSP) {
2019 rsp_increment(ctxt, ctxt->op_bytes);
2023 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2024 if (rc != X86EMUL_CONTINUE)
2026 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2032 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2034 const struct x86_emulate_ops *ops = ctxt->ops;
2041 /* TODO: Add limit checks */
2042 ctxt->src.val = ctxt->eflags;
2044 if (rc != X86EMUL_CONTINUE)
2047 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2049 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2051 if (rc != X86EMUL_CONTINUE)
2054 ctxt->src.val = ctxt->_eip;
2056 if (rc != X86EMUL_CONTINUE)
2059 ops->get_idt(ctxt, &dt);
2061 eip_addr = dt.address + (irq << 2);
2062 cs_addr = dt.address + (irq << 2) + 2;
2064 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
2065 if (rc != X86EMUL_CONTINUE)
2068 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
2069 if (rc != X86EMUL_CONTINUE)
2072 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2073 if (rc != X86EMUL_CONTINUE)
2081 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2085 invalidate_registers(ctxt);
2086 rc = __emulate_int_real(ctxt, irq);
2087 if (rc == X86EMUL_CONTINUE)
2088 writeback_registers(ctxt);
2092 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2094 switch(ctxt->mode) {
2095 case X86EMUL_MODE_REAL:
2096 return __emulate_int_real(ctxt, irq);
2097 case X86EMUL_MODE_VM86:
2098 case X86EMUL_MODE_PROT16:
2099 case X86EMUL_MODE_PROT32:
2100 case X86EMUL_MODE_PROT64:
2102 /* Protected mode interrupts are not yet implemented */
2103 return X86EMUL_UNHANDLEABLE;
2107 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2109 int rc = X86EMUL_CONTINUE;
2110 unsigned long temp_eip = 0;
2111 unsigned long temp_eflags = 0;
2112 unsigned long cs = 0;
2113 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2114 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2115 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2116 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2117 X86_EFLAGS_AC | X86_EFLAGS_ID |
2119 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2122 /* TODO: Add stack limit check */
2124 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2126 if (rc != X86EMUL_CONTINUE)
2129 if (temp_eip & ~0xffff)
2130 return emulate_gp(ctxt, 0);
2132 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2134 if (rc != X86EMUL_CONTINUE)
2137 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2139 if (rc != X86EMUL_CONTINUE)
2142 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2144 if (rc != X86EMUL_CONTINUE)
2147 ctxt->_eip = temp_eip;
2149 if (ctxt->op_bytes == 4)
2150 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2151 else if (ctxt->op_bytes == 2) {
2152 ctxt->eflags &= ~0xffff;
2153 ctxt->eflags |= temp_eflags;
2156 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2157 ctxt->eflags |= X86_EFLAGS_FIXED;
2158 ctxt->ops->set_nmi_mask(ctxt, false);
2163 static int em_iret(struct x86_emulate_ctxt *ctxt)
2165 switch(ctxt->mode) {
2166 case X86EMUL_MODE_REAL:
2167 return emulate_iret_real(ctxt);
2168 case X86EMUL_MODE_VM86:
2169 case X86EMUL_MODE_PROT16:
2170 case X86EMUL_MODE_PROT32:
2171 case X86EMUL_MODE_PROT64:
2173 /* iret from protected mode is not yet implemented */
2174 return X86EMUL_UNHANDLEABLE;
2178 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2182 struct desc_struct new_desc;
2183 u8 cpl = ctxt->ops->cpl(ctxt);
2185 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2187 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2188 X86_TRANSFER_CALL_JMP,
2190 if (rc != X86EMUL_CONTINUE)
2193 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2194 /* Error handling is not implemented. */
2195 if (rc != X86EMUL_CONTINUE)
2196 return X86EMUL_UNHANDLEABLE;
2201 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2203 return assign_eip_near(ctxt, ctxt->src.val);
2206 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2211 old_eip = ctxt->_eip;
2212 rc = assign_eip_near(ctxt, ctxt->src.val);
2213 if (rc != X86EMUL_CONTINUE)
2215 ctxt->src.val = old_eip;
2220 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2222 u64 old = ctxt->dst.orig_val64;
2224 if (ctxt->dst.bytes == 16)
2225 return X86EMUL_UNHANDLEABLE;
2227 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2228 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2229 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2230 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2231 ctxt->eflags &= ~X86_EFLAGS_ZF;
2233 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2234 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2236 ctxt->eflags |= X86_EFLAGS_ZF;
2238 return X86EMUL_CONTINUE;
2241 static int em_ret(struct x86_emulate_ctxt *ctxt)
2246 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2247 if (rc != X86EMUL_CONTINUE)
2250 return assign_eip_near(ctxt, eip);
2253 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2256 unsigned long eip, cs;
2257 int cpl = ctxt->ops->cpl(ctxt);
2258 struct desc_struct new_desc;
2260 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2261 if (rc != X86EMUL_CONTINUE)
2263 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2264 if (rc != X86EMUL_CONTINUE)
2266 /* Outer-privilege level return is not implemented */
2267 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2268 return X86EMUL_UNHANDLEABLE;
2269 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2272 if (rc != X86EMUL_CONTINUE)
2274 rc = assign_eip_far(ctxt, eip, &new_desc);
2275 /* Error handling is not implemented. */
2276 if (rc != X86EMUL_CONTINUE)
2277 return X86EMUL_UNHANDLEABLE;
2282 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2286 rc = em_ret_far(ctxt);
2287 if (rc != X86EMUL_CONTINUE)
2289 rsp_increment(ctxt, ctxt->src.val);
2290 return X86EMUL_CONTINUE;
2293 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2295 /* Save real source value, then compare EAX against destination. */
2296 ctxt->dst.orig_val = ctxt->dst.val;
2297 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2298 ctxt->src.orig_val = ctxt->src.val;
2299 ctxt->src.val = ctxt->dst.orig_val;
2300 fastop(ctxt, em_cmp);
2302 if (ctxt->eflags & X86_EFLAGS_ZF) {
2303 /* Success: write back to memory; no update of EAX */
2304 ctxt->src.type = OP_NONE;
2305 ctxt->dst.val = ctxt->src.orig_val;
2307 /* Failure: write the value we saw to EAX. */
2308 ctxt->src.type = OP_REG;
2309 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2310 ctxt->src.val = ctxt->dst.orig_val;
2311 /* Create write-cycle to dest by writing the same value */
2312 ctxt->dst.val = ctxt->dst.orig_val;
2314 return X86EMUL_CONTINUE;
2317 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2319 int seg = ctxt->src2.val;
2323 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2325 rc = load_segment_descriptor(ctxt, sel, seg);
2326 if (rc != X86EMUL_CONTINUE)
2329 ctxt->dst.val = ctxt->src.val;
2333 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2335 u32 eax, ebx, ecx, edx;
2339 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2340 return edx & bit(X86_FEATURE_LM);
2343 #define GET_SMSTATE(type, smbase, offset) \
2346 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
2348 if (r != X86EMUL_CONTINUE) \
2349 return X86EMUL_UNHANDLEABLE; \
2353 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2355 desc->g = (flags >> 23) & 1;
2356 desc->d = (flags >> 22) & 1;
2357 desc->l = (flags >> 21) & 1;
2358 desc->avl = (flags >> 20) & 1;
2359 desc->p = (flags >> 15) & 1;
2360 desc->dpl = (flags >> 13) & 3;
2361 desc->s = (flags >> 12) & 1;
2362 desc->type = (flags >> 8) & 15;
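/*
 * Annotation: the SMM state-save area stores segment attributes in the
 * layout of a descriptor's high dword, hence the shifts above: bit 23
 * G, bit 22 D/B, bit 21 L, bit 20 AVL, bit 15 P, bits 14:13 DPL,
 * bit 12 S, bits 11:8 type.
 */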
2365 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2367 struct desc_struct desc;
2371 selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2374 offset = 0x7f84 + n * 12;
2376 offset = 0x7f2c + (n - 3) * 12;
2378 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2379 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2380 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2381 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2382 return X86EMUL_CONTINUE;
2385 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2387 struct desc_struct desc;
2392 offset = 0x7e00 + n * 16;
2394 selector = GET_SMSTATE(u16, smbase, offset);
2395 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2396 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2397 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2398 base3 = GET_SMSTATE(u32, smbase, offset + 12);
2400 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2401 return X86EMUL_CONTINUE;
2404 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2410 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2411 * Then enable protected mode. However, PCID cannot be enabled
2412 * if EFER.LMA=0, so set it separately.
2414 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2416 return X86EMUL_UNHANDLEABLE;
2418 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2420 return X86EMUL_UNHANDLEABLE;
2422 if (cr4 & X86_CR4_PCIDE) {
2423 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2425 return X86EMUL_UNHANDLEABLE;
2428 return X86EMUL_CONTINUE;
2431 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2433 struct desc_struct desc;
2439 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2440 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
2441 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2442 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2444 for (i = 0; i < 8; i++)
2445 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2447 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2448 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2449 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2450 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2452 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2453 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2454 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2455 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2456 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2458 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2459 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2460 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2461 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2462 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2464 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2465 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2466 ctxt->ops->set_gdt(ctxt, &dt);
2468 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2469 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2470 ctxt->ops->set_idt(ctxt, &dt);
2472 for (i = 0; i < 6; i++) {
2473 int r = rsm_load_seg_32(ctxt, smbase, i);
2474 if (r != X86EMUL_CONTINUE)
2478 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2480 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2482 return rsm_enter_protected_mode(ctxt, cr0, cr4);
2485 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2487 struct desc_struct desc;
2494 for (i = 0; i < 16; i++)
2495 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2497 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2498 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2500 val = GET_SMSTATE(u32, smbase, 0x7f68);
2501 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2502 val = GET_SMSTATE(u32, smbase, 0x7f60);
2503 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2505 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2506 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
2507 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2508 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2509 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2510 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2512 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2513 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2514 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2515 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2516 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2517 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2519 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2520 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2521 ctxt->ops->set_idt(ctxt, &dt);
2523 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2524 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2525 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2526 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2527 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2528 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2530 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2531 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2532 ctxt->ops->set_gdt(ctxt, &dt);
2534 r = rsm_enter_protected_mode(ctxt, cr0, cr4);
2535 if (r != X86EMUL_CONTINUE)
2538 for (i = 0; i < 6; i++) {
2539 r = rsm_load_seg_64(ctxt, smbase, i);
2540 if (r != X86EMUL_CONTINUE)
2544 return X86EMUL_CONTINUE;
2547 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2549 unsigned long cr0, cr4, efer;
2553 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2554 return emulate_ud(ctxt);
2557 * Get back to real mode, to prepare a safe state in which to load
2558 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2559 * supports long mode.
2561 cr4 = ctxt->ops->get_cr(ctxt, 4);
2562 if (emulator_has_longmode(ctxt)) {
2563 struct desc_struct cs_desc;
2565 /* Zero CR4.PCIDE before CR0.PG. */
2566 if (cr4 & X86_CR4_PCIDE) {
2567 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2568 cr4 &= ~X86_CR4_PCIDE;
2571 /* A 32-bit code segment is required to clear EFER.LMA. */
2572		memset(&cs_desc, 0, sizeof(cs_desc));
2573		cs_desc.type = 0xb;
2574		cs_desc.s = cs_desc.g = cs_desc.p = 1;
2575 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2578 /* For the 64-bit case, this will clear EFER.LMA. */
2579 cr0 = ctxt->ops->get_cr(ctxt, 0);
2580 if (cr0 & X86_CR0_PE)
2581 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2583 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
2584 if (cr4 & X86_CR4_PAE)
2585 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2587	/* And finally go back to 32-bit mode. */
2588	efer = 0;
2589	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2591 smbase = ctxt->ops->get_smbase(ctxt);
2592 if (emulator_has_longmode(ctxt))
2593 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2595 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2597 if (ret != X86EMUL_CONTINUE) {
2598 /* FIXME: should triple fault */
2599 return X86EMUL_UNHANDLEABLE;
2602 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2603 ctxt->ops->set_nmi_mask(ctxt, false);
2605 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2606 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2607 return X86EMUL_CONTINUE;
2611 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2612 struct desc_struct *cs, struct desc_struct *ss)
2614 cs->l = 0; /* will be adjusted later */
2615 set_desc_base(cs, 0); /* flat segment */
2616 cs->g = 1; /* 4kb granularity */
2617 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2618 cs->type = 0x0b; /* Read, Execute, Accessed */
2620 cs->dpl = 0; /* will be adjusted later */
2625 set_desc_base(ss, 0); /* flat segment */
2626 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2627 ss->g = 1; /* 4kb granularity */
2629 ss->type = 0x03; /* Read/Write, Accessed */
2630 ss->d = 1; /* 32bit stack segment */
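/*
 * Added commentary: together with the s/p/d assignments elided from this
 * extract, the CS built here is equivalent to the classic flat descriptor
 * 0x00cf9b000000ffff and SS to 0x00cf93000000ffff - base 0, 4GB limit,
 * DPL 0, 32-bit - differing only in type (code 0xb vs. data 0x3).
 */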
2637 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2639 u32 eax, ebx, ecx, edx;
2641	eax = ecx = 0;
2642	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2643 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2644 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2645 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2648 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2650 const struct x86_emulate_ops *ops = ctxt->ops;
2651 u32 eax, ebx, ecx, edx;
2654	 * SYSCALL is always enabled in long mode, so the check only
2655	 * becomes vendor-specific (via CPUID) when other modes are active.
2657	if (ctxt->mode == X86EMUL_MODE_PROT64)
2658		return true;
2660	eax = 0x00000000;
2661	ecx = 0x00000000;
2662	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2664	 * Intel ("GenuineIntel")
2665	 * remark: Intel CPUs only support "syscall" in 64-bit long mode,
2666	 * so a 64-bit guest running a 32-bit compat application will #UD.
2667	 * While that behaviour could be emulated away to match the AMD
2668	 * response, AMD CPUs cannot be made to behave like Intel ones,
2669	 * so each vendor's native behaviour is preserved instead.
2671 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2672 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2673	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2674		return false;
2676 /* AMD ("AuthenticAMD") */
2677 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2678 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2679	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2680		return true;
2682 /* AMD ("AMDisbetter!") */
2683 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2684 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2685	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2686		return true;
2688	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2689	return false;
2692 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2694 const struct x86_emulate_ops *ops = ctxt->ops;
2695 struct desc_struct cs, ss;
2700 /* syscall is not available in real mode */
2701 if (ctxt->mode == X86EMUL_MODE_REAL ||
2702 ctxt->mode == X86EMUL_MODE_VM86)
2703 return emulate_ud(ctxt);
2705 if (!(em_syscall_is_enabled(ctxt)))
2706 return emulate_ud(ctxt);
2708 ops->get_msr(ctxt, MSR_EFER, &efer);
2709 setup_syscalls_segments(ctxt, &cs, &ss);
2711 if (!(efer & EFER_SCE))
2712 return emulate_ud(ctxt);
2714 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2715	msr_data >>= 32;
2716	cs_sel = (u16)(msr_data & 0xfffc);
2717 ss_sel = (u16)(msr_data + 8);
2719	if (efer & EFER_LMA) {
2720		cs.d = 0;
2721		cs.l = 1;
2722	}
2723 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2724 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2726 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2727 if (efer & EFER_LMA) {
2728 #ifdef CONFIG_X86_64
2729 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2731		ops->get_msr(ctxt,
2732			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2733 MSR_LSTAR : MSR_CSTAR, &msr_data);
2734 ctxt->_eip = msr_data;
2736 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2737 ctxt->eflags &= ~msr_data;
2738 ctxt->eflags |= X86_EFLAGS_FIXED;
2739 #endif
2740	} else {
2741		/* legacy mode */
2742		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2743 ctxt->_eip = (u32)msr_data;
2745 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2748 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2749 return X86EMUL_CONTINUE;
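/*
 * Worked example (added commentary): with MSR_STAR = 0x0023001000000000,
 * bits 47:32 give 0x0010, so SYSCALL loads CS = 0x0010 and SS = 0x0018.
 * In long mode the return RIP is stashed in RCX and RFLAGS in R11 with
 * the flags masked by MSR_SYSCALL_MASK; legacy mode only clears VM/IF.
 */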
2752 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2754 const struct x86_emulate_ops *ops = ctxt->ops;
2755 struct desc_struct cs, ss;
2760 ops->get_msr(ctxt, MSR_EFER, &efer);
2761 /* inject #GP if in real mode */
2762 if (ctxt->mode == X86EMUL_MODE_REAL)
2763 return emulate_gp(ctxt, 0);
2766	 * Not recognized on AMD in compat mode (but is recognized in legacy
2767	 * mode, of course).
2769 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2770 && !vendor_intel(ctxt))
2771 return emulate_ud(ctxt);
2773 /* sysenter/sysexit have not been tested in 64bit mode. */
2774 if (ctxt->mode == X86EMUL_MODE_PROT64)
2775 return X86EMUL_UNHANDLEABLE;
2777 setup_syscalls_segments(ctxt, &cs, &ss);
2779 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2780 if ((msr_data & 0xfffc) == 0x0)
2781 return emulate_gp(ctxt, 0);
2783 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2784 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2785 ss_sel = cs_sel + 8;
2786	if (efer & EFER_LMA) {
2787		cs.d = 0;
2788		cs.l = 1;
2789	}
2791 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2792 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2794 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2795 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2797 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2798	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2799							      (u32)msr_data;
2801 return X86EMUL_CONTINUE;
2804 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2806 const struct x86_emulate_ops *ops = ctxt->ops;
2807 struct desc_struct cs, ss;
2808 u64 msr_data, rcx, rdx;
2810 u16 cs_sel = 0, ss_sel = 0;
2812 /* inject #GP if in real mode or Virtual 8086 mode */
2813 if (ctxt->mode == X86EMUL_MODE_REAL ||
2814 ctxt->mode == X86EMUL_MODE_VM86)
2815 return emulate_gp(ctxt, 0);
2817 setup_syscalls_segments(ctxt, &cs, &ss);
2819	if ((ctxt->rex_prefix & 0x8) != 0x0)
2820		usermode = X86EMUL_MODE_PROT64;
2821	else
2822		usermode = X86EMUL_MODE_PROT32;
2824 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2825 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2829 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2830	switch (usermode) {
2831	case X86EMUL_MODE_PROT32:
2832 cs_sel = (u16)(msr_data + 16);
2833 if ((msr_data & 0xfffc) == 0x0)
2834 return emulate_gp(ctxt, 0);
2835		ss_sel = (u16)(msr_data + 24);
2836		break;
2839 case X86EMUL_MODE_PROT64:
2840 cs_sel = (u16)(msr_data + 32);
2841 if (msr_data == 0x0)
2842 return emulate_gp(ctxt, 0);
2843		ss_sel = cs_sel + 8;
2844		cs.d = 0;
2845		cs.l = 1;
2846 if (emul_is_noncanonical_address(rcx, ctxt) ||
2847 emul_is_noncanonical_address(rdx, ctxt))
2848			return emulate_gp(ctxt, 0);
2849		break;
2851 cs_sel |= SEGMENT_RPL_MASK;
2852 ss_sel |= SEGMENT_RPL_MASK;
2854 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2855 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2857	ctxt->_eip = rdx;
2858	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2860 return X86EMUL_CONTINUE;
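/*
 * Worked example (added commentary): for a 32-bit SYSEXIT with
 * IA32_SYSENTER_CS = 0x10, the user selectors become CS = 0x20 and
 * SS = 0x28, each ORed with RPL 3; execution resumes at EIP = EDX with
 * ESP = ECX. The 64-bit variant uses the +32/+40 selector pair instead.
 */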
2863 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2866 if (ctxt->mode == X86EMUL_MODE_REAL)
2868 if (ctxt->mode == X86EMUL_MODE_VM86)
2870 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2871 return ctxt->ops->cpl(ctxt) > iopl;
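/*
 * Added commentary: e.g. with EFLAGS.IOPL = 0 any CPL > 0 is "bad" and
 * must consult the TSS I/O bitmap; real mode never fails this test,
 * while VM86 always does, forcing the bitmap check - matching hardware.
 */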
2874 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2875					    u16 port, u16 len)
2877 const struct x86_emulate_ops *ops = ctxt->ops;
2878 struct desc_struct tr_seg;
2881 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2882 unsigned mask = (1 << len) - 1;
2885	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2886	if (!tr_seg.p)
2887		return false;
2888	if (desc_limit_scaled(&tr_seg) < 103)
2889		return false;
2890	base = get_desc_base(&tr_seg);
2891 #ifdef CONFIG_X86_64
2892	base |= ((u64)base3) << 32;
2893 #endif
2894	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2895	if (r != X86EMUL_CONTINUE)
2896		return false;
2897	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2898		return false;
2899	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2900	if (r != X86EMUL_CONTINUE)
2901		return false;
2902	if ((perm >> bit_idx) & mask)
2903		return false;
2905	return true;
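/*
 * Worked example (added commentary): for a 2-byte access to port 0x3f9,
 * mask = 0b11 and bit_idx = 1, so the permission bits checked are bits
 * 1-2 of the u16 read at base + io_bitmap_ptr + 0x7f. Reading two bytes
 * at once is what lets an access straddle a bitmap byte boundary.
 */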
2907 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2908				  u16 port, u16 len)
2910	if (ctxt->perm_ok)
2911		return true;
2913	if (emulator_bad_iopl(ctxt))
2914		if (!emulator_io_port_access_allowed(ctxt, port, len))
2915			return false;
2917	ctxt->perm_ok = true;
2919	return true;
2922 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2925	 * Intel CPUs mask the counter and pointers in quite a strange
2926	 * manner when ECX is zero due to REP-string optimizations.
2928 #ifdef CONFIG_X86_64
2929	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2930		return;
2932 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2934	switch (ctxt->b) {
2935	case 0xa4:	/* movsb */
2936 case 0xa5: /* movsd/w */
2937		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2938		break;
2939 case 0xaa: /* stosb */
2940 case 0xab: /* stosd/w */
2941		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2942		break;
2943	}
2944 #endif
2946 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2947 struct tss_segment_16 *tss)
2949 tss->ip = ctxt->_eip;
2950 tss->flag = ctxt->eflags;
2951 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2952 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2953 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2954 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2955 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2956 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2957 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2958 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2960 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2961 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2962 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2963 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2964 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2967 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2968 struct tss_segment_16 *tss)
2973 ctxt->_eip = tss->ip;
2974 ctxt->eflags = tss->flag | 2;
2975 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2976 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2977 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2978 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2979 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2980 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2981 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2982 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2985 * SDM says that segment selectors are loaded before segment
2988 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2989 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2990 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2991 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2992 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2997	 * Now load segment descriptors. If a fault happens at this stage
2998	 * it is handled in the context of the new task.
3000 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3001 X86_TRANSFER_TASK_SWITCH, NULL);
3002 if (ret != X86EMUL_CONTINUE)
3004 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3005 X86_TRANSFER_TASK_SWITCH, NULL);
3006 if (ret != X86EMUL_CONTINUE)
3008 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3009 X86_TRANSFER_TASK_SWITCH, NULL);
3010 if (ret != X86EMUL_CONTINUE)
3012 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3013 X86_TRANSFER_TASK_SWITCH, NULL);
3014 if (ret != X86EMUL_CONTINUE)
3016 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3017 X86_TRANSFER_TASK_SWITCH, NULL);
3018 if (ret != X86EMUL_CONTINUE)
3021 return X86EMUL_CONTINUE;
3024 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3025 u16 tss_selector, u16 old_tss_sel,
3026 ulong old_tss_base, struct desc_struct *new_desc)
3028 const struct x86_emulate_ops *ops = ctxt->ops;
3029 struct tss_segment_16 tss_seg;
3031 u32 new_tss_base = get_desc_base(new_desc);
3033	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3034			    &ctxt->exception);
3035 if (ret != X86EMUL_CONTINUE)
3038 save_state_to_tss16(ctxt, &tss_seg);
3040	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3041			     &ctxt->exception);
3042 if (ret != X86EMUL_CONTINUE)
3045	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3046			    &ctxt->exception);
3047 if (ret != X86EMUL_CONTINUE)
3050 if (old_tss_sel != 0xffff) {
3051 tss_seg.prev_task_link = old_tss_sel;
3053		ret = ops->write_std(ctxt, new_tss_base,
3054				     &tss_seg.prev_task_link,
3055				     sizeof tss_seg.prev_task_link,
3056				     &ctxt->exception);
3057 if (ret != X86EMUL_CONTINUE)
3061 return load_state_from_tss16(ctxt, &tss_seg);
3064 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3065 struct tss_segment_32 *tss)
3067	/* CR3 and the LDT selector are intentionally not saved */
3068 tss->eip = ctxt->_eip;
3069 tss->eflags = ctxt->eflags;
3070 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3071 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3072 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3073 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3074 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3075 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3076 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3077 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3079 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3080 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3081 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3082 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3083 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3084 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3087 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3088 struct tss_segment_32 *tss)
3093 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3094 return emulate_gp(ctxt, 0);
3095 ctxt->_eip = tss->eip;
3096 ctxt->eflags = tss->eflags | 2;
3098 /* General purpose registers */
3099 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3100 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3101 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3102 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3103 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3104 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3105 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3106 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3109 * SDM says that segment selectors are loaded before segment
3110 * descriptors. This is important because CPL checks will
3113 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3114 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3115 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3116 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3117 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3118 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3119 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3122 * If we're switching between Protected Mode and VM86, we need to make
3123 * sure to update the mode before loading the segment descriptors so
3124 * that the selectors are interpreted correctly.
3126 if (ctxt->eflags & X86_EFLAGS_VM) {
3127 ctxt->mode = X86EMUL_MODE_VM86;
3130 ctxt->mode = X86EMUL_MODE_PROT32;
3135	 * Now load segment descriptors. If a fault happens at this stage
3136	 * it is handled in the context of the new task.
3138 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3139 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3140 if (ret != X86EMUL_CONTINUE)
3142 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3143 X86_TRANSFER_TASK_SWITCH, NULL);
3144 if (ret != X86EMUL_CONTINUE)
3146 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3147 X86_TRANSFER_TASK_SWITCH, NULL);
3148 if (ret != X86EMUL_CONTINUE)
3150 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3151 X86_TRANSFER_TASK_SWITCH, NULL);
3152 if (ret != X86EMUL_CONTINUE)
3154 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3155 X86_TRANSFER_TASK_SWITCH, NULL);
3156 if (ret != X86EMUL_CONTINUE)
3158 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3159 X86_TRANSFER_TASK_SWITCH, NULL);
3160 if (ret != X86EMUL_CONTINUE)
3162 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3163				     X86_TRANSFER_TASK_SWITCH, NULL);
3165	return ret;
3168 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3169 u16 tss_selector, u16 old_tss_sel,
3170 ulong old_tss_base, struct desc_struct *new_desc)
3172 const struct x86_emulate_ops *ops = ctxt->ops;
3173 struct tss_segment_32 tss_seg;
3175 u32 new_tss_base = get_desc_base(new_desc);
3176 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3177 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3179	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3180			    &ctxt->exception);
3181 if (ret != X86EMUL_CONTINUE)
3184 save_state_to_tss32(ctxt, &tss_seg);
3186 /* Only GP registers and segment selectors are saved */
3187 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3188 ldt_sel_offset - eip_offset, &ctxt->exception);
3189 if (ret != X86EMUL_CONTINUE)
3192	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3193			    &ctxt->exception);
3194 if (ret != X86EMUL_CONTINUE)
3197 if (old_tss_sel != 0xffff) {
3198 tss_seg.prev_task_link = old_tss_sel;
3200		ret = ops->write_std(ctxt, new_tss_base,
3201				     &tss_seg.prev_task_link,
3202				     sizeof tss_seg.prev_task_link,
3203				     &ctxt->exception);
3204 if (ret != X86EMUL_CONTINUE)
3208 return load_state_from_tss32(ctxt, &tss_seg);
3211 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3212 u16 tss_selector, int idt_index, int reason,
3213 bool has_error_code, u32 error_code)
3215 const struct x86_emulate_ops *ops = ctxt->ops;
3216 struct desc_struct curr_tss_desc, next_tss_desc;
3218 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3219 ulong old_tss_base =
3220 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3222 ulong desc_addr, dr7;
3224 /* FIXME: old_tss_base == ~0 ? */
3226 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3227 if (ret != X86EMUL_CONTINUE)
3229 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3230 if (ret != X86EMUL_CONTINUE)
3233 /* FIXME: check that next_tss_desc is tss */
3236 * Check privileges. The three cases are task switch caused by...
3238 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3239 * 2. Exception/IRQ/iret: No check is performed
3240 * 3. jmp/call to TSS/task-gate: No check is performed since the
3241 * hardware checks it before exiting.
3243 if (reason == TASK_SWITCH_GATE) {
3244 if (idt_index != -1) {
3245 /* Software interrupts */
3246 struct desc_struct task_gate_desc;
3249			ret = read_interrupt_descriptor(ctxt, idt_index,
3250							&task_gate_desc);
3251			if (ret != X86EMUL_CONTINUE)
3252				return ret;
3254 dpl = task_gate_desc.dpl;
3255 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3256 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3260 desc_limit = desc_limit_scaled(&next_tss_desc);
3261 if (!next_tss_desc.p ||
3262 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3263 desc_limit < 0x2b)) {
3264 return emulate_ts(ctxt, tss_selector & 0xfffc);
3267 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3268 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3269 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3272 if (reason == TASK_SWITCH_IRET)
3273 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3275	/* set back link to prev task only if NT bit is set in eflags;
3276	   note that old_tss_sel is not used after this point */
3277 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3278 old_tss_sel = 0xffff;
3280 if (next_tss_desc.type & 8)
3281 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3282 old_tss_base, &next_tss_desc);
3284 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3285 old_tss_base, &next_tss_desc);
3286 if (ret != X86EMUL_CONTINUE)
3289 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3290 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3292 if (reason != TASK_SWITCH_IRET) {
3293 next_tss_desc.type |= (1 << 1); /* set busy flag */
3294 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3297 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3298 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3300 if (has_error_code) {
3301 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3302 ctxt->lock_prefix = 0;
3303 ctxt->src.val = (unsigned long) error_code;
3304 ret = em_push(ctxt);
3307 ops->get_dr(ctxt, 7, &dr7);
3308	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3310	return ret;
3313 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3314 u16 tss_selector, int idt_index, int reason,
3315 bool has_error_code, u32 error_code)
3319 invalidate_registers(ctxt);
3320 ctxt->_eip = ctxt->eip;
3321 ctxt->dst.type = OP_NONE;
3323 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3324 has_error_code, error_code);
3326 if (rc == X86EMUL_CONTINUE) {
3327 ctxt->eip = ctxt->_eip;
3328 writeback_registers(ctxt);
3331 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3334 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3337 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3339 register_address_increment(ctxt, reg, df * op->bytes);
3340 op->addr.mem.ea = register_address(ctxt, reg);
3343 static int em_das(struct x86_emulate_ctxt *ctxt)
3346 bool af, cf, old_cf;
3348 cf = ctxt->eflags & X86_EFLAGS_CF;
3354 af = ctxt->eflags & X86_EFLAGS_AF;
3355 if ((al & 0x0f) > 9 || af) {
3357 cf = old_cf | (al >= 250);
3362 if (old_al > 0x99 || old_cf) {
3368 /* Set PF, ZF, SF */
3369	ctxt->src.type = OP_IMM;
3370	ctxt->src.val = 0;
3371	ctxt->src.bytes = 1;
3372 fastop(ctxt, em_or);
3373 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3375 ctxt->eflags |= X86_EFLAGS_CF;
3377 ctxt->eflags |= X86_EFLAGS_AF;
3378 return X86EMUL_CONTINUE;
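/*
 * Worked example (added commentary): after the subtraction 0x23 - 0x05
 * the CPU holds AL = 0x1e with AF set; DAS then subtracts 6 from the low
 * nibble, leaving AL = 0x18 - the correct packed-BCD result of 23 - 05.
 */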
3381 static int em_aam(struct x86_emulate_ctxt *ctxt)
3385 if (ctxt->src.val == 0)
3386 return emulate_de(ctxt);
3388 al = ctxt->dst.val & 0xff;
3389 ah = al / ctxt->src.val;
3390 al %= ctxt->src.val;
3392 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3394 /* Set PF, ZF, SF */
3395	ctxt->src.type = OP_IMM;
3396	ctxt->src.val = 0;
3397	ctxt->src.bytes = 1;
3398 fastop(ctxt, em_or);
3400 return X86EMUL_CONTINUE;
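/*
 * Worked example (added commentary): AAM with the default base 10 and
 * AL = 123 (0x7b) yields AH = 12, AL = 3; a divisor of zero raises #DE,
 * which is why the guard above exists.
 */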
3403 static int em_aad(struct x86_emulate_ctxt *ctxt)
3405 u8 al = ctxt->dst.val & 0xff;
3406 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3408 al = (al + (ah * ctxt->src.val)) & 0xff;
3410 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3412 /* Set PF, ZF, SF */
3413	ctxt->src.type = OP_IMM;
3414	ctxt->src.val = 0;
3415	ctxt->src.bytes = 1;
3416 fastop(ctxt, em_or);
3418 return X86EMUL_CONTINUE;
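/*
 * Worked example (added commentary): AAD with base 10 and AH:AL = 09:09
 * folds the digits into AL = 99 (0x63) and clears AH - the inverse of
 * the AAM split above.
 */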
3421 static int em_call(struct x86_emulate_ctxt *ctxt)
3424 long rel = ctxt->src.val;
3426 ctxt->src.val = (unsigned long)ctxt->_eip;
3427 rc = jmp_rel(ctxt, rel);
3428 if (rc != X86EMUL_CONTINUE)
3430 return em_push(ctxt);
3433 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3438 struct desc_struct old_desc, new_desc;
3439 const struct x86_emulate_ops *ops = ctxt->ops;
3440 int cpl = ctxt->ops->cpl(ctxt);
3441 enum x86emul_mode prev_mode = ctxt->mode;
3443 old_eip = ctxt->_eip;
3444 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3446 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3447 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3448 X86_TRANSFER_CALL_JMP, &new_desc);
3449 if (rc != X86EMUL_CONTINUE)
3452 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3453 if (rc != X86EMUL_CONTINUE)
3456 ctxt->src.val = old_cs;
3458 if (rc != X86EMUL_CONTINUE)
3461 ctxt->src.val = old_eip;
3463	/* If we failed, we tainted the memory, but the very least we should
3464	   restore is the old stack. */
3465 if (rc != X86EMUL_CONTINUE) {
3466 pr_warn_once("faulting far call emulation tainted memory\n");
3471 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3472 ctxt->mode = prev_mode;
3477 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3482 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3483 if (rc != X86EMUL_CONTINUE)
3485 rc = assign_eip_near(ctxt, eip);
3486 if (rc != X86EMUL_CONTINUE)
3488 rsp_increment(ctxt, ctxt->src.val);
3489 return X86EMUL_CONTINUE;
3492 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3494 /* Write back the register source. */
3495 ctxt->src.val = ctxt->dst.val;
3496 write_register_operand(&ctxt->src);
3498 /* Write back the memory destination with implicit LOCK prefix. */
3499 ctxt->dst.val = ctxt->src.orig_val;
3500 ctxt->lock_prefix = 1;
3501 return X86EMUL_CONTINUE;
3504 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3506 ctxt->dst.val = ctxt->src2.val;
3507 return fastop(ctxt, em_imul);
3510 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3512 ctxt->dst.type = OP_REG;
3513 ctxt->dst.bytes = ctxt->src.bytes;
3514 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3515 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3517 return X86EMUL_CONTINUE;
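/*
 * Added commentary: the expression above replicates the accumulator's
 * sign bit into RDX, e.g. CWD with AX = 0x8000 sets DX = 0xffff while
 * AX = 0x7fff sets DX = 0.
 */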
3520 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3524 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3525 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3526 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3527 return X86EMUL_CONTINUE;
3530 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3534 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3535 return emulate_gp(ctxt, 0);
3536 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3537 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3538 return X86EMUL_CONTINUE;
3541 static int em_mov(struct x86_emulate_ctxt *ctxt)
3543 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3544 return X86EMUL_CONTINUE;
3547 #define FFL(x) bit(X86_FEATURE_##x)
3549 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3551 u32 ebx, ecx, edx, eax = 1;
3555 * Check MOVBE is set in the guest-visible CPUID leaf.
3557 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3558 if (!(ecx & FFL(MOVBE)))
3559 return emulate_ud(ctxt);
3561 switch (ctxt->op_bytes) {
3564 * From MOVBE definition: "...When the operand size is 16 bits,
3565 * the upper word of the destination register remains unchanged
3568 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3569 * rules so we have to do the operation almost per hand.
3571 tmp = (u16)ctxt->src.val;
3572 ctxt->dst.val &= ~0xffffUL;
3573 ctxt->dst.val |= (unsigned long)swab16(tmp);
3576 ctxt->dst.val = swab32((u32)ctxt->src.val);
3579 ctxt->dst.val = swab64(ctxt->src.val);
3584 return X86EMUL_CONTINUE;
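/*
 * Worked example (added commentary): a 32-bit MOVBE of 0x12345678 stores
 * 0x78563412; the 16-bit form swaps only the low word and leaves the
 * destination's upper bits untouched, as the SDM quote above requires.
 */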
3587 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3589 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3590 return emulate_gp(ctxt, 0);
3592 /* Disable writeback. */
3593 ctxt->dst.type = OP_NONE;
3594 return X86EMUL_CONTINUE;
3597 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3601 if (ctxt->mode == X86EMUL_MODE_PROT64)
3602 val = ctxt->src.val & ~0ULL;
3604 val = ctxt->src.val & ~0U;
3606 /* #UD condition is already handled. */
3607 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3608 return emulate_gp(ctxt, 0);
3610 /* Disable writeback. */
3611 ctxt->dst.type = OP_NONE;
3612 return X86EMUL_CONTINUE;
3615 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3619 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3620 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3621 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3622 return emulate_gp(ctxt, 0);
3624 return X86EMUL_CONTINUE;
3627 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3631 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3632 return emulate_gp(ctxt, 0);
3634 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3635 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3636 return X86EMUL_CONTINUE;
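/*
 * Added commentary: RDMSR/WRMSR split the 64-bit value across EDX:EAX,
 * e.g. an MSR value of 0x123456789abcdef0 is returned as
 * EAX = 0x9abcdef0 and EDX = 0x12345678.
 */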
3639 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3641 if (ctxt->modrm_reg > VCPU_SREG_GS)
3642 return emulate_ud(ctxt);
3644 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3645 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3646 ctxt->dst.bytes = 2;
3647 return X86EMUL_CONTINUE;
3650 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3652 u16 sel = ctxt->src.val;
3654 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3655 return emulate_ud(ctxt);
3657 if (ctxt->modrm_reg == VCPU_SREG_SS)
3658 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3660 /* Disable writeback. */
3661 ctxt->dst.type = OP_NONE;
3662 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3665 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3667 u16 sel = ctxt->src.val;
3669 /* Disable writeback. */
3670 ctxt->dst.type = OP_NONE;
3671 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3674 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3676 u16 sel = ctxt->src.val;
3678 /* Disable writeback. */
3679 ctxt->dst.type = OP_NONE;
3680 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3683 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3688 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3689 if (rc == X86EMUL_CONTINUE)
3690 ctxt->ops->invlpg(ctxt, linear);
3691 /* Disable writeback. */
3692 ctxt->dst.type = OP_NONE;
3693 return X86EMUL_CONTINUE;
3696 static int em_clts(struct x86_emulate_ctxt *ctxt)
3700	cr0 = ctxt->ops->get_cr(ctxt, 0);
3701	cr0 &= ~X86_CR0_TS;
3702	ctxt->ops->set_cr(ctxt, 0, cr0);
3703 return X86EMUL_CONTINUE;
3706 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3708 int rc = ctxt->ops->fix_hypercall(ctxt);
3710 if (rc != X86EMUL_CONTINUE)
3713 /* Let the processor re-execute the fixed hypercall */
3714 ctxt->_eip = ctxt->eip;
3715 /* Disable writeback. */
3716 ctxt->dst.type = OP_NONE;
3717 return X86EMUL_CONTINUE;
3720 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3721 void (*get)(struct x86_emulate_ctxt *ctxt,
3722 struct desc_ptr *ptr))
3724 struct desc_ptr desc_ptr;
3726	if (ctxt->mode == X86EMUL_MODE_PROT64)
3727		ctxt->op_bytes = 8;
3728	get(ctxt, &desc_ptr);
3729	if (ctxt->op_bytes == 2) {
3730		ctxt->op_bytes = 4;
3731		desc_ptr.address &= 0x00ffffff;
3732	}
3733 /* Disable writeback. */
3734 ctxt->dst.type = OP_NONE;
3735 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3736 &desc_ptr, 2 + ctxt->op_bytes);
3739 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3741 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3744 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3746 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3749 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3751 struct desc_ptr desc_ptr;
3754	if (ctxt->mode == X86EMUL_MODE_PROT64)
3755		ctxt->op_bytes = 8;
3756 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3757 &desc_ptr.size, &desc_ptr.address,
3759 if (rc != X86EMUL_CONTINUE)
3761 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3762 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3763 return emulate_gp(ctxt, 0);
3765 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3767 ctxt->ops->set_idt(ctxt, &desc_ptr);
3768 /* Disable writeback. */
3769 ctxt->dst.type = OP_NONE;
3770 return X86EMUL_CONTINUE;
3773 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3775 return em_lgdt_lidt(ctxt, true);
3778 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3780 return em_lgdt_lidt(ctxt, false);
3783 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3785 if (ctxt->dst.type == OP_MEM)
3786 ctxt->dst.bytes = 2;
3787 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3788 return X86EMUL_CONTINUE;
3791 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3793 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3794 | (ctxt->src.val & 0x0f));
3795 ctxt->dst.type = OP_NONE;
3796 return X86EMUL_CONTINUE;
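/*
 * Added commentary: LMSW can only touch the low four CR0 bits (PE, MP,
 * EM, TS), and because the mask above clears only bits 1-3 it can set
 * CR0.PE but never clear it - matching the architectural restriction.
 */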
3799 static int em_loop(struct x86_emulate_ctxt *ctxt)
3801 int rc = X86EMUL_CONTINUE;
3803 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3804 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3805 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3806 rc = jmp_rel(ctxt, ctxt->src.val);
3811 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3813 int rc = X86EMUL_CONTINUE;
3815 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3816 rc = jmp_rel(ctxt, ctxt->src.val);
3821 static int em_in(struct x86_emulate_ctxt *ctxt)
3823	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3824			     &ctxt->dst.val))
3825 return X86EMUL_IO_NEEDED;
3827 return X86EMUL_CONTINUE;
3830 static int em_out(struct x86_emulate_ctxt *ctxt)
3832	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3833				    &ctxt->src.val, 1);
3834 /* Disable writeback. */
3835 ctxt->dst.type = OP_NONE;
3836 return X86EMUL_CONTINUE;
3839 static int em_cli(struct x86_emulate_ctxt *ctxt)
3841 if (emulator_bad_iopl(ctxt))
3842 return emulate_gp(ctxt, 0);
3844 ctxt->eflags &= ~X86_EFLAGS_IF;
3845 return X86EMUL_CONTINUE;
3848 static int em_sti(struct x86_emulate_ctxt *ctxt)
3850 if (emulator_bad_iopl(ctxt))
3851 return emulate_gp(ctxt, 0);
3853 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3854 ctxt->eflags |= X86_EFLAGS_IF;
3855 return X86EMUL_CONTINUE;
3858 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3860 u32 eax, ebx, ecx, edx;
3863 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3864 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3865 ctxt->ops->cpl(ctxt)) {
3866 return emulate_gp(ctxt, 0);
3869 eax = reg_read(ctxt, VCPU_REGS_RAX);
3870 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3871 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3872 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3873 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3874 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3875 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3876 return X86EMUL_CONTINUE;
3879 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3883	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3884		X86_EFLAGS_SF;
3885 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3887 ctxt->eflags &= ~0xffUL;
3888 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3889 return X86EMUL_CONTINUE;
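/*
 * Worked example (added commentary): SAHF with AH = 0xd7 sets SF, ZF,
 * AF, PF and CF (bits 7, 6, 4, 2, 0); bits 5, 3 and 1 of AH are ignored
 * and the always-one flag bit is forced via X86_EFLAGS_FIXED.
 */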
3892 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3894 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3895 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3896 return X86EMUL_CONTINUE;
3899 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3901 switch (ctxt->op_bytes) {
3902 #ifdef CONFIG_X86_64
3903	case 8:
3904		asm("bswap %0" : "+r"(ctxt->dst.val));
3905		break;
3906 #endif
3907	default:
3908		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3909		break;
3910	}
3911	return X86EMUL_CONTINUE;
3914 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3916 /* emulating clflush regardless of cpuid */
3917 return X86EMUL_CONTINUE;
3920 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3922 ctxt->dst.val = (s32) ctxt->src.val;
3923 return X86EMUL_CONTINUE;
3926 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3928 u32 eax = 1, ebx, ecx = 0, edx;
3930 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3931 if (!(edx & FFL(FXSR)))
3932 return emulate_ud(ctxt);
3934 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3935 return emulate_nm(ctxt);
3938 * Don't emulate a case that should never be hit, instead of working
3939 * around a lack of fxsave64/fxrstor64 on old compilers.
3941 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3942 return X86EMUL_UNHANDLEABLE;
3944 return X86EMUL_CONTINUE;
3948 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3949 * and restore MXCSR.
3951 static size_t __fxstate_size(int nregs)
3953 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3956 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3959 if (ctxt->mode == X86EMUL_MODE_PROT64)
3960 return __fxstate_size(16);
3962 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3963 return __fxstate_size(cr4_osfxsr ? 8 : 0);
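/*
 * Added commentary: __fxstate_size(0) is the 160-byte legacy x87 area
 * (which still contains MXCSR), __fxstate_size(8) is 288 bytes and
 * __fxstate_size(16) the full 416 bytes - the sizes used for the
 * no-OSFXSR, 32-bit and 64-bit cases respectively.
 */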
3967 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3968 *  1) 16 bit mode
3969 *  2) 32 bit mode
3970 *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
3971 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
3972 *       this.
3973 *  3) 64-bit mode with REX.W prefix
3974 *     - like (2), but XMM 8-15 are being saved and restored
3975 *  4) 64-bit mode without REX.W prefix
3976 *     - like (3), but FIP and FDP are 64 bit
3978 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3979 * desired result. (4) is not emulated.
3981 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3982 * and FPU DS) should match.
3984 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3986 struct fxregs_state fx_state;
3989 rc = check_fxsr(ctxt);
3990 if (rc != X86EMUL_CONTINUE)
3993 ctxt->ops->get_fpu(ctxt);
3995 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3997 ctxt->ops->put_fpu(ctxt);
3999 if (rc != X86EMUL_CONTINUE)
4002 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4003 fxstate_size(ctxt));
4006 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4008 struct fxregs_state fx_state;
4012 rc = check_fxsr(ctxt);
4013 if (rc != X86EMUL_CONTINUE)
4016 ctxt->ops->get_fpu(ctxt);
4018 size = fxstate_size(ctxt);
4019 if (size < __fxstate_size(16)) {
4020 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4021 if (rc != X86EMUL_CONTINUE)
4025 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4026 if (rc != X86EMUL_CONTINUE)
4029 if (fx_state.mxcsr >> 16) {
4030 rc = emulate_gp(ctxt, 0);
4034 if (rc == X86EMUL_CONTINUE)
4035 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4038 ctxt->ops->put_fpu(ctxt);
4043 static bool valid_cr(int nr)
4055 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4057 if (!valid_cr(ctxt->modrm_reg))
4058 return emulate_ud(ctxt);
4060 return X86EMUL_CONTINUE;
4063 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4065 u64 new_val = ctxt->src.val64;
4066 int cr = ctxt->modrm_reg;
4069 static u64 cr_reserved_bits[] = {
4070 0xffffffff00000000ULL,
4071 0, 0, 0, /* CR3 checked later */
4078 return emulate_ud(ctxt);
4080 if (new_val & cr_reserved_bits[cr])
4081 return emulate_gp(ctxt, 0);
4086 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4087 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4088 return emulate_gp(ctxt, 0);
4090 cr4 = ctxt->ops->get_cr(ctxt, 4);
4091 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4093 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4094 !(cr4 & X86_CR4_PAE))
4095 return emulate_gp(ctxt, 0);
4102 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4103 if (efer & EFER_LMA) {
4105 u32 eax, ebx, ecx, edx;
4109 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4111 maxphyaddr = eax & 0xff;
4114 rsvd = rsvd_bits(maxphyaddr, 62);
4118 return emulate_gp(ctxt, 0);
4123 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4125 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4126 return emulate_gp(ctxt, 0);
4132 return X86EMUL_CONTINUE;
4135 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4139 ctxt->ops->get_dr(ctxt, 7, &dr7);
4141 /* Check if DR7.Global_Enable is set */
4142 return dr7 & (1 << 13);
4145 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4147 int dr = ctxt->modrm_reg;
4151 return emulate_ud(ctxt);
4153 cr4 = ctxt->ops->get_cr(ctxt, 4);
4154 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4155 return emulate_ud(ctxt);
4157 if (check_dr7_gd(ctxt)) {
4160 ctxt->ops->get_dr(ctxt, 6, &dr6);
4162 dr6 |= DR6_BD | DR6_RTM;
4163 ctxt->ops->set_dr(ctxt, 6, dr6);
4164 return emulate_db(ctxt);
4167 return X86EMUL_CONTINUE;
4170 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4172 u64 new_val = ctxt->src.val64;
4173 int dr = ctxt->modrm_reg;
4175 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4176 return emulate_gp(ctxt, 0);
4178 return check_dr_read(ctxt);
4181 static int check_svme(struct x86_emulate_ctxt *ctxt)
4185 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4187 if (!(efer & EFER_SVME))
4188 return emulate_ud(ctxt);
4190 return X86EMUL_CONTINUE;
4193 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4195 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4197 /* Valid physical address? */
4198 if (rax & 0xffff000000000000ULL)
4199 return emulate_gp(ctxt, 0);
4201 return check_svme(ctxt);
4204 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4206 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4208 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4209 return emulate_ud(ctxt);
4211 return X86EMUL_CONTINUE;
4214 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4216 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4217 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4219 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4220 ctxt->ops->check_pmc(ctxt, rcx))
4221 return emulate_gp(ctxt, 0);
4223 return X86EMUL_CONTINUE;
4226 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4228 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4229	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
4230 return emulate_gp(ctxt, 0);
4232 return X86EMUL_CONTINUE;
4235 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4237 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4238	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
4239 return emulate_gp(ctxt, 0);
4241 return X86EMUL_CONTINUE;
4244 #define D(_y) { .flags = (_y) }
4245 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4246 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4247 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4248 #define N D(NotImpl)
4249 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4250 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4251 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4252 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4253 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4254 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4255 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4256 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4257 #define II(_f, _e, _i) \
4258 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4259 #define IIP(_f, _e, _i, _p) \
4260 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4261 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4262 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4264 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4265 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4266 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4267 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4268 #define I2bvIP(_f, _e, _i, _p) \
4269 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4271 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4272 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4273 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
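/*
 * Added commentary: F6ALU emits the six classic ALU encodings for one
 * operation - r/m,r then r,r/m then acc,imm - each in byte and
 * word/dword form, which is why a single F6ALU line in the opcode table
 * below covers e.g. opcodes 0x00-0x05 for ADD.
 */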
4275 static const struct opcode group7_rm0[] = {
4277 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4281 static const struct opcode group7_rm1[] = {
4282 DI(SrcNone | Priv, monitor),
4283 DI(SrcNone | Priv, mwait),
4287 static const struct opcode group7_rm3[] = {
4288 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4289 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4290 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4291 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4292 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4293 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4294 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4295 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4298 static const struct opcode group7_rm7[] = {
4300 DIP(SrcNone, rdtscp, check_rdtsc),
4304 static const struct opcode group1[] = {
4306 F(Lock | PageTable, em_or),
4309 F(Lock | PageTable, em_and),
4315 static const struct opcode group1A[] = {
4316 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4319 static const struct opcode group2[] = {
4320 F(DstMem | ModRM, em_rol),
4321 F(DstMem | ModRM, em_ror),
4322 F(DstMem | ModRM, em_rcl),
4323 F(DstMem | ModRM, em_rcr),
4324 F(DstMem | ModRM, em_shl),
4325 F(DstMem | ModRM, em_shr),
4326 F(DstMem | ModRM, em_shl),
4327 F(DstMem | ModRM, em_sar),
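/*
 * Added commentary: group2 is indexed by ModRM.reg, so the entries map
 * /0../7 to rol, ror, rcl, rcr, shl, shr, shl again (the undocumented
 * SAL alias of /4) and sar.
 */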
4330 static const struct opcode group3[] = {
4331 F(DstMem | SrcImm | NoWrite, em_test),
4332 F(DstMem | SrcImm | NoWrite, em_test),
4333 F(DstMem | SrcNone | Lock, em_not),
4334 F(DstMem | SrcNone | Lock, em_neg),
4335 F(DstXacc | Src2Mem, em_mul_ex),
4336 F(DstXacc | Src2Mem, em_imul_ex),
4337 F(DstXacc | Src2Mem, em_div_ex),
4338 F(DstXacc | Src2Mem, em_idiv_ex),
4341 static const struct opcode group4[] = {
4342 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4343 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4347 static const struct opcode group5[] = {
4348 F(DstMem | SrcNone | Lock, em_inc),
4349 F(DstMem | SrcNone | Lock, em_dec),
4350 I(SrcMem | NearBranch, em_call_near_abs),
4351 I(SrcMemFAddr | ImplicitOps, em_call_far),
4352 I(SrcMem | NearBranch, em_jmp_abs),
4353 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4354 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4357 static const struct opcode group6[] = {
4358 DI(Prot | DstMem, sldt),
4359 DI(Prot | DstMem, str),
4360 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4361 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4365 static const struct group_dual group7 = { {
4366 II(Mov | DstMem, em_sgdt, sgdt),
4367 II(Mov | DstMem, em_sidt, sidt),
4368 II(SrcMem | Priv, em_lgdt, lgdt),
4369 II(SrcMem | Priv, em_lidt, lidt),
4370 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4371 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4372 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4376 N, EXT(0, group7_rm3),
4377 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4378 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4382 static const struct opcode group8[] = {
4384 F(DstMem | SrcImmByte | NoWrite, em_bt),
4385 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4386 F(DstMem | SrcImmByte | Lock, em_btr),
4387 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4390 static const struct group_dual group9 = { {
4391 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4393 N, N, N, N, N, N, N, N,
4396 static const struct opcode group11[] = {
4397 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4401 static const struct gprefix pfx_0f_ae_7 = {
4402 I(SrcMem | ByteOp, em_clflush), N, N, N,
4405 static const struct group_dual group15 = { {
4406 I(ModRM | Aligned16, em_fxsave),
4407 I(ModRM | Aligned16, em_fxrstor),
4408 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4410 N, N, N, N, N, N, N, N,
4413 static const struct gprefix pfx_0f_6f_0f_7f = {
4414 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4417 static const struct instr_dual instr_dual_0f_2b = {
4421 static const struct gprefix pfx_0f_2b = {
4422 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4425 static const struct gprefix pfx_0f_28_0f_29 = {
4426 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4429 static const struct gprefix pfx_0f_e7 = {
4430 N, I(Sse, em_mov), N, N,
4433 static const struct escape escape_d9 = { {
4434 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4437 N, N, N, N, N, N, N, N,
4439 N, N, N, N, N, N, N, N,
4441 N, N, N, N, N, N, N, N,
4443 N, N, N, N, N, N, N, N,
4445 N, N, N, N, N, N, N, N,
4447 N, N, N, N, N, N, N, N,
4449 N, N, N, N, N, N, N, N,
4451 N, N, N, N, N, N, N, N,
4454 static const struct escape escape_db = { {
4455 N, N, N, N, N, N, N, N,
4458 N, N, N, N, N, N, N, N,
4460 N, N, N, N, N, N, N, N,
4462 N, N, N, N, N, N, N, N,
4464 N, N, N, N, N, N, N, N,
4466 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4468 N, N, N, N, N, N, N, N,
4470 N, N, N, N, N, N, N, N,
4472 N, N, N, N, N, N, N, N,
4475 static const struct escape escape_dd = { {
4476 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4479 N, N, N, N, N, N, N, N,
4481 N, N, N, N, N, N, N, N,
4483 N, N, N, N, N, N, N, N,
4485 N, N, N, N, N, N, N, N,
4487 N, N, N, N, N, N, N, N,
4489 N, N, N, N, N, N, N, N,
4491 N, N, N, N, N, N, N, N,
4493 N, N, N, N, N, N, N, N,
4496 static const struct instr_dual instr_dual_0f_c3 = {
4497 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4500 static const struct mode_dual mode_dual_63 = {
4501 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4504 static const struct opcode opcode_table[256] = {
4506 F6ALU(Lock, em_add),
4507 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4508 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4510 F6ALU(Lock | PageTable, em_or),
4511 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4514 F6ALU(Lock, em_adc),
4515 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4516 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4518 F6ALU(Lock, em_sbb),
4519 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4520 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4522 F6ALU(Lock | PageTable, em_and), N, N,
4524 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4526 F6ALU(Lock, em_xor), N, N,
4528 F6ALU(NoWrite, em_cmp), N, N,
4530 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4532 X8(I(SrcReg | Stack, em_push)),
4534 X8(I(DstReg | Stack, em_pop)),
4536 I(ImplicitOps | Stack | No64, em_pusha),
4537 I(ImplicitOps | Stack | No64, em_popa),
4538 N, MD(ModRM, &mode_dual_63),
4541 I(SrcImm | Mov | Stack, em_push),
4542 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4543 I(SrcImmByte | Mov | Stack, em_push),
4544 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4545 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4546 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4548 X16(D(SrcImmByte | NearBranch)),
4550 G(ByteOp | DstMem | SrcImm, group1),
4551 G(DstMem | SrcImm, group1),
4552 G(ByteOp | DstMem | SrcImm | No64, group1),
4553 G(DstMem | SrcImmByte, group1),
4554 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4555 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4557 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4558 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4559 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4560 D(ModRM | SrcMem | NoAccess | DstReg),
4561 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4564 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4566 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4567 I(SrcImmFAddr | No64, em_call_far), N,
4568 II(ImplicitOps | Stack, em_pushf, pushf),
4569 II(ImplicitOps | Stack, em_popf, popf),
4570 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4572 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4573 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4574 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4575 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4577 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4578 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4579 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4580 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4582 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4584 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4586 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4587 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4588 I(ImplicitOps | NearBranch, em_ret),
4589 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4590 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4591 G(ByteOp, group11), G(0, group11),
4593 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4594 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4595 I(ImplicitOps, em_ret_far),
4596 D(ImplicitOps), DI(SrcImmByte, intn),
4597 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4599 G(Src2One | ByteOp, group2), G(Src2One, group2),
4600 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4601 I(DstAcc | SrcImmUByte | No64, em_aam),
4602 I(DstAcc | SrcImmUByte | No64, em_aad),
4603 F(DstAcc | ByteOp | No64, em_salc),
4604 I(DstAcc | SrcXLat | ByteOp, em_mov),
4606 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4608 X3(I(SrcImmByte | NearBranch, em_loop)),
4609 I(SrcImmByte | NearBranch, em_jcxz),
4610 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4611 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4613 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4614 I(SrcImmFAddr | No64, em_jmp_far),
4615 D(SrcImmByte | ImplicitOps | NearBranch),
4616 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4617 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4619 N, DI(ImplicitOps, icebp), N, N,
4620 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4621 G(ByteOp, group3), G(0, group3),
4623 D(ImplicitOps), D(ImplicitOps),
4624 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4625 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
static const struct opcode twobyte_table[256] = {
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N, N, N, N, N, N, N,
	X16(D(DstReg | SrcMem | ModRM)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	X16(D(SrcImm | NearBranch)),
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	X8(I(DstReg, em_bswap)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
 * Insns below are selected by the mandatory prefix; the table itself is
 * indexed by the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};
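
/*
 * Illustrative decode walk (a sketch, following the tables above): for the
 * byte sequence 0f 38 f0 /r with no mandatory prefix (MOVBE r, m),
 * x86_decode_insn() takes twobyte_table[0x38], then opcode_map_0f_38[0xf0],
 * whose GP() entry dispatches on the SIMD prefix.  With no prefix it lands
 * on ID(0, &instr_dual_0f_38_f0), which finally picks the em_movbe entry
 * for a memory form (mod != 3) and N (undefined) for a register form.
 * A 66 or f2 prefix selects the N slots of the gprefix, so e.g. CRC32
 * (f2 0f 38 f0) is not emulated here.
 */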
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;

	return (size == 8) ? 4 : size;	/* immediates are at most 4 bytes */
}
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1: op->val = insn_fetch(s8, ctxt); break;
	case 2: op->val = insn_fetch(s16, ctxt); break;
	case 4: op->val = insn_fetch(s32, ctxt); break;
	case 8: op->val = insn_fetch(s64, ctxt); break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1: op->val &= 0xff; break;
		case 2: op->val &= 0xffff; break;
		case 4: op->val &= 0xffffffff; break;
		}
	}
done:
	return rc;
}
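
/*
 * Worked example (illustrative): for a byte immediate 0xf0 decoded with
 * size == 1, insn_fetch(s8, ...) sign-extends, so on a 64-bit host op->val
 * becomes 0xfffffffffffffff0.  With sign_extension == false (e.g.
 * OpImmUByte, used for I/O port numbers), the mask above restores
 * op->val == 0xf0.
 */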
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		break;
	case OpDX:
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		break;
	case OpXLat:
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea = address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		break;
	case OpImmFAddr:
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES: op->val = VCPU_SREG_ES; break;
	case OpCS: op->val = VCPU_SREG_CS; break;
	case OpSS: op->val = VCPU_SREG_SS; break;
	case OpDS: op->val = VCPU_SREG_DS; break;
	case OpFS: op->val = VCPU_SREG_FS; break;
	case OpGS: op->val = VCPU_SREG_GS; break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
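
/*
 * How the operand fields drive decode_operand() (illustrative): the
 * Dst/Src/Src2 operand kinds are packed into ctxt->d as OpBits-wide
 * fields, so the decoder extracts each with a shift and mask, e.g. for
 * ADD r/m32, r32 (flags DstMem | SrcReg | ModRM):
 *
 *	(ctxt->d >> SrcShift) & OpMask == OpReg -> decode_register_operand()
 *	(ctxt->d >> DstShift) & OpMask == OpMem -> ctxt->memop via ModRM
 */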
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
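		/*
		 * Worked example of the XOR trick above: the only legal
		 * operand sizes here are 2 and 4 (2 ^ 6 == 4, 4 ^ 6 == 2),
		 * so XOR-ing the default with 6 toggles between them;
		 * likewise def_ad_bytes ^ 12 toggles 8 <-> 4 in 64-bit mode.
		 */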
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
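		/*
		 * The bit tricks above rely on the VCPU_SREG_* numbering
		 * (ES=0, CS=1, SS=2, DS=3, FS=4, GS=5): e.g. for the 0x3e
		 * DS override, (0x3e >> 3) & 3 == 3 == VCPU_SREG_DS, and
		 * for 0x64, 0x64 & 7 == 4 == VCPU_SREG_FS.
		 */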
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}
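
	/*
	 * Group-decode example (illustrative): opcode 0x80 carries
	 * G(ByteOp | DstMem | SrcImm, group1), so the loop above replaces
	 * it with group1[(modrm >> 3) & 7] - e.g. a ModRM byte of 0xf8
	 * selects /7, the CMP member of group1 - and then merges that
	 * entry's flags into ctxt->d.
	 */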
	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;
		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}
	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
				ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies for REPE and REPNE.
	 * If the repeat string operation prefix is REPE/REPZ or REPNE/REPNZ,
	 * check the corresponding termination condition:
	 *   - if REPE/REPZ and ZF = 0 then done
	 *   - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->ops->get_fpu(ctxt);
	rc = asm_safe("fwait");
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
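
/*
 * Dispatch arithmetic above, illustrated: each fastop provides entry
 * points spaced FASTOP_SIZE bytes apart, one per operand size.  For a
 * non-byte op with dst.bytes == 4, __ffs(4) == 2 selects the third
 * (32-bit) entry; sizes 1/2/8 give offsets 0/1/3.  EFLAGS are shuttled
 * in and out around the call with push/popf, with X86_EFLAGS_IF forced
 * on so the host keeps taking interrupts while the snippet runs.
 */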
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
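
/*
 * Note on the memset above (a layout-dependent trick): it clears every
 * per-instruction decode field from rip_relative up to (but not
 * including) modrm in struct x86_emulate_ctxt in one call, so those two
 * members act as markers bracketing the "cleared on each decode" region.
 * A field outside that range must be cleared explicitly, as done here
 * for io_read and mem_read.
 */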
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;
5336 /* LOCK prefix is allowed only with some instructions */
5337 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5338 rc = emulate_ud(ctxt);
5342 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5343 rc = emulate_ud(ctxt);
	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}
		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
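	/*
	 * Example for the 0x98 case above: with op_bytes == 2 (CBW) and
	 * AL == 0x80, the (s8) cast sign-extends to AX == 0xff80; with
	 * op_bytes == 4 (CWDE) the (s16) cast widens AX into EAX the same
	 * way.
	 */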
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}
	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}
	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
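	/*
	 * Summary of the return codes above (illustrative): EMULATION_OK -
	 * the instruction completed, possibly with an exception queued in
	 * ctxt->exception; EMULATION_RESTART - re-invoke this function to
	 * continue a string instruction; EMULATION_INTERCEPTED - a nested
	 * intercept took over; EMULATION_FAILED - the instruction cannot
	 * be emulated.
	 */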
twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}