/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
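
/*
 * Illustrative note (not part of the original tables): an opcode entry
 * flagged e.g. DstMem | SrcReg | ModRM packs both operand kinds into one
 * u64 flags word; the decoder recovers them as
 * (flags >> DstShift) & OpMask == OpMem and
 * (flags >> SrcShift) & OpMask == OpReg.
 */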

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define InstrDual   (6<<15)	/* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)	/* Different instruction for 32/64 bit */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28) /* instruction is invalid in 64-bit (long) mode */
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
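
/*
 * These macros expand to repeated opcode-table entries, so a single
 * X16(...) entry fills sixteen consecutive slots of a 256-entry decode
 * table (the tables themselves appear later in the file).
 */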

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
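
/*
 * Dispatch sketch: for a non-byte operation the fastop() helper (defined
 * later in the file) advances the entry point by one FASTOP_SIZE slot per
 * power-of-two operand size, roughly
 * fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE, before calling it from
 * inline asm.
 */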

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
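
/*
 * Usage sketch: the fastop() dispatcher seeds the guest-visible arithmetic
 * flags roughly as
 * flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_FIXED
 * and merges the same bits back into ctxt->eflags afterwards.
 */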

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
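
/*
 * Expansion sketch: a declaration such as FASTOP2(add) (several appear
 * later in the file) emits em_add as four FASTOP_SIZE-aligned thunks --
 * addb/addw/addl/addq on al/ax/eax/rax against dl/dx/edx/rdx -- selected
 * by the offset arithmetic described above.
 */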

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     ".pushsection .fixup, \"ax\"\n" \
		     "3: movl $1, %[_fault]\n" \
		     "   jmp  2b\n" \
		     ".popsection\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
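
/*
 * Usage sketch: wrapping a possibly-faulting FPU instruction as
 * rc = asm_safe("fwait") makes a host fault come back as
 * X86EMUL_UNHANDLEABLE instead of propagating.
 */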

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}
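
/*
 * Worked example: starting from *reg == 0x1111222233334444, bytes == 2 and
 * val == 0xCCDD leaves 0x111122223333CCDD (upper bytes preserved), while
 * bytes == 4 and val == 0xAABBCCDD leaves 0x00000000AABBCCDD, matching the
 * zero-extension a 32-bit register write performs in 64-bit mode.
 */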

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
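
/*
 * Example: a 16-byte MOVDQA access (Aligned) takes #GP unless the linear
 * address is 16-byte aligned, while MOVDQU (Unaligned) never does;
 * __linearize below applies this via (insn_alignment(ctxt, size) - 1).
 */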

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible. We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself. Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
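
	/*
	 * Note: cur_size is at most 15 at this point, and for values 0..15
	 * "15UL ^ cur_size" is exactly "15 - cur_size" (the subtraction
	 * cannot borrow below bit 4), which the next line relies on.
	 */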
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn. So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
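
/*
 * Usage sketch (from the decoder later in the file): the opcode byte is
 * pulled with ctxt->b = insn_fetch(u8, ctxt); note that both macros assume
 * a local "rc" variable and a "done" label in the calling function.
 */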

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
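
/*
 * Example: with no REX prefix and a byte operation, modrm_reg == 6 selects
 * DH, i.e. byte 1 of RDX (6 & 3 == 2); with any REX prefix present the
 * same encoding selects SIL (the low byte of RSI) instead.
 */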

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
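
/*
 * The pointer arithmetic above works because FOP_SETCC emits each setcc
 * thunk 4-byte aligned (setcc %al; ret; padding), so em_setcc + 4 * cc is
 * the thunk for condition code cc -- e.g. cc == 4 ("e"/ZF set) hits sete.
 */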

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
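
/*
 * Worked example: BT mem, reg with 32-bit operands and bit index 100:
 * mask is ~31, sv becomes 96, the effective address advances by
 * 96 >> 3 == 12 bytes, and the in-word bit index becomes 100 & 31 == 4.
 */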

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
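
/*
 * Read-ahead note: for "rep ins" the first call fills rc->data with up to
 * a page/buffer/count worth of units in one go, and later iterations are
 * satisfied from the cache until rc->pos catches up with rc->end.
 */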

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		/* selector-style error code, with the IDT flag (bit 1) set */
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		/* TI bit set: the selector refers to the LDT */
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
						 ((u64)base3 << 32), ctxt))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can;
	 * the other cases are handled the same way here).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	/* A real-mode IVT entry is 4 bytes: 16-bit offset, then 16-bit segment. */
	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	return edx & bit(X86_FEATURE_LM);
}

#define GET_SMSTATE(type, smbase, offset) \
	({ \
	 type __val; \
	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
				      sizeof(__val)); \
	 if (r != X86EMUL_CONTINUE) \
		 return X86EMUL_UNHANDLEABLE; \
	 __val; \
	})
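
/*
 * Usage note: the rsm_* loaders below read saved state as, e.g.,
 * cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); on a failed physical read the
 * macro returns X86EMUL_UNHANDLEABLE from the *calling* function.
 */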

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smbase, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr4)
{
	int bad;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode. However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr4;
	int i;

	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smbase, 0x7fcc);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7fc8);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr4);
}
2473 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2475 struct desc_struct desc;
2482 for (i = 0; i < 16; i++)
2483 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2485 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2486 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2488 val = GET_SMSTATE(u32, smbase, 0x7f68);
2489 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2490 val = GET_SMSTATE(u32, smbase, 0x7f60);
2491 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2493 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2494 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
2495 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2496 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2497 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2498 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2500 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2501 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2502 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2503 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2504 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2505 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2507 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2508 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2509 ctxt->ops->set_idt(ctxt, &dt);
2511 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2512 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2513 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2514 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2515 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2516 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2518 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2519 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2520 ctxt->ops->set_gdt(ctxt, &dt);
2522 r = rsm_enter_protected_mode(ctxt, cr0, cr4);
2523 if (r != X86EMUL_CONTINUE)
2526 for (i = 0; i < 6; i++) {
2527 r = rsm_load_seg_64(ctxt, smbase, i);
2528 if (r != X86EMUL_CONTINUE)
2532 return X86EMUL_CONTINUE;
2535 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2537 unsigned long cr0, cr4, efer;
2541 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2542 return emulate_ud(ctxt);
2545 * Get back to real mode, to prepare a safe state in which to load
2546 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2547 * supports long mode.
2549 cr4 = ctxt->ops->get_cr(ctxt, 4);
2550 if (emulator_has_longmode(ctxt)) {
2551 struct desc_struct cs_desc;
2553 /* Zero CR4.PCIDE before CR0.PG. */
2554 if (cr4 & X86_CR4_PCIDE) {
2555 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2556 cr4 &= ~X86_CR4_PCIDE;
2559 /* A 32-bit code segment is required to clear EFER.LMA. */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
2563 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2566 /* For the 64-bit case, this will clear EFER.LMA. */
2567 cr0 = ctxt->ops->get_cr(ctxt, 0);
2568 if (cr0 & X86_CR0_PE)
2569 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2571 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
2572 if (cr4 & X86_CR4_PAE)
2573 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
	/* And finally go back to 32-bit mode.  */
	efer = 0;
	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2579 smbase = ctxt->ops->get_smbase(ctxt);
	/*
	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
	 * vCPU state (e.g. enter guest mode) before loading state from the
	 * SMM state-save area.
	 */
2586 if (ctxt->ops->pre_leave_smm(ctxt, smbase))
2587 return X86EMUL_UNHANDLEABLE;
2589 if (emulator_has_longmode(ctxt))
2590 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2592 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2594 if (ret != X86EMUL_CONTINUE) {
2595 /* FIXME: should triple fault */
2596 return X86EMUL_UNHANDLEABLE;
2599 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2600 ctxt->ops->set_nmi_mask(ctxt, false);
2602 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2603 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2604 return X86EMUL_CONTINUE;
2608 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2609 struct desc_struct *cs, struct desc_struct *ss)
2611 cs->l = 0; /* will be adjusted later */
2612 set_desc_base(cs, 0); /* flat segment */
2613 cs->g = 1; /* 4kb granularity */
2614 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2615 cs->type = 0x0b; /* Read, Execute, Accessed */
2617 cs->dpl = 0; /* will be adjusted later */
2622 set_desc_base(ss, 0); /* flat segment */
2623 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2624 ss->g = 1; /* 4kb granularity */
2626 ss->type = 0x03; /* Read/Write, Accessed */
2627 ss->d = 1; /* 32bit stack segment */
2634 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2640 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2641 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2642 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2645 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2647 const struct x86_emulate_ops *ops = ctxt->ops;
2648 u32 eax, ebx, ecx, edx;
	/*
	 * syscall should always be enabled in long mode, so the vendor-
	 * specific (CPUID-based) check is only applied when other modes are
	 * active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;
2659 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 64-bit guest running a 32-bit compat application will therefore
	 * raise #UD.  While this behaviour could be emulated to match the
	 * AMD response, AMD CPUs cannot be made to behave like Intel CPUs.
	 */
2668 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2669 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2670 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2673 /* AMD ("AuthenticAMD") */
2674 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2675 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2676 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2679 /* AMD ("AMDisbetter!") */
2680 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2681 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2682 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2685 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2689 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2691 const struct x86_emulate_ops *ops = ctxt->ops;
2692 struct desc_struct cs, ss;
2697 /* syscall is not available in real mode */
2698 if (ctxt->mode == X86EMUL_MODE_REAL ||
2699 ctxt->mode == X86EMUL_MODE_VM86)
2700 return emulate_ud(ctxt);
	if (!em_syscall_is_enabled(ctxt))
2703 return emulate_ud(ctxt);
2705 ops->get_msr(ctxt, MSR_EFER, &efer);
2706 setup_syscalls_segments(ctxt, &cs, &ss);
2708 if (!(efer & EFER_SCE))
2709 return emulate_ud(ctxt);
2711 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2713 cs_sel = (u16)(msr_data & 0xfffc);
2714 ss_sel = (u16)(msr_data + 8);
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
2720 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2721 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2723 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2724 if (efer & EFER_LMA) {
2725 #ifdef CONFIG_X86_64
2726 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2729 ctxt->mode == X86EMUL_MODE_PROT64 ?
2730 MSR_LSTAR : MSR_CSTAR, &msr_data);
2731 ctxt->_eip = msr_data;
2733 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2734 ctxt->eflags &= ~msr_data;
2735 ctxt->eflags |= X86_EFLAGS_FIXED;
2739 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2740 ctxt->_eip = (u32)msr_data;
2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2745 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2746 return X86EMUL_CONTINUE;
2749 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2751 const struct x86_emulate_ops *ops = ctxt->ops;
2752 struct desc_struct cs, ss;
2757 ops->get_msr(ctxt, MSR_EFER, &efer);
2758 /* inject #GP if in real mode */
2759 if (ctxt->mode == X86EMUL_MODE_REAL)
2760 return emulate_gp(ctxt, 0);
	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
2766 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2767 && !vendor_intel(ctxt))
2768 return emulate_ud(ctxt);
2770 /* sysenter/sysexit have not been tested in 64bit mode. */
2771 if (ctxt->mode == X86EMUL_MODE_PROT64)
2772 return X86EMUL_UNHANDLEABLE;
2774 setup_syscalls_segments(ctxt, &cs, &ss);
2776 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2777 if ((msr_data & 0xfffc) == 0x0)
2778 return emulate_gp(ctxt, 0);
2780 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2781 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2782 ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
2788 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2789 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2791 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2792 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2794 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;
2798 return X86EMUL_CONTINUE;
2801 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2803 const struct x86_emulate_ops *ops = ctxt->ops;
2804 struct desc_struct cs, ss;
2805 u64 msr_data, rcx, rdx;
2807 u16 cs_sel = 0, ss_sel = 0;
2809 /* inject #GP if in real mode or Virtual 8086 mode */
2810 if (ctxt->mode == X86EMUL_MODE_REAL ||
2811 ctxt->mode == X86EMUL_MODE_VM86)
2812 return emulate_gp(ctxt, 0);
2814 setup_syscalls_segments(ctxt, &cs, &ss);
2816 if ((ctxt->rex_prefix & 0x8) != 0x0)
2817 usermode = X86EMUL_MODE_PROT64;
2819 usermode = X86EMUL_MODE_PROT32;
2821 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2822 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2826 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2828 case X86EMUL_MODE_PROT32:
2829 cs_sel = (u16)(msr_data + 16);
2830 if ((msr_data & 0xfffc) == 0x0)
2831 return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
2837 cs_sel = (u16)(msr_data + 32);
2838 if (msr_data == 0x0)
2839 return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (emul_is_noncanonical_address(rcx, ctxt) ||
2844 emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
2849 ss_sel |= SEGMENT_RPL_MASK;
2851 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2852 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2857 return X86EMUL_CONTINUE;
2860 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
	int iopl;

	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2868 return ctxt->ops->cpl(ctxt) > iopl;
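/*
 * Consult the I/O permission bitmap in the TSS: the 16-bit field at
 * offset 102 of the TSS holds the offset of the bitmap from the TSS
 * base, and each bit covers one port.  A <len>-byte access to <port> is
 * allowed only if all <len> bits starting at bit (port & 7) of the byte
 * at bitmap + port/8 are clear, hence the (1 << len) - 1 mask below.
 */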
2871 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2874 const struct x86_emulate_ops *ops = ctxt->ops;
2875 struct desc_struct tr_seg;
2878 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2879 unsigned mask = (1 << len) - 1;
2882 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2885 if (desc_limit_scaled(&tr_seg) < 103)
2887 base = get_desc_base(&tr_seg);
2888 #ifdef CONFIG_X86_64
2889 base |= ((u64)base3) << 32;
2891 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2892 if (r != X86EMUL_CONTINUE)
2894 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2896 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2897 if (r != X86EMUL_CONTINUE)
2899 if ((perm >> bit_idx) & mask)
2904 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
2919 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
	/*
	 * Intel CPUs mask the counter and pointers in quite a strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
2925 #ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;
2929 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2932 case 0xa4: /* movsb */
2933 case 0xa5: /* movsd/w */
2934 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2936 case 0xaa: /* stosb */
2937 case 0xab: /* stosd/w */
2938 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2943 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2944 struct tss_segment_16 *tss)
2946 tss->ip = ctxt->_eip;
2947 tss->flag = ctxt->eflags;
2948 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2949 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2950 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2951 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2952 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2953 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2954 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2955 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2957 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2958 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2959 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2960 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2961 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2964 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2965 struct tss_segment_16 *tss)
2970 ctxt->_eip = tss->ip;
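	/* bit 1 of EFLAGS is architecturally fixed to 1 */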
2971 ctxt->eflags = tss->flag | 2;
2972 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2973 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2974 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2975 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2976 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2977 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2978 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2979 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
2985 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2986 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2987 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2988 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2989 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	/*
	 * Now load the segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
	 */
2997 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2998 X86_TRANSFER_TASK_SWITCH, NULL);
2999 if (ret != X86EMUL_CONTINUE)
3001 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3002 X86_TRANSFER_TASK_SWITCH, NULL);
3003 if (ret != X86EMUL_CONTINUE)
3005 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3006 X86_TRANSFER_TASK_SWITCH, NULL);
3007 if (ret != X86EMUL_CONTINUE)
3009 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3010 X86_TRANSFER_TASK_SWITCH, NULL);
3011 if (ret != X86EMUL_CONTINUE)
3013 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3014 X86_TRANSFER_TASK_SWITCH, NULL);
3015 if (ret != X86EMUL_CONTINUE)
3018 return X86EMUL_CONTINUE;
3021 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3022 u16 tss_selector, u16 old_tss_sel,
3023 ulong old_tss_base, struct desc_struct *new_desc)
3025 const struct x86_emulate_ops *ops = ctxt->ops;
3026 struct tss_segment_16 tss_seg;
3028 u32 new_tss_base = get_desc_base(new_desc);
3030 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3032 if (ret != X86EMUL_CONTINUE)
3035 save_state_to_tss16(ctxt, &tss_seg);
3037 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3039 if (ret != X86EMUL_CONTINUE)
3042 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3044 if (ret != X86EMUL_CONTINUE)
3047 if (old_tss_sel != 0xffff) {
3048 tss_seg.prev_task_link = old_tss_sel;
3050 ret = ops->write_std(ctxt, new_tss_base,
3051 &tss_seg.prev_task_link,
3052 sizeof tss_seg.prev_task_link,
3054 if (ret != X86EMUL_CONTINUE)
3058 return load_state_from_tss16(ctxt, &tss_seg);
3061 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3062 struct tss_segment_32 *tss)
3064 /* CR3 and ldt selector are not saved intentionally */
3065 tss->eip = ctxt->_eip;
3066 tss->eflags = ctxt->eflags;
3067 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3068 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3069 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3070 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3071 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3072 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3073 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3074 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3076 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3077 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3078 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3079 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3080 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3081 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3084 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3085 struct tss_segment_32 *tss)
3090 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3091 return emulate_gp(ctxt, 0);
3092 ctxt->_eip = tss->eip;
3093 ctxt->eflags = tss->eflags | 2;
3095 /* General purpose registers */
3096 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3097 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3098 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3099 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3100 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3101 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3102 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3103 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
3110 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3111 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3112 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3113 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3114 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3115 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3116 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3119 * If we're switching between Protected Mode and VM86, we need to make
3120 * sure to update the mode before loading the segment descriptors so
3121 * that the selectors are interpreted correctly.
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}
	/*
	 * Now load the segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
	 */
3135 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3136 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3137 if (ret != X86EMUL_CONTINUE)
3139 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3140 X86_TRANSFER_TASK_SWITCH, NULL);
3141 if (ret != X86EMUL_CONTINUE)
3143 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3144 X86_TRANSFER_TASK_SWITCH, NULL);
3145 if (ret != X86EMUL_CONTINUE)
3147 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3148 X86_TRANSFER_TASK_SWITCH, NULL);
3149 if (ret != X86EMUL_CONTINUE)
3151 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3152 X86_TRANSFER_TASK_SWITCH, NULL);
3153 if (ret != X86EMUL_CONTINUE)
3155 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3156 X86_TRANSFER_TASK_SWITCH, NULL);
3157 if (ret != X86EMUL_CONTINUE)
3159 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3160 X86_TRANSFER_TASK_SWITCH, NULL);
3165 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3166 u16 tss_selector, u16 old_tss_sel,
3167 ulong old_tss_base, struct desc_struct *new_desc)
3169 const struct x86_emulate_ops *ops = ctxt->ops;
3170 struct tss_segment_32 tss_seg;
3172 u32 new_tss_base = get_desc_base(new_desc);
3173 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3174 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3176 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3178 if (ret != X86EMUL_CONTINUE)
3181 save_state_to_tss32(ctxt, &tss_seg);
3183 /* Only GP registers and segment selectors are saved */
3184 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3185 ldt_sel_offset - eip_offset, &ctxt->exception);
3186 if (ret != X86EMUL_CONTINUE)
3189 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3191 if (ret != X86EMUL_CONTINUE)
3194 if (old_tss_sel != 0xffff) {
3195 tss_seg.prev_task_link = old_tss_sel;
3197 ret = ops->write_std(ctxt, new_tss_base,
3198 &tss_seg.prev_task_link,
3199 sizeof tss_seg.prev_task_link,
3201 if (ret != X86EMUL_CONTINUE)
3205 return load_state_from_tss32(ctxt, &tss_seg);
3208 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3209 u16 tss_selector, int idt_index, int reason,
3210 bool has_error_code, u32 error_code)
3212 const struct x86_emulate_ops *ops = ctxt->ops;
3213 struct desc_struct curr_tss_desc, next_tss_desc;
3215 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3216 ulong old_tss_base =
3217 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3219 ulong desc_addr, dr7;
3221 /* FIXME: old_tss_base == ~0 ? */
3223 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3224 if (ret != X86EMUL_CONTINUE)
3226 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3227 if (ret != X86EMUL_CONTINUE)
3230 /* FIXME: check that next_tss_desc is tss */
3233 * Check privileges. The three cases are task switch caused by...
3235 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3236 * 2. Exception/IRQ/iret: No check is performed
3237 * 3. jmp/call to TSS/task-gate: No check is performed since the
3238 * hardware checks it before exiting.
3240 if (reason == TASK_SWITCH_GATE) {
3241 if (idt_index != -1) {
3242 /* Software interrupts */
3243 struct desc_struct task_gate_desc;
3246 ret = read_interrupt_descriptor(ctxt, idt_index,
3248 if (ret != X86EMUL_CONTINUE)
3251 dpl = task_gate_desc.dpl;
3252 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3253 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3257 desc_limit = desc_limit_scaled(&next_tss_desc);
3258 if (!next_tss_desc.p ||
3259 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3260 desc_limit < 0x2b)) {
3261 return emulate_ts(ctxt, tss_selector & 0xfffc);
3264 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3265 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3266 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3269 if (reason == TASK_SWITCH_IRET)
3270 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in EFLAGS; note that old_tss_sel is not used after this point.
	 */
3274 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3275 old_tss_sel = 0xffff;
3277 if (next_tss_desc.type & 8)
3278 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3279 old_tss_base, &next_tss_desc);
3281 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3282 old_tss_base, &next_tss_desc);
3283 if (ret != X86EMUL_CONTINUE)
3286 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3287 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3289 if (reason != TASK_SWITCH_IRET) {
3290 next_tss_desc.type |= (1 << 1); /* set busy flag */
3291 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3294 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3295 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3297 if (has_error_code) {
3298 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3299 ctxt->lock_prefix = 0;
3300 ctxt->src.val = (unsigned long) error_code;
3301 ret = em_push(ctxt);
3304 ops->get_dr(ctxt, 7, &dr7);
3305 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3310 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3311 u16 tss_selector, int idt_index, int reason,
3312 bool has_error_code, u32 error_code)
3316 invalidate_registers(ctxt);
3317 ctxt->_eip = ctxt->eip;
3318 ctxt->dst.type = OP_NONE;
3320 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3321 has_error_code, error_code);
3323 if (rc == X86EMUL_CONTINUE) {
3324 ctxt->eip = ctxt->_eip;
3325 writeback_registers(ctxt);
3328 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3331 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3334 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3336 register_address_increment(ctxt, reg, df * op->bytes);
3337 op->addr.mem.ea = register_address(ctxt, reg);
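/*
 * DAS decimal-adjusts AL after a subtraction: if the low nibble is
 * greater than 9 (or AF is set), subtract 6 and set AF; if the original
 * AL was above 0x99 (or CF was set), also subtract 0x60 and set CF.
 * For example, 0x72 - 0x27 leaves AL = 0x4b, which DAS turns into the
 * packed-BCD result 0x45 (72 - 27 = 45).
 */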
3340 static int em_das(struct x86_emulate_ctxt *ctxt)
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val & 0xff;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
3365 /* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
3369 fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
3375 return X86EMUL_CONTINUE;
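/*
 * AAM divides AL by the immediate (10 unless overridden) and leaves the
 * quotient in AH and the remainder in AL, e.g. AL = 53 becomes AH = 5,
 * AL = 3.  An immediate of zero raises #DE.
 */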
3378 static int em_aam(struct x86_emulate_ctxt *ctxt)
	u8 al, ah;

	if (ctxt->src.val == 0)
3383 return emulate_de(ctxt);
3385 al = ctxt->dst.val & 0xff;
3386 ah = al / ctxt->src.val;
3387 al %= ctxt->src.val;
3389 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3391 /* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
3395 fastop(ctxt, em_or);
3397 return X86EMUL_CONTINUE;
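/*
 * AAD folds the two unpacked BCD digits in AH:AL back into binary,
 * AL = AL + AH * imm, and clears AH; e.g. AH = 5, AL = 3 with the
 * default base 10 becomes AL = 53.
 */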
3400 static int em_aad(struct x86_emulate_ctxt *ctxt)
3402 u8 al = ctxt->dst.val & 0xff;
3403 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3405 al = (al + (ah * ctxt->src.val)) & 0xff;
3407 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3409 /* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
3413 fastop(ctxt, em_or);
3415 return X86EMUL_CONTINUE;
3418 static int em_call(struct x86_emulate_ctxt *ctxt)
3421 long rel = ctxt->src.val;
3423 ctxt->src.val = (unsigned long)ctxt->_eip;
3424 rc = jmp_rel(ctxt, rel);
3425 if (rc != X86EMUL_CONTINUE)
3427 return em_push(ctxt);
3430 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3435 struct desc_struct old_desc, new_desc;
3436 const struct x86_emulate_ops *ops = ctxt->ops;
3437 int cpl = ctxt->ops->cpl(ctxt);
3438 enum x86emul_mode prev_mode = ctxt->mode;
3440 old_eip = ctxt->_eip;
3441 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3443 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3444 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3445 X86_TRANSFER_CALL_JMP, &new_desc);
3446 if (rc != X86EMUL_CONTINUE)
3449 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3450 if (rc != X86EMUL_CONTINUE)
3453 ctxt->src.val = old_cs;
3455 if (rc != X86EMUL_CONTINUE)
3458 ctxt->src.val = old_eip;
	/* If we failed, we tainted the memory, but the very least we should
	   restore cs */
3462 if (rc != X86EMUL_CONTINUE) {
3463 pr_warn_once("faulting far call emulation tainted memory\n");
3468 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3469 ctxt->mode = prev_mode;
3474 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3479 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3480 if (rc != X86EMUL_CONTINUE)
3482 rc = assign_eip_near(ctxt, eip);
3483 if (rc != X86EMUL_CONTINUE)
3485 rsp_increment(ctxt, ctxt->src.val);
3486 return X86EMUL_CONTINUE;
3489 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3491 /* Write back the register source. */
3492 ctxt->src.val = ctxt->dst.val;
3493 write_register_operand(&ctxt->src);
3495 /* Write back the memory destination with implicit LOCK prefix. */
3496 ctxt->dst.val = ctxt->src.orig_val;
3497 ctxt->lock_prefix = 1;
3498 return X86EMUL_CONTINUE;
3501 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3503 ctxt->dst.val = ctxt->src2.val;
3504 return fastop(ctxt, em_imul);
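/*
 * CWD/CDQ/CQO replicate the accumulator's sign bit into rDX: shifting
 * the sign bit down to bit 0 and subtracting 1 yields all-ones for a
 * non-negative source and zero for a negative one, which the final ~
 * inverts into the desired sign mask.
 */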
3507 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3509 ctxt->dst.type = OP_REG;
3510 ctxt->dst.bytes = ctxt->src.bytes;
3511 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3512 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3514 return X86EMUL_CONTINUE;
3517 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3521 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3522 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3523 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3524 return X86EMUL_CONTINUE;
3527 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3531 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3532 return emulate_gp(ctxt, 0);
3533 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3534 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3535 return X86EMUL_CONTINUE;
3538 static int em_mov(struct x86_emulate_ctxt *ctxt)
3540 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3541 return X86EMUL_CONTINUE;
3544 #define FFL(x) bit(X86_FEATURE_##x)
3546 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3548 u32 ebx, ecx, edx, eax = 1;
3552 * Check MOVBE is set in the guest-visible CPUID leaf.
3554 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3555 if (!(ecx & FFL(MOVBE)))
3556 return emulate_ud(ctxt);
3558 switch (ctxt->op_bytes) {
3561 * From MOVBE definition: "...When the operand size is 16 bits,
3562 * the upper word of the destination register remains unchanged
		 * Both casting ->valptr and ->val to u16 breaks strict
		 * aliasing rules, so we have to do the operation almost by
		 * hand.
		 */
3568 tmp = (u16)ctxt->src.val;
3569 ctxt->dst.val &= ~0xffffUL;
3570 ctxt->dst.val |= (unsigned long)swab16(tmp);
3573 ctxt->dst.val = swab32((u32)ctxt->src.val);
3576 ctxt->dst.val = swab64(ctxt->src.val);
3581 return X86EMUL_CONTINUE;
3584 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3586 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3587 return emulate_gp(ctxt, 0);
3589 /* Disable writeback. */
3590 ctxt->dst.type = OP_NONE;
3591 return X86EMUL_CONTINUE;
3594 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3598 if (ctxt->mode == X86EMUL_MODE_PROT64)
3599 val = ctxt->src.val & ~0ULL;
3601 val = ctxt->src.val & ~0U;
3603 /* #UD condition is already handled. */
3604 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3605 return emulate_gp(ctxt, 0);
3607 /* Disable writeback. */
3608 ctxt->dst.type = OP_NONE;
3609 return X86EMUL_CONTINUE;
3612 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3616 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3617 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3618 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3619 return emulate_gp(ctxt, 0);
3621 return X86EMUL_CONTINUE;
3624 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3628 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3629 return emulate_gp(ctxt, 0);
3631 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3632 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3633 return X86EMUL_CONTINUE;
3636 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3638 if (ctxt->modrm_reg > VCPU_SREG_GS)
3639 return emulate_ud(ctxt);
3641 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3642 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3643 ctxt->dst.bytes = 2;
3644 return X86EMUL_CONTINUE;
3647 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3649 u16 sel = ctxt->src.val;
3651 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3652 return emulate_ud(ctxt);
3654 if (ctxt->modrm_reg == VCPU_SREG_SS)
3655 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3657 /* Disable writeback. */
3658 ctxt->dst.type = OP_NONE;
3659 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3662 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3664 u16 sel = ctxt->src.val;
3666 /* Disable writeback. */
3667 ctxt->dst.type = OP_NONE;
3668 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3671 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3673 u16 sel = ctxt->src.val;
3675 /* Disable writeback. */
3676 ctxt->dst.type = OP_NONE;
3677 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3680 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3685 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3686 if (rc == X86EMUL_CONTINUE)
3687 ctxt->ops->invlpg(ctxt, linear);
3688 /* Disable writeback. */
3689 ctxt->dst.type = OP_NONE;
3690 return X86EMUL_CONTINUE;
3693 static int em_clts(struct x86_emulate_ctxt *ctxt)
3697 cr0 = ctxt->ops->get_cr(ctxt, 0);
3699 ctxt->ops->set_cr(ctxt, 0, cr0);
3700 return X86EMUL_CONTINUE;
3703 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3705 int rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
3710 /* Let the processor re-execute the fixed hypercall */
3711 ctxt->_eip = ctxt->eip;
3712 /* Disable writeback. */
3713 ctxt->dst.type = OP_NONE;
3714 return X86EMUL_CONTINUE;
3717 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3718 void (*get)(struct x86_emulate_ctxt *ctxt,
3719 struct desc_ptr *ptr))
3721 struct desc_ptr desc_ptr;
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
3725 get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
3730 /* Disable writeback. */
3731 ctxt->dst.type = OP_NONE;
3732 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3733 &desc_ptr, 2 + ctxt->op_bytes);
3736 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3738 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3741 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3743 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3746 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3748 struct desc_ptr desc_ptr;
3751 if (ctxt->mode == X86EMUL_MODE_PROT64)
3753 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3754 &desc_ptr.size, &desc_ptr.address,
3756 if (rc != X86EMUL_CONTINUE)
3758 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3759 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3760 return emulate_gp(ctxt, 0);
3762 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3764 ctxt->ops->set_idt(ctxt, &desc_ptr);
3765 /* Disable writeback. */
3766 ctxt->dst.type = OP_NONE;
3767 return X86EMUL_CONTINUE;
3770 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3772 return em_lgdt_lidt(ctxt, true);
3775 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3777 return em_lgdt_lidt(ctxt, false);
3780 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3782 if (ctxt->dst.type == OP_MEM)
3783 ctxt->dst.bytes = 2;
3784 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3785 return X86EMUL_CONTINUE;
3788 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3790 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3791 | (ctxt->src.val & 0x0f));
3792 ctxt->dst.type = OP_NONE;
3793 return X86EMUL_CONTINUE;
3796 static int em_loop(struct x86_emulate_ctxt *ctxt)
3798 int rc = X86EMUL_CONTINUE;
3800 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3801 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3802 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3803 rc = jmp_rel(ctxt, ctxt->src.val);
3808 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3810 int rc = X86EMUL_CONTINUE;
3812 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3813 rc = jmp_rel(ctxt, ctxt->src.val);
3818 static int em_in(struct x86_emulate_ctxt *ctxt)
3820 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3822 return X86EMUL_IO_NEEDED;
3824 return X86EMUL_CONTINUE;
3827 static int em_out(struct x86_emulate_ctxt *ctxt)
3829 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3831 /* Disable writeback. */
3832 ctxt->dst.type = OP_NONE;
3833 return X86EMUL_CONTINUE;
3836 static int em_cli(struct x86_emulate_ctxt *ctxt)
3838 if (emulator_bad_iopl(ctxt))
3839 return emulate_gp(ctxt, 0);
3841 ctxt->eflags &= ~X86_EFLAGS_IF;
3842 return X86EMUL_CONTINUE;
3845 static int em_sti(struct x86_emulate_ctxt *ctxt)
3847 if (emulator_bad_iopl(ctxt))
3848 return emulate_gp(ctxt, 0);
3850 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3851 ctxt->eflags |= X86_EFLAGS_IF;
3852 return X86EMUL_CONTINUE;
3855 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3857 u32 eax, ebx, ecx, edx;
3860 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3861 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3862 ctxt->ops->cpl(ctxt)) {
3863 return emulate_gp(ctxt, 0);
3866 eax = reg_read(ctxt, VCPU_REGS_RAX);
3867 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3868 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3869 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3870 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3871 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3872 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3873 return X86EMUL_CONTINUE;
3876 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3880 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3882 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3884 ctxt->eflags &= ~0xffUL;
3885 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3886 return X86EMUL_CONTINUE;
3889 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3891 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3892 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3893 return X86EMUL_CONTINUE;
3896 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3898 switch (ctxt->op_bytes) {
3899 #ifdef CONFIG_X86_64
3901 asm("bswap %0" : "+r"(ctxt->dst.val));
3905 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3908 return X86EMUL_CONTINUE;
3911 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3913 /* emulating clflush regardless of cpuid */
3914 return X86EMUL_CONTINUE;
3917 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3919 ctxt->dst.val = (s32) ctxt->src.val;
3920 return X86EMUL_CONTINUE;
3923 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3925 u32 eax = 1, ebx, ecx = 0, edx;
3927 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3928 if (!(edx & FFL(FXSR)))
3929 return emulate_ud(ctxt);
3931 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3932 return emulate_nm(ctxt);
3935 * Don't emulate a case that should never be hit, instead of working
3936 * around a lack of fxsave64/fxrstor64 on old compilers.
3938 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3939 return X86EMUL_UNHANDLEABLE;
3941 return X86EMUL_CONTINUE;
/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
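/*
 * Size of the legacy state area (FPU environment, MXCSR) up to the
 * first XMM register, plus 16 bytes for each XMM register included.
 */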
3948 static size_t __fxstate_size(int nregs)
3950 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3953 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3956 if (ctxt->mode == X86EMUL_MODE_PROT64)
3957 return __fxstate_size(16);
3959 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3960 return __fxstate_size(cr4_osfxsr ? 8 : 0);
/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same
 *       wrt. FXSAVE.
 *  3) 64-bit mode with REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode without REX.W prefix
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
3981 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3983 struct fxregs_state fx_state;
3986 rc = check_fxsr(ctxt);
3987 if (rc != X86EMUL_CONTINUE)
3990 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3992 if (rc != X86EMUL_CONTINUE)
3995 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
3996 fxstate_size(ctxt));
4000 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4001 * in the host registers (via FXSAVE) instead, so they won't be modified.
4002 * (preemption has to stay disabled until FXRSTOR).
4004 * Use noinline to keep the stack for other functions called by callers small.
4006 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4007 const size_t used_size)
4009 struct fxregs_state fx_tmp;
4012 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4013 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4014 __fxstate_size(16) - used_size);
4019 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4021 struct fxregs_state fx_state;
4025 rc = check_fxsr(ctxt);
4026 if (rc != X86EMUL_CONTINUE)
4029 size = fxstate_size(ctxt);
4030 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4031 if (rc != X86EMUL_CONTINUE)
4034 if (size < __fxstate_size(16)) {
4035 rc = fxregs_fixup(&fx_state, size);
4036 if (rc != X86EMUL_CONTINUE)
4040 if (fx_state.mxcsr >> 16) {
4041 rc = emulate_gp(ctxt, 0);
4045 if (rc == X86EMUL_CONTINUE)
4046 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}
4064 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4066 if (!valid_cr(ctxt->modrm_reg))
4067 return emulate_ud(ctxt);
4069 return X86EMUL_CONTINUE;
4072 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4074 u64 new_val = ctxt->src.val64;
4075 int cr = ctxt->modrm_reg;
4078 static u64 cr_reserved_bits[] = {
4079 0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);
4089 if (new_val & cr_reserved_bits[cr])
4090 return emulate_gp(ctxt, 0);
4095 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4096 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4097 return emulate_gp(ctxt, 0);
4099 cr4 = ctxt->ops->get_cr(ctxt, 4);
4100 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4102 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4103 !(cr4 & X86_CR4_PAE))
4104 return emulate_gp(ctxt, 0);
4111 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4112 if (efer & EFER_LMA) {
4114 u32 eax, ebx, ecx, edx;
4118 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4120 maxphyaddr = eax & 0xff;
4123 rsvd = rsvd_bits(maxphyaddr, 62);
4127 return emulate_gp(ctxt, 0);
4132 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4134 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4135 return emulate_gp(ctxt, 0);
4141 return X86EMUL_CONTINUE;
4144 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4148 ctxt->ops->get_dr(ctxt, 7, &dr7);
4150 /* Check if DR7.Global_Enable is set */
4151 return dr7 & (1 << 13);
4154 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);
4162 cr4 = ctxt->ops->get_cr(ctxt, 4);
4163 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4164 return emulate_ud(ctxt);
4166 if (check_dr7_gd(ctxt)) {
4169 ctxt->ops->get_dr(ctxt, 6, &dr6);
4171 dr6 |= DR6_BD | DR6_RTM;
4172 ctxt->ops->set_dr(ctxt, 6, dr6);
4173 return emulate_db(ctxt);
4176 return X86EMUL_CONTINUE;
4179 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4181 u64 new_val = ctxt->src.val64;
4182 int dr = ctxt->modrm_reg;
4184 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4185 return emulate_gp(ctxt, 0);
4187 return check_dr_read(ctxt);
4190 static int check_svme(struct x86_emulate_ctxt *ctxt)
4194 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4196 if (!(efer & EFER_SVME))
4197 return emulate_ud(ctxt);
4199 return X86EMUL_CONTINUE;
4202 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4204 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4206 /* Valid physical address? */
4207 if (rax & 0xffff000000000000ULL)
4208 return emulate_gp(ctxt, 0);
4210 return check_svme(ctxt);
4213 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4215 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4217 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4218 return emulate_ud(ctxt);
4220 return X86EMUL_CONTINUE;
4223 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4225 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4226 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4228 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4229 ctxt->ops->check_pmc(ctxt, rcx))
4230 return emulate_gp(ctxt, 0);
4232 return X86EMUL_CONTINUE;
4235 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4237 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4238 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4239 return emulate_gp(ctxt, 0);
4241 return X86EMUL_CONTINUE;
4244 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4246 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4247 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4248 return emulate_gp(ctxt, 0);
4250 return X86EMUL_CONTINUE;
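/*
 * Shorthand for the decode tables below: D() declares decode flags
 * only, I() attaches an ->execute handler, F() a fastop routine and N
 * marks an undefined opcode.  G()/GD() redirect decode to a group (GD
 * choosing by ModRM.mod), ID()/MD() choose between two entries by
 * ModRM.mod or by 64-bit vs. legacy mode, E() points at an FPU escape
 * table and GP() selects by the mandatory SIMD prefix.  The *2bv
 * variants emit a byte-op/full-size pair, and F6ALU() the six classic
 * encodings of an 8086 ALU opcode.
 */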
4253 #define D(_y) { .flags = (_y) }
4254 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4255 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4256 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4257 #define N D(NotImpl)
4258 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4259 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4260 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4261 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4262 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4263 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4264 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4265 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4266 #define II(_f, _e, _i) \
4267 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4268 #define IIP(_f, _e, _i, _p) \
4269 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4270 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4271 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4273 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4274 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4275 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4276 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4277 #define I2bvIP(_f, _e, _i, _p) \
4278 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4280 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4281 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4282 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4284 static const struct opcode group7_rm0[] = {
4286 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4290 static const struct opcode group7_rm1[] = {
4291 DI(SrcNone | Priv, monitor),
4292 DI(SrcNone | Priv, mwait),
4296 static const struct opcode group7_rm3[] = {
4297 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4298 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4299 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4300 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4301 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4302 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4303 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4304 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4307 static const struct opcode group7_rm7[] = {
4309 DIP(SrcNone, rdtscp, check_rdtsc),
4313 static const struct opcode group1[] = {
4315 F(Lock | PageTable, em_or),
4318 F(Lock | PageTable, em_and),
4324 static const struct opcode group1A[] = {
4325 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4328 static const struct opcode group2[] = {
4329 F(DstMem | ModRM, em_rol),
4330 F(DstMem | ModRM, em_ror),
4331 F(DstMem | ModRM, em_rcl),
4332 F(DstMem | ModRM, em_rcr),
4333 F(DstMem | ModRM, em_shl),
4334 F(DstMem | ModRM, em_shr),
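	/* /6 is an undocumented alias of /4 (SHL/SAL) */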
4335 F(DstMem | ModRM, em_shl),
4336 F(DstMem | ModRM, em_sar),
4339 static const struct opcode group3[] = {
4340 F(DstMem | SrcImm | NoWrite, em_test),
4341 F(DstMem | SrcImm | NoWrite, em_test),
4342 F(DstMem | SrcNone | Lock, em_not),
4343 F(DstMem | SrcNone | Lock, em_neg),
4344 F(DstXacc | Src2Mem, em_mul_ex),
4345 F(DstXacc | Src2Mem, em_imul_ex),
4346 F(DstXacc | Src2Mem, em_div_ex),
4347 F(DstXacc | Src2Mem, em_idiv_ex),
4350 static const struct opcode group4[] = {
4351 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4352 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4356 static const struct opcode group5[] = {
4357 F(DstMem | SrcNone | Lock, em_inc),
4358 F(DstMem | SrcNone | Lock, em_dec),
4359 I(SrcMem | NearBranch, em_call_near_abs),
4360 I(SrcMemFAddr | ImplicitOps, em_call_far),
4361 I(SrcMem | NearBranch, em_jmp_abs),
4362 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4363 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4366 static const struct opcode group6[] = {
4367 DI(Prot | DstMem, sldt),
4368 DI(Prot | DstMem, str),
4369 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4370 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4374 static const struct group_dual group7 = { {
4375 II(Mov | DstMem, em_sgdt, sgdt),
4376 II(Mov | DstMem, em_sidt, sidt),
4377 II(SrcMem | Priv, em_lgdt, lgdt),
4378 II(SrcMem | Priv, em_lidt, lidt),
4379 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4380 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4381 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4385 N, EXT(0, group7_rm3),
4386 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4387 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4391 static const struct opcode group8[] = {
4393 F(DstMem | SrcImmByte | NoWrite, em_bt),
4394 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4395 F(DstMem | SrcImmByte | Lock, em_btr),
4396 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4399 static const struct group_dual group9 = { {
4400 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4402 N, N, N, N, N, N, N, N,
4405 static const struct opcode group11[] = {
4406 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4410 static const struct gprefix pfx_0f_ae_7 = {
4411 I(SrcMem | ByteOp, em_clflush), N, N, N,
4414 static const struct group_dual group15 = { {
4415 I(ModRM | Aligned16, em_fxsave),
4416 I(ModRM | Aligned16, em_fxrstor),
4417 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4419 N, N, N, N, N, N, N, N,
4422 static const struct gprefix pfx_0f_6f_0f_7f = {
4423 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4426 static const struct instr_dual instr_dual_0f_2b = {
4430 static const struct gprefix pfx_0f_2b = {
4431 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4434 static const struct gprefix pfx_0f_28_0f_29 = {
4435 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4438 static const struct gprefix pfx_0f_e7 = {
4439 N, I(Sse, em_mov), N, N,
4442 static const struct escape escape_d9 = { {
4443 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4446 N, N, N, N, N, N, N, N,
4448 N, N, N, N, N, N, N, N,
4450 N, N, N, N, N, N, N, N,
4452 N, N, N, N, N, N, N, N,
4454 N, N, N, N, N, N, N, N,
4456 N, N, N, N, N, N, N, N,
4458 N, N, N, N, N, N, N, N,
4460 N, N, N, N, N, N, N, N,
4463 static const struct escape escape_db = { {
4464 N, N, N, N, N, N, N, N,
4467 N, N, N, N, N, N, N, N,
4469 N, N, N, N, N, N, N, N,
4471 N, N, N, N, N, N, N, N,
4473 N, N, N, N, N, N, N, N,
4475 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4477 N, N, N, N, N, N, N, N,
4479 N, N, N, N, N, N, N, N,
4481 N, N, N, N, N, N, N, N,
4484 static const struct escape escape_dd = { {
4485 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4488 N, N, N, N, N, N, N, N,
4490 N, N, N, N, N, N, N, N,
4492 N, N, N, N, N, N, N, N,
4494 N, N, N, N, N, N, N, N,
4496 N, N, N, N, N, N, N, N,
4498 N, N, N, N, N, N, N, N,
4500 N, N, N, N, N, N, N, N,
4502 N, N, N, N, N, N, N, N,
4505 static const struct instr_dual instr_dual_0f_c3 = {
4506 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4509 static const struct mode_dual mode_dual_63 = {
4510 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4513 static const struct opcode opcode_table[256] = {
4515 F6ALU(Lock, em_add),
4516 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4517 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4519 F6ALU(Lock | PageTable, em_or),
4520 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4523 F6ALU(Lock, em_adc),
4524 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4525 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4527 F6ALU(Lock, em_sbb),
4528 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4529 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4531 F6ALU(Lock | PageTable, em_and), N, N,
4533 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4535 F6ALU(Lock, em_xor), N, N,
4537 F6ALU(NoWrite, em_cmp), N, N,
4539 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4541 X8(I(SrcReg | Stack, em_push)),
4543 X8(I(DstReg | Stack, em_pop)),
4545 I(ImplicitOps | Stack | No64, em_pusha),
4546 I(ImplicitOps | Stack | No64, em_popa),
4547 N, MD(ModRM, &mode_dual_63),
4550 I(SrcImm | Mov | Stack, em_push),
4551 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4552 I(SrcImmByte | Mov | Stack, em_push),
4553 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4554 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4555 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4557 X16(D(SrcImmByte | NearBranch)),
4559 G(ByteOp | DstMem | SrcImm, group1),
4560 G(DstMem | SrcImm, group1),
4561 G(ByteOp | DstMem | SrcImm | No64, group1),
4562 G(DstMem | SrcImmByte, group1),
4563 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4564 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4566 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4567 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4568 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4569 D(ModRM | SrcMem | NoAccess | DstReg),
4570 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4573 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4575 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4576 I(SrcImmFAddr | No64, em_call_far), N,
4577 II(ImplicitOps | Stack, em_pushf, pushf),
4578 II(ImplicitOps | Stack, em_popf, popf),
4579 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4581 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4582 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4583 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4584 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4586 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4587 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4588 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4589 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4591 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4593 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4595 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4596 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4597 I(ImplicitOps | NearBranch, em_ret),
4598 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4599 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4600 G(ByteOp, group11), G(0, group11),
4602 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4603 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4604 I(ImplicitOps, em_ret_far),
4605 D(ImplicitOps), DI(SrcImmByte, intn),
4606 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4608 G(Src2One | ByteOp, group2), G(Src2One, group2),
4609 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4610 I(DstAcc | SrcImmUByte | No64, em_aam),
4611 I(DstAcc | SrcImmUByte | No64, em_aad),
4612 F(DstAcc | ByteOp | No64, em_salc),
4613 I(DstAcc | SrcXLat | ByteOp, em_mov),
4615 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4617 X3(I(SrcImmByte | NearBranch, em_loop)),
4618 I(SrcImmByte | NearBranch, em_jcxz),
4619 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4620 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4622 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4623 I(SrcImmFAddr | No64, em_jmp_far),
4624 D(SrcImmByte | ImplicitOps | NearBranch),
4625 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4626 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4628 N, DI(ImplicitOps, icebp), N, N,
4629 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4630 G(ByteOp, group3), G(0, group3),
4632 D(ImplicitOps), D(ImplicitOps),
4633 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4634 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4637 static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						    check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						    check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * The instructions below are selected first by the third opcode byte,
 * which indexes opcode_map_0f_38[], and then by the mandatory SIMD
 * prefix.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};
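
/*
 * Illustrative walk-through (not in the original source): decoding
 * "0f 38 f0 /r" (movbe r, m).  x86_decode_insn() below lands on
 * opcode_map_0f_38[0xf0], a GP() entry, so the Prefix case of the
 * group-decode loop picks one row of three_byte_0f_38_f0 by SIMD prefix:
 * with no prefix that is the ID() entry, whose instr_dual then selects
 * em_movbe for the memory form (mod != 3) and N for the register form.
 * The 66/f2/f3 rows are all N, so those encodings fail emulation.
 */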

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1: op->val = insn_fetch(s8, ctxt);  break;
	case 2: op->val = insn_fetch(s16, ctxt); break;
	case 4: op->val = insn_fetch(s32, ctxt); break;
	case 8: op->val = insn_fetch(s64, ctxt); break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1: op->val &= 0xff;       break;
		case 2: op->val &= 0xffff;     break;
		case 4: op->val &= 0xffffffff; break;
		}
	}
done:	/* insn_fetch() bails out here on a failed fetch */
	return rc;
}
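
/*
 * Example (illustrative, not in the original source): for a ByteOp
 * immediate, decode_imm(ctxt, op, 1, true) fetches the byte as an s8, so
 * an immediate of 0x90 lands in op->val sign-extended to
 * 0xffffffffffffff90.  The same byte decoded with sign_extension ==
 * false (OpImmUByte) is masked back down to 0x90 by the second switch.
 */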

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES: op->type = OP_IMM; op->val = VCPU_SREG_ES; break;
	case OpCS: op->type = OP_IMM; op->val = VCPU_SREG_CS; break;
	case OpSS: op->type = OP_IMM; op->val = VCPU_SREG_SS; break;
	case OpDS: op->type = OP_IMM; op->val = VCPU_SREG_DS; break;
	case OpFS: op->type = OP_IMM; op->val = VCPU_SREG_FS; break;
	case OpGS: op->type = OP_IMM; op->val = VCPU_SREG_GS; break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
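
/*
 * Illustrative note (not in the original source): the callers below pull
 * these 5-bit operand selectors out of the packed flag word.  For
 * "0f af /r" (imul), whose table entry is F(DstReg | SrcMem | ModRM,
 * em_imul):
 *
 *	(ctxt->d >> SrcShift) & OpMask == OpMem  ->  memory source
 *	(ctxt->d >> DstShift) & OpMask == OpReg  ->  register destination
 */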

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
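
	/*
	 * Worked example (illustrative, not in the original source): for
	 * the byte sequence 48 66 89 c8 in 64-bit mode, the loop above
	 * first records REX.W (0x48), but the 0x66 that follows falls out
	 * of the switch and clears rex_prefix again, so the insn decodes
	 * with op_bytes == 2 (4 ^ 6): a 16-bit "mov %cx,%ax", exactly as
	 * hardware treats a legacy prefix that follows REX.
	 */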

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}
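
	/*
	 * Background note (illustrative, not in the original source):
	 * c4/c5 are LES/LDS in 16/32-bit code, where mod == 3 is an
	 * invalid ModRM encoding for them, and VEX prefixes everywhere
	 * else.  That is exactly the test above: in 64-bit mode, or when
	 * mod == 3, the bytes can only mean VEX, which this emulator does
	 * not implement, so the insn is flagged NotImpl.
	 */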

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}
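
	/*
	 * Worked example (illustrative, not in the original source):
	 * "80 /7 ib" (cmp r/m8, imm8) starts from the
	 * G(ByteOp | DstMem | SrcImm, group1) entry at opcode 0x80.
	 * With modrm == 0xf9, goffset = (0xf9 >> 3) & 7 == 7, so the
	 * Group case above swaps in group1[7], the em_cmp row of the
	 * group1 table defined earlier in this file.
	 */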

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);

	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
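
/*
 * Illustrative note (not in the original source): a RIP-relative operand
 * is relative to the address of the *next* instruction, which is only
 * known once every byte has been fetched.  That is why the displacement
 * produced by decode_modrm() is only fixed up with ctxt->_eip at the very
 * end of decoding above, and then truncated to the current address width
 * by address_mask().
 */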

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies for REPE and
	 * REPNE.  Test if the repeat string operation prefix is REPE/REPZ
	 * or REPNE/REPNZ and, if so, check the corresponding termination
	 * condition:
	 *   - if REPE/REPZ and ZF = 0 then done
	 *   - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
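
/*
 * Example (illustrative, not in the original source): "f3 a6" is
 * repe cmpsb.  Opcode 0xa6 with REPE_PREFIX matches the first arm above,
 * so iteration stops as soon as a comparison clears ZF, i.e. at the first
 * differing byte, in addition to the RCX == 0 test applied by the caller.
 */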

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = asm_safe("fwait");

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
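
/*
 * Illustrative note (not in the original source): a fastop group lays
 * out the byte, word, long and quad variants of one flag-producing
 * operation at FASTOP_SIZE strides, so the size index is log2 of the
 * operand width: __ffs(1) == 0, __ffs(2) == 1, __ffs(4) == 2,
 * __ffs(8) == 3.  Operands travel in fixed registers (dst in rax, src
 * in rdx, src2 in rcx), and the guest's arithmetic flags are swapped in
 * and out around the call with push/popf.
 */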

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
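
/*
 * Illustrative note (not in the original source): the memset above leans
 * on the field layout of struct x86_emulate_ctxt, wiping every
 * per-instruction decode field that sits between rip_relative and modrm
 * in one stroke rather than assigning each member individually.
 */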

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;

			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val
						       : (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
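
/*
 * Illustrative note (not in the original source): in the REP path above,
 * EMULATION_RESTART re-runs the already-decoded instruction inside the
 * emulator.  Once every 1024 iterations (when the low ten bits of RCX
 * are clear) the code instead falls through to "goto done" without
 * advancing eip, so the vcpu re-enters the guest still pointing at the
 * REP instruction and pending events can be delivered before the string
 * operation continues.
 */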

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}
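
/*
 * Illustrative note (not in the original source): a cached guest physical
 * address can only stand in for a single linear address, so instructions
 * with two memory operands (TwoMemOp, e.g. movs/cmps) and REP string
 * operations that walk many addresses must take the full translation
 * path instead.
 */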