tools/testing/selftests/kvm/x86_64/amx_test.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * amx tests
 *
 * Copyright (C) 2021, Intel, Inc.
 *
 * Tests for amx #NM exception and save/restore.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#ifndef __x86_64__
# error This test is 64-bit only
#endif

#define VCPU_ID                         0
#define X86_FEATURE_XSAVE               (1 << 26)
#define X86_FEATURE_OSXSAVE             (1 << 27)

#define PAGE_SIZE                       (1 << 12)
#define NUM_TILES                       8
#define TILE_SIZE                       1024
#define XSAVE_SIZE                      ((NUM_TILES * TILE_SIZE) + PAGE_SIZE)

/* Constants for the tile configuration structure: */
#define MAX_TILES                       16
#define RESERVED_BYTES                  14

#define XFEATURE_XTILECFG               17
#define XFEATURE_XTILEDATA              18
#define XFEATURE_MASK_XTILECFG          (1 << XFEATURE_XTILECFG)
#define XFEATURE_MASK_XTILEDATA         (1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE             (XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)

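/*
 * CPUID leaf 0x1d enumerates the tile palettes; leaf 0xd enumerates the
 * XSAVE state components, including the offset and size of XTILEDATA.
 */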
#define TILE_CPUID                      0x1d
#define XSTATE_CPUID                    0xd
#define TILE_PALETTE_CPUID_SUBLEAVE     0x1
#define XSTATE_USER_STATE_SUBLEAVE      0x0

#define XSAVE_HDR_OFFSET                512

struct xsave_data {
        u8 area[XSAVE_SIZE];
} __aligned(64);

struct tile_config {
        u8  palette_id;
        u8  start_row;
        u8  reserved[RESERVED_BYTES];
        u16 colsb[MAX_TILES];
        u8  rows[MAX_TILES];
};

struct tile_data {
        u8 data[NUM_TILES * TILE_SIZE];
};

struct xtile_info {
        u16 bytes_per_tile;
        u16 bytes_per_row;
        u16 max_names;
        u16 max_rows;
        u32 xsave_offset;
        u32 xsave_size;
};

static struct xtile_info xtile;

static inline u64 __xgetbv(u32 index)
{
        u32 eax, edx;

        asm volatile("xgetbv"
                     : "=a" (eax), "=d" (edx)
                     : "c" (index));
        return eax | ((u64)edx << 32);
}

static inline void __xsetbv(u32 index, u64 value)
{
        u32 eax = value;
        u32 edx = value >> 32;

        asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}

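/*
 * The AMX instructions below are hand-coded as raw bytes so that the test
 * also builds with assemblers that do not know the AMX mnemonics.  The
 * operands are fixed by the encoding and match the asm constraints.
 * This one encodes ldtilecfg (%rax).
 */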
static inline void __ldtilecfg(void *cfg)
{
        asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00"
                     : : "a"(cfg));
}

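/* Encodes tileloadd (%rax,%rdx,1), %tmm0. */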
static inline void __tileloadd(void *tile)
{
        asm volatile(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10"
                     : : "a"(tile), "d"(0));
}

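/* Encodes tilerelease, which returns all tile state to its init state. */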
static inline void __tilerelease(void)
{
        asm volatile(".byte 0xc4,0xe2,0x78,0x49,0xc0" ::);
}

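/*
 * xsavec writes the compacted XSAVE format; the requested-feature bitmap
 * (RFBM) is passed in edx:eax.
 */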
static inline void __xsavec(struct xsave_data *data, uint64_t rfbm)
{
        uint32_t rfbm_lo = rfbm;
        uint32_t rfbm_hi = rfbm >> 32;

        asm volatile("xsavec (%%rdi)"
                     : : "D" (data), "a" (rfbm_lo), "d" (rfbm_hi)
                     : "memory");
}

static inline void check_cpuid_xsave(void)
{
        uint32_t eax, ebx, ecx, edx;

        eax = 1;
        ecx = 0;
        cpuid(&eax, &ebx, &ecx, &edx);
        if (!(ecx & X86_FEATURE_XSAVE))
                GUEST_ASSERT(!"cpuid: no CPU xsave support!");
        if (!(ecx & X86_FEATURE_OSXSAVE))
                GUEST_ASSERT(!"cpuid: no OS xsave support!");
}

static bool check_xsave_supports_xtile(void)
{
        return __xgetbv(0) & XFEATURE_MASK_XTILE;
}

static bool enum_xtile_config(void)
{
        u32 eax, ebx, ecx, edx;

        eax = TILE_CPUID;
        ecx = TILE_PALETTE_CPUID_SUBLEAVE;

        cpuid(&eax, &ebx, &ecx, &edx);
        if (!eax || !ebx || !ecx)
                return false;

        xtile.max_names = ebx >> 16;
        if (xtile.max_names < NUM_TILES)
                return false;

        xtile.bytes_per_tile = eax >> 16;
        if (xtile.bytes_per_tile < TILE_SIZE)
                return false;

        xtile.bytes_per_row = ebx;
        xtile.max_rows = ecx;

        return true;
}

static bool enum_xsave_tile(void)
{
        u32 eax, ebx, ecx, edx;

        eax = XSTATE_CPUID;
        ecx = XFEATURE_XTILEDATA;

        cpuid(&eax, &ebx, &ecx, &edx);
        if (!eax || !ebx)
                return false;

        xtile.xsave_offset = ebx;
        xtile.xsave_size = eax;

        return true;
}

static bool check_xsave_size(void)
{
        u32 eax, ebx, ecx, edx;
        bool valid = false;

        eax = XSTATE_CPUID;
        ecx = XSTATE_USER_STATE_SUBLEAVE;

        cpuid(&eax, &ebx, &ecx, &edx);
        if (ebx && ebx <= XSAVE_SIZE)
                valid = true;

        return valid;
}

static bool check_xtile_info(void)
{
        bool ret = false;

        if (!check_xsave_size())
                return ret;

        if (!enum_xsave_tile())
                return ret;

        if (!enum_xtile_config())
                return ret;

        if (sizeof(struct tile_data) >= xtile.xsave_size)
                ret = true;

        return ret;
}

static void set_tilecfg(struct tile_config *cfg)
{
        int i;

        /* Only palette id 1 */
        cfg->palette_id = 1;
        for (i = 0; i < xtile.max_names; i++) {
                cfg->colsb[i] = xtile.bytes_per_row;
                cfg->rows[i] = xtile.max_rows;
        }
}

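/*
 * XSTATE_BV is the first 8-byte field of the XSAVE header, which starts
 * at byte offset 512 of the XSAVE area.
 */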
static void set_xstatebv(void *data, uint64_t bv)
{
        *(uint64_t *)(data + XSAVE_HDR_OFFSET) = bv;
}

static u64 get_xstatebv(void *data)
{
        return *(u64 *)(data + XSAVE_HDR_OFFSET);
}

static void init_regs(void)
{
        uint64_t cr4, xcr0;

        /* Turn on CR4.OSXSAVE */
        cr4 = get_cr4();
        cr4 |= X86_CR4_OSXSAVE;
        set_cr4(cr4);

        xcr0 = __xgetbv(0);
        xcr0 |= XFEATURE_MASK_XTILE;
        __xsetbv(0x0, xcr0);
}

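/*
 * Guest flow: enable XSAVE and the XTILE components, sanity check the
 * CPUID enumeration (sync 1), enable AMX via IA32_XFD and configure the
 * tiles (syncs 2-3), load a tile so the host can verify save/restore
 * (sync 4), release the tiles and check xsavec()'s init optimization
 * (sync 5), then re-arm IA32_XFD (sync 6) so that touching tile data
 * raises #NM (handler syncs 7-9), and finish with one more save/restore
 * check (sync 10).
 */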
static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
                                                    struct tile_data *tiledata,
                                                    struct xsave_data *xsave_data)
{
        init_regs();
        check_cpuid_xsave();
        GUEST_ASSERT(check_xsave_supports_xtile());
        GUEST_ASSERT(check_xtile_info());

        /* check xtile configs */
        GUEST_ASSERT(xtile.xsave_offset == 2816);
        GUEST_ASSERT(xtile.xsave_size == 8192);
        GUEST_ASSERT(xtile.max_names == 8);
        GUEST_ASSERT(xtile.bytes_per_tile == 1024);
        GUEST_ASSERT(xtile.bytes_per_row == 64);
        GUEST_ASSERT(xtile.max_rows == 16);
        GUEST_SYNC(1);

        /* xfd=0, enable amx */
        wrmsr(MSR_IA32_XFD, 0);
        GUEST_SYNC(2);
        GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == 0);
        set_tilecfg(amx_cfg);
        __ldtilecfg(amx_cfg);
        GUEST_SYNC(3);
        /* Check save/restore when trapping to userspace */
        __tileloadd(tiledata);
        GUEST_SYNC(4);
        __tilerelease();
        GUEST_SYNC(5);
        /*
         * The tiles are in their init state after __tilerelease(), so even
         * though XSTATE_BV bit 18 (XTILEDATA) is pre-set below, xsavec()
         * must clear it again.
         */
        set_xstatebv(xsave_data, XFEATURE_MASK_XTILEDATA);
        __xsavec(xsave_data, XFEATURE_MASK_XTILEDATA);
        GUEST_ASSERT((get_xstatebv(xsave_data) & XFEATURE_MASK_XTILEDATA) == 0);

        /* xfd=0x40000, disable amx tiledata */
        wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILEDATA);
        GUEST_SYNC(6);
        GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILEDATA);
        set_tilecfg(amx_cfg);
        __ldtilecfg(amx_cfg);
        /* Trigger #NM exception */
        __tileloadd(tiledata);
        GUEST_SYNC(10);

        GUEST_DONE();
}

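/*
 * The guest touched tile data while IA32_XFD had XTILEDATA armed, so the
 * CPU raised #NM and latched the blocked feature in IA32_XFD_ERR.
 * XFD_ERR is read twice, with an exit to userspace in between, to verify
 * that it survives save/restore.  The handler must clear XFD_ERR and
 * disarm XFD before the faulting tileloadd can be restarted.
 */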
void guest_nm_handler(struct ex_regs *regs)
{
        /* Check if #NM is triggered by XFEATURE_MASK_XTILEDATA */
        GUEST_SYNC(7);
        GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILEDATA);
        GUEST_SYNC(8);
        GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILEDATA);
        /* Clear xfd_err */
        wrmsr(MSR_IA32_XFD_ERR, 0);
        /* xfd=0, enable amx */
        wrmsr(MSR_IA32_XFD, 0);
        GUEST_SYNC(9);
}

int main(int argc, char *argv[])
{
        struct kvm_cpuid_entry2 *entry;
        struct kvm_regs regs1, regs2;
        bool amx_supported = false;
        struct kvm_vm *vm;
        struct kvm_run *run;
        struct kvm_x86_state *state;
        int xsave_restore_size = 0;
        vm_vaddr_t amx_cfg, tiledata, xsavedata;
        struct ucall uc;
        u32 amx_offset;
        int stage, ret;

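        /*
         * XTILEDATA is a dynamically enabled XSTATE feature: permission
         * must be requested before any vCPU that will use it is created;
         * vm_xsave_req_perm() handles this for XTILEDATA.
         */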
        vm_xsave_req_perm(XSTATE_XTILE_DATA_BIT);

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);

        entry = kvm_get_supported_cpuid_entry(1);
        if (!(entry->ecx & X86_FEATURE_XSAVE)) {
                print_skip("XSAVE feature not supported");
                exit(KSFT_SKIP);
        }

        if (kvm_get_cpuid_max_basic() < 0xd) {
                print_skip("XSAVE enumeration (CPUID leaf 0xd) not supported");
                exit(KSFT_SKIP);
        }

        entry = kvm_get_supported_cpuid_index(0xd, 0);
        amx_supported = entry && !!(entry->eax & XFEATURE_MASK_XTILE);
        if (!amx_supported) {
                print_skip("AMX is not supported by the vCPU (eax=0x%x)",
                           entry ? entry->eax : 0);
                exit(KSFT_SKIP);
        }
        /* Get xsave/restore max size */
        xsave_restore_size = entry->ecx;

        run = vcpu_state(vm, VCPU_ID);
        vcpu_regs_get(vm, VCPU_ID, &regs1);

        /* Register #NM handler */
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);
        vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);

        /* amx cfg for guest_code */
        amx_cfg = vm_vaddr_alloc_page(vm);
        memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());

        /* amx tiledata for guest_code */
        tiledata = vm_vaddr_alloc_pages(vm, 2);
        memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize());

        /* xsave data for guest_code */
        xsavedata = vm_vaddr_alloc_pages(vm, 3);
        memset(addr_gva2hva(vm, xsavedata), 0, 3 * getpagesize());
        vcpu_args_set(vm, VCPU_ID, 3, amx_cfg, tiledata, xsavedata);

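        /*
         * Run the guest one sync point at a time.  After each stage, save
         * the vCPU state, destroy the VM, and load the state into a
         * freshly restarted VM to exercise AMX state save/restore.
         */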
        for (stage = 1; ; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                            "Stage %d: unexpected exit reason: %u (%s)\n",
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));

                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
                                  __FILE__, uc.args[1]);
                        /* NOT REACHED */
                case UCALL_SYNC:
                        switch (uc.args[1]) {
                        case 1:
                        case 2:
                        case 3:
                        case 5:
                        case 6:
                        case 7:
                        case 8:
                                fprintf(stderr, "GUEST_SYNC(%ld)\n", uc.args[1]);
                                break;
                        case 4:
                        case 10:
                                fprintf(stderr,
                                        "GUEST_SYNC(%ld), check save/restore status\n",
                                        uc.args[1]);

                                /*
                                 * Compacted format: the tile data is the
                                 * last XSAVE component, so its offset is
                                 * the total XSAVE area size minus the 8K
                                 * of tile data.
                                 */
                                amx_offset = xsave_restore_size -
                                             NUM_TILES * TILE_SIZE;
                                state = vcpu_save_state(vm, VCPU_ID);
                                void *amx_start = (void *)state->xsave + amx_offset;
                                void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
                                /* Only check the TMM0 register, i.e. one tile */
                                ret = memcmp(amx_start, tiles_data, TILE_SIZE);
                                TEST_ASSERT(ret == 0, "memcmp failed, ret=%d\n", ret);
                                kvm_x86_state_cleanup(state);
                                break;
                        case 9:
                                fprintf(stderr,
                                        "GUEST_SYNC(%ld), #NM exception and enable amx\n",
                                        uc.args[1]);
                                break;
                        }
                        break;
                case UCALL_DONE:
                        fprintf(stderr, "UCALL_DONE\n");
                        goto done;
                default:
                        TEST_FAIL("Unknown ucall %lu", uc.cmd);
                }

                state = vcpu_save_state(vm, VCPU_ID);
                memset(&regs1, 0, sizeof(regs1));
                vcpu_regs_get(vm, VCPU_ID, &regs1);

                kvm_vm_release(vm);

                /* Restore state in a new VM. */
                kvm_vm_restart(vm, O_RDWR);
                vm_vcpu_add(vm, VCPU_ID);
                vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
                vcpu_load_state(vm, VCPU_ID, state);
                run = vcpu_state(vm, VCPU_ID);
                kvm_x86_state_cleanup(state);

                memset(&regs2, 0, sizeof(regs2));
                vcpu_regs_get(vm, VCPU_ID, &regs2);
                TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
                            "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
                            (ulong) regs2.rdi, (ulong) regs2.rsi);
        }
done:
        kvm_vm_free(vm);
}