 * store hypervisor information instruction emulation functions.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 * Copyright IBM Corp. 2016
 * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <asm/asm-offsets.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
#include <asm/facility.h>
#include <asm/sthyi.h>
#define DED_WEIGHT 0xffff
 * CP and IFL as EBCDIC strings; a space character (SP, 0x40) marks the
 * end of the string, as the names are padded with trailing spaces.
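 * For example, CP is EBCDIC 0xc3 = 'C' and 0xd7 = 'P' followed by six
 * 0x40 space characters, and IFL is 0xc9 = 'I', 0xc6 = 'F', 0xd3 = 'L'
 * padded with spaces.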
#define CP  0xc3d7404040404040UL
#define IFL 0xc9c6d34040404040UL
        HDR_STACK_INCM = 0x20,
        u8 infhflg2; /* reserved */
        u8 infhval1; /* reserved */
        u8 infhval2; /* reserved */
        u8 infmflg1; /* reserved */
        u8 infmflg2; /* reserved */
        u8 infmval2; /* reserved */
        u8 infpflg2; /* reserved */
        u8 infpval2; /* reserved */
struct lpar_cpu_inf {
 * STHYI requires extensive locking in the higher hypervisors
 * and is very expensive in terms of computation and memory. Therefore
 * we cache the retrieved data, which stays valid for one second.
#define CACHE_VALID_JIFFIES HZ
static DEFINE_MUTEX(sthyi_mutex);
static struct sthyi_info sthyi_cache;
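/* All access to sthyi_cache is serialized by sthyi_mutex. */

/*
 * Returns the first eight EBCDIC characters of the diag224 name-table
 * entry for CPU type index ctidx, which is enough to compare against
 * the CP and IFL constants above.
 */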
static inline u64 cpu_id(u8 ctidx, void *diag224_buf)
        return *((u64 *)(diag224_buf + (ctidx + 1) * DIAG204_CPU_NAME_LEN));
 * Scales the cpu capping from the lpar range to the one expected in the
 * STHYI response.
 * diag204 reports a cap in hundredths of processor units.
 * z/VM's range for one core is 0 - 0x10000.
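 * Example: a diag204 cap of 150 (1.5 processor units) scales to
 * (0x10000 * 150) / 100 = 0x18000, i.e. 1.5 cores in the target units.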
static u32 scale_cap(u32 in)
        return (0x10000 * in) / 100;
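/*
 * Lays out the response: the machine section immediately follows the
 * header and the partition section follows the machine section, so
 * infhtotl ends up holding the total length of the response.
 */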
static void fill_hdr(struct sthyi_sctns *sctns)
        sctns->hdr.infhdln = sizeof(sctns->hdr);
        sctns->hdr.infmoff = sizeof(sctns->hdr);
        sctns->hdr.infmlen = sizeof(sctns->mac);
        sctns->hdr.infplen = sizeof(sctns->par);
        sctns->hdr.infpoff = sctns->hdr.infhdln + sctns->hdr.infmlen;
        sctns->hdr.infhtotl = sctns->hdr.infpoff + sctns->hdr.infplen;
static void fill_stsi_mac(struct sthyi_sctns *sctns,
                          struct sysinfo_1_1_1 *sysinfo)
        if (stsi(sysinfo, 1, 1, 1))
        sclp_ocf_cpc_name_copy(sctns->mac.infmname);
        memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
        memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
        memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
        memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));
        sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD;
static void fill_stsi_par(struct sthyi_sctns *sctns,
                          struct sysinfo_2_2_2 *sysinfo)
        if (stsi(sysinfo, 2, 2, 2))
        sctns->par.infppnum = sysinfo->lpar_number;
        memcpy(sctns->par.infppnam, sysinfo->name, sizeof(sctns->par.infppnam));
        sctns->par.infpval1 |= PAR_ID_VLD;
static void fill_stsi(struct sthyi_sctns *sctns)
        /* Errors are handled through the validity bits in the response. */
        sysinfo = (void *)__get_free_page(GFP_KERNEL);
        fill_stsi_mac(sctns, sysinfo);
        fill_stsi_par(sctns, sysinfo);
        free_pages((unsigned long)sysinfo, 0);
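/*
 * Counts the machine's physical CP and IFL cores from the diag204
 * physical block, split into dedicated and shared; a weight of
 * DED_WEIGHT (0xffff) marks a dedicated core.
 */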
static void fill_diag_mac(struct sthyi_sctns *sctns,
                          struct diag204_x_phys_block *block,
        for (i = 0; i < block->hdr.cpus; i++) {
                switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
                        if (block->cpus[i].weight == DED_WEIGHT)
                                sctns->mac.infmdcps++;
                                sctns->mac.infmscps++;
                        if (block->cpus[i].weight == DED_WEIGHT)
                                sctns->mac.infmdifl++;
                                sctns->mac.infmsifl++;
        sctns->mac.infmval1 |= MAC_CNT_VLD;
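/*
 * Accumulates one partition block's online CP and IFL data into
 * *part_inf: shared and dedicated core counts, capping values and, when
 * the calling LPAR is capped, its current weights. all_weight sums the
 * weights of every partition for the capacity split done later.
 */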
/* Returns a pointer to the next partition block. */
static struct diag204_x_part_block *lpar_cpu_inf(struct lpar_cpu_inf *part_inf,
                                                 struct diag204_x_part_block *block)
        int i, capped = 0, weight_cp = 0, weight_ifl = 0;
        struct cpu_inf *cpu_inf;
        for (i = 0; i < block->hdr.rcpus; i++) {
                if (!(block->cpus[i].cflag & DIAG204_CPU_ONLINE))
                switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
                        cpu_inf = &part_inf->cp;
                        if (block->cpus[i].cur_weight < DED_WEIGHT)
                                weight_cp |= block->cpus[i].cur_weight;
                        cpu_inf = &part_inf->ifl;
                        if (block->cpus[i].cur_weight < DED_WEIGHT)
                                weight_ifl |= block->cpus[i].cur_weight;
                capped |= block->cpus[i].cflag & DIAG204_CPU_CAPPED;
                cpu_inf->lpar_cap |= block->cpus[i].cpu_type_cap;
                cpu_inf->lpar_grp_cap |= block->cpus[i].group_cpu_type_cap;
                if (block->cpus[i].weight == DED_WEIGHT)
                        cpu_inf->cpu_num_ded += 1;
                        cpu_inf->cpu_num_shd += 1;
        if (this_lpar && capped) {
                part_inf->cp.lpar_weight = weight_cp;
                part_inf->ifl.lpar_weight = weight_ifl;
        part_inf->cp.all_weight += weight_cp;
        part_inf->ifl.all_weight += weight_ifl;
        return (struct diag204_x_part_block *)&block->cpus[i];
static void fill_diag(struct sthyi_sctns *sctns)
        void *diag224_buf = NULL;
        struct diag204_x_info_blk_hdr *ti_hdr;
        struct diag204_x_part_block *part_block;
        struct diag204_x_phys_block *phys_block;
        struct lpar_cpu_inf lpar_inf = {};
        /* Errors are handled through the validity bits in the response. */
        pages = diag204((unsigned long)DIAG204_SUBC_RSI |
                        (unsigned long)DIAG204_INFO_EXT, 0, NULL);
        diag204_buf = vmalloc(PAGE_SIZE * pages);
        r = diag204((unsigned long)DIAG204_SUBC_STIB7 |
                    (unsigned long)DIAG204_INFO_EXT, pages, diag204_buf);
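        /*
         * diag224 provides the CPU-type name table that cpu_id() uses to
         * classify each CPU entry as a CP or an IFL.
         */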
        diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
        if (!diag224_buf || diag224(diag224_buf))
        ti_hdr = diag204_buf;
        part_block = diag204_buf + sizeof(*ti_hdr);
        for (i = 0; i < ti_hdr->npar; i++) {
                 * For the calling lpar we also need to get the cpu
                 * caps and weights. The time information block header
                 * specifies the offset to the partition block of the
                 * caller lpar, so we know when we process its data.
                this_lpar = (void *)part_block - diag204_buf == ti_hdr->this_part;
                part_block = lpar_cpu_inf(&lpar_inf, this_lpar, diag224_buf,
        phys_block = (struct diag204_x_phys_block *)part_block;
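        /*
         * Re-derive the calling LPAR's own partition block; it supplies
         * the multithreading flag and the hardware group name below.
         */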
        part_block = diag204_buf + ti_hdr->this_part;
        if (part_block->hdr.mtid)
                sctns->par.infpflg1 = PAR_MT_EN;
        sctns->par.infpval1 |= PAR_GRP_VLD;
        sctns->par.infplgcp = scale_cap(lpar_inf.cp.lpar_grp_cap);
        sctns->par.infplgif = scale_cap(lpar_inf.ifl.lpar_grp_cap);
        memcpy(sctns->par.infplgnm, part_block->hdr.hardware_group_name,
               sizeof(sctns->par.infplgnm));
        sctns->par.infpscps = lpar_inf.cp.cpu_num_shd;
        sctns->par.infpdcps = lpar_inf.cp.cpu_num_ded;
        sctns->par.infpsifl = lpar_inf.ifl.cpu_num_shd;
        sctns->par.infpdifl = lpar_inf.ifl.cpu_num_ded;
        sctns->par.infpval1 |= PAR_PCNT_VLD;
        sctns->par.infpabcp = scale_cap(lpar_inf.cp.lpar_cap);
        sctns->par.infpabif = scale_cap(lpar_inf.ifl.lpar_cap);
        sctns->par.infpval1 |= PAR_ABS_VLD;
         * Everything below needs global performance data to be
         * meaningful.
        if (!(ti_hdr->flags & DIAG204_LPAR_PHYS_FLG)) {
                sctns->hdr.infhflg1 |= HDR_PERF_UNAV;
        fill_diag_mac(sctns, phys_block, diag224_buf);
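        /*
         * Weight-based capacity: the LPAR's share of the machine's shared
         * cores, scaled to 0x10000 per core. For example, 10 shared CPs
         * and a weight of 200 out of an overall weight of 1000 yield
         * 10 * 0x10000 * 200 / 1000 = 0x20000, i.e. two cores worth.
         */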
        if (lpar_inf.cp.lpar_weight) {
                sctns->par.infpwbcp = sctns->mac.infmscps * 0x10000 *
                                      lpar_inf.cp.lpar_weight / lpar_inf.cp.all_weight;
        if (lpar_inf.ifl.lpar_weight) {
                sctns->par.infpwbif = sctns->mac.infmsifl * 0x10000 *
                                      lpar_inf.ifl.lpar_weight / lpar_inf.ifl.all_weight;
        sctns->par.infpval1 |= PAR_WGHT_VLD;
        free_page((unsigned long)diag224_buf);
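/*
 * Executes the real STHYI instruction (opcode 0xb256): the function
 * code is passed in general register 0, the response buffer address in
 * register 2, and the return code comes back in register 2 + 1, i.e.
 * register 3. The condition code is returned to the caller.
 */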
static int sthyi(u64 vaddr, u64 *rc)
        register u64 code asm("0") = 0;
        register u64 addr asm("2") = vaddr;
        register u64 rcode asm("3");
                ".insn rre,0xB2560000,%[code],%[addr]\n"
                : [cc] "=d" (cc), "=d" (rcode)
                : [code] "d" (code), [addr] "a" (addr)
static int fill_dst(void *dst, u64 *rc)
        struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;
         * If the facility is on, we don't want to emulate the instruction.
         * We ask the hypervisor to provide the data.
        if (test_facility(74))
                return sthyi((u64)dst, rc);
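        /*
         * Facility 74 is the store-hypervisor-information facility; when
         * it is not installed, the response page is emulated using the
         * fill_* helpers above instead.
         */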
static int sthyi_init_cache(void)
        if (sthyi_cache.info)
        sthyi_cache.info = (void *)get_zeroed_page(GFP_KERNEL);
        if (!sthyi_cache.info)
        sthyi_cache.end = jiffies - 1; /* expired */
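        /*
         * A freshly allocated cache starts out expired so that the first
         * sthyi_fill() call repopulates it via sthyi_update_cache().
         */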
static int sthyi_update_cache(u64 *rc)
        memset(sthyi_cache.info, 0, PAGE_SIZE);
        r = fill_dst(sthyi_cache.info, rc);
        sthyi_cache.end = jiffies + CACHE_VALID_JIFFIES;
 * sthyi_fill - Fill page with data returned by the STHYI instruction
 * @dst: Pointer to zeroed page
 * @rc: Pointer for storing the return code of the instruction
 * Fills the destination with system information returned by the STHYI
 * instruction. The data is generated by emulation, or by executing STHYI
 * if the instruction is available. The return value is the condition code
 * that would be returned; the rc parameter is the return code which is
 * passed in register R2+1.
int sthyi_fill(void *dst, u64 *rc)
        mutex_lock(&sthyi_mutex);
        r = sthyi_init_cache();
        if (time_is_before_jiffies(sthyi_cache.end)) {
                r = sthyi_update_cache(rc);
        memcpy(dst, sthyi_cache.info, PAGE_SIZE);
        mutex_unlock(&sthyi_mutex);
EXPORT_SYMBOL_GPL(sthyi_fill);
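/*
 * The s390_sthyi system call exposes the same information to user space:
 * for function code STHYI_FC_CP_IFL_CAP it copies the STHYI response
 * page to the supplied buffer and optionally stores the instruction's
 * return code via the return_code pointer.
 */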
SYSCALL_DEFINE4(s390_sthyi, unsigned long, function_code, void __user *, buffer,
                u64 __user *, return_code, unsigned long, flags)
        if (function_code != STHYI_FC_CP_IFL_CAP)
        info = (void *)get_zeroed_page(GFP_KERNEL);
        r = sthyi_fill(info, &sthyi_rc);
        if (return_code && put_user(sthyi_rc, return_code)) {
        if (copy_to_user(buffer, info, PAGE_SIZE))
        free_page((unsigned long)info);