2 * Linux on zSeries Channel Measurement Facility support
4 * Copyright IBM Corp. 2000, 2006
6 * Authors: Arnd Bergmann <arndb@de.ibm.com>
7 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #define KMSG_COMPONENT "cio"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 #include <linux/bootmem.h>
30 #include <linux/device.h>
31 #include <linux/init.h>
32 #include <linux/list.h>
33 #include <linux/export.h>
34 #include <linux/moduleparam.h>
35 #include <linux/slab.h>
36 #include <linux/timex.h> /* get_tod_clock() */
38 #include <asm/ccwdev.h>
41 #include <asm/div64.h>
50 * parameter to enable cmf during boot, possible uses are:
51 * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
52 * used on any subchannel
53 * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
54 * <num> subchannel, where <num> is an integer
55 * between 1 and 65535, default is 1024
57 #define ARGSTRING "s390cmf"
/* indices for READCMB */
enum cmb_index {
	/* pseudo index: overall utilization in 0.1% units */
	avg_utilization = -1,
	/* basic and extended format: */
	cmb_ssch_rsch_count = 0,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
76 * enum cmb_format - types of supported measurement block formats
78 * @CMF_BASIC: traditional channel measurement blocks supported
79 * by all machines that we run on
80 * @CMF_EXTENDED: improved format that was introduced with the z990
82 * @CMF_AUTODETECT: default: use extended format when running on a machine
83 * supporting extended format, otherwise fall back to
93 * format - actual format for all measurement blocks
95 * The format module parameter can be set to a value of 0 (zero)
96 * or 1, indicating basic or extended format as described for
99 static int format = CMF_AUTODETECT;
100 module_param(format, bint, 0444);
103 * struct cmb_operations - functions to use depending on cmb_format
105 * Most of these functions operate on a struct ccw_device. There is only
106 * one instance of struct cmb_operations because the format of the measurement
107 * data is guaranteed to be the same for every ccw_device.
109 * @alloc: allocate memory for a channel measurement block,
110 * either with the help of a special pool or with kmalloc
111 * @free: free memory allocated with @alloc
112 * @set: enable or disable measurement
113 * @read: read a measurement entry at an index
114 * @readall: read a measurement block in a common format
115 * @reset: clear the data in the associated measurement block and
116 * reset its time stamp
118 struct cmb_operations {
119 int (*alloc) (struct ccw_device *);
120 void (*free) (struct ccw_device *);
121 int (*set) (struct ccw_device *, u32);
122 u64 (*read) (struct ccw_device *, int);
123 int (*readall)(struct ccw_device *, struct cmbdata *);
124 void (*reset) (struct ccw_device *);
126 struct attribute_group *attr_group;
128 static struct cmb_operations *cmbops;
/* per-device bookkeeping for one measurement block */
struct cmb_data {
	void *hw_block;		/* Pointer to block updated by hardware */
	void *last_block;	/* Last changed block copied from hardware block */
	int size;		/* Size of hw_block and last_block */
	unsigned long long last_update;  /* when last_block was updated */
};
138 * Our user interface is designed in terms of nanoseconds,
139 * while the hardware measures total times in its own
142 static inline u64 time_to_nsec(u32 value)
144 return ((u64)value) * 128000ull;
148 * Users are usually interested in average times,
149 * not accumulated time.
150 * This also helps us with atomicity problems
151 * when reading single values.
153 static inline u64 time_to_avg_nsec(u32 value, u32 count)
157 /* no samples yet, avoid division by 0 */
161 /* value comes in units of 128 µsec */
162 ret = time_to_nsec(value);
172 * Activate or deactivate the channel monitor. When area is NULL,
173 * the monitor is deactivated. The channel monitor needs to
174 * be active in order to measure subchannels, which also need
177 static inline void cmf_activate(void *area, unsigned int onoff)
179 register void * __gpr2 asm("2");
180 register long __gpr1 asm("1");
184 /* activate channel measurement */
185 asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
188 static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
189 unsigned long address)
191 struct subchannel *sch = to_subchannel(cdev->dev.parent);
194 sch->config.mme = mme;
195 sch->config.mbfc = mbfc;
196 /* address can be either a block address or a block index */
198 sch->config.mba = address;
200 sch->config.mbi = address;
202 ret = cio_commit_config(sch);
203 if (!mme && ret == -ENODEV) {
205 * The task was to disable measurement block updates but
206 * the subchannel is already gone. Report success.
213 struct set_schib_struct {
216 unsigned long address;
217 wait_queue_head_t wait;
221 #define CMF_PENDING 1
222 #define SET_SCHIB_TIMEOUT (10 * HZ)
224 static int set_schib_wait(struct ccw_device *cdev, u32 mme,
225 int mbfc, unsigned long address)
227 struct set_schib_struct set_data;
230 spin_lock_irq(cdev->ccwlock);
231 if (!cdev->private->cmb)
234 ret = set_schib(cdev, mme, mbfc, address);
238 /* if the device is not online, don't even try again */
239 if (cdev->private->state != DEV_STATE_ONLINE)
242 init_waitqueue_head(&set_data.wait);
244 set_data.mbfc = mbfc;
245 set_data.address = address;
246 set_data.ret = CMF_PENDING;
248 cdev->private->state = DEV_STATE_CMFCHANGE;
249 cdev->private->cmb_wait = &set_data;
250 spin_unlock_irq(cdev->ccwlock);
252 ret = wait_event_interruptible_timeout(set_data.wait,
253 set_data.ret != CMF_PENDING,
255 spin_lock_irq(cdev->ccwlock);
257 if (set_data.ret == CMF_PENDING) {
258 set_data.ret = (ret == 0) ? -ETIME : ret;
259 if (cdev->private->state == DEV_STATE_CMFCHANGE)
260 cdev->private->state = DEV_STATE_ONLINE;
263 cdev->private->cmb_wait = NULL;
266 spin_unlock_irq(cdev->ccwlock);
270 void retry_set_schib(struct ccw_device *cdev)
272 struct set_schib_struct *set_data = cdev->private->cmb_wait;
277 set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
279 wake_up(&set_data->wait);
282 static int cmf_copy_block(struct ccw_device *cdev)
284 struct subchannel *sch = to_subchannel(cdev->dev.parent);
285 struct cmb_data *cmb_data;
288 if (cio_update_schib(sch))
291 if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
292 /* Don't copy if a start function is in progress. */
293 if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
294 (scsw_actl(&sch->schib.scsw) &
295 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
296 (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
299 cmb_data = cdev->private->cmb;
300 hw_block = cmb_data->hw_block;
301 memcpy(cmb_data->last_block, hw_block, cmb_data->size);
302 cmb_data->last_update = get_tod_clock();
306 struct copy_block_struct {
307 wait_queue_head_t wait;
311 static int cmf_cmb_copy_wait(struct ccw_device *cdev)
313 struct copy_block_struct copy_block;
316 spin_lock_irq(cdev->ccwlock);
317 if (!cdev->private->cmb)
320 ret = cmf_copy_block(cdev);
324 if (cdev->private->state != DEV_STATE_ONLINE)
327 init_waitqueue_head(©_block.wait);
328 copy_block.ret = CMF_PENDING;
330 cdev->private->state = DEV_STATE_CMFUPDATE;
331 cdev->private->cmb_wait = ©_block;
332 spin_unlock_irq(cdev->ccwlock);
334 ret = wait_event_interruptible(copy_block.wait,
335 copy_block.ret != CMF_PENDING);
336 spin_lock_irq(cdev->ccwlock);
338 if (copy_block.ret == CMF_PENDING) {
339 copy_block.ret = -ERESTARTSYS;
340 if (cdev->private->state == DEV_STATE_CMFUPDATE)
341 cdev->private->state = DEV_STATE_ONLINE;
344 cdev->private->cmb_wait = NULL;
345 ret = copy_block.ret;
347 spin_unlock_irq(cdev->ccwlock);
351 void cmf_retry_copy_block(struct ccw_device *cdev)
353 struct copy_block_struct *copy_block = cdev->private->cmb_wait;
358 copy_block->ret = cmf_copy_block(cdev);
359 wake_up(©_block->wait);
362 static void cmf_generic_reset(struct ccw_device *cdev)
364 struct cmb_data *cmb_data;
366 spin_lock_irq(cdev->ccwlock);
367 cmb_data = cdev->private->cmb;
369 memset(cmb_data->last_block, 0, cmb_data->size);
371 * Need to reset hw block as well to make the hardware start
374 memset(cmb_data->hw_block, 0, cmb_data->size);
375 cmb_data->last_update = 0;
377 cdev->private->cmb_start_time = get_tod_clock();
378 spin_unlock_irq(cdev->ccwlock);
382 * struct cmb_area - container for global cmb data
384 * @mem: pointer to CMBs (only in basic measurement mode)
385 * @list: contains a linked list of all subchannels
386 * @num_channels: number of channels to be measured
387 * @lock: protect concurrent access to @mem and @list
391 struct list_head list;
396 static struct cmb_area cmb_area = {
397 .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
398 .list = LIST_HEAD_INIT(cmb_area.list),
399 .num_channels = 1024,
402 /* ****** old style CMB handling ********/
405 * Basic channel measurement blocks are allocated in one contiguous
406 * block of memory, which can not be moved as long as any channel
407 * is active. Therefore, a maximum number of subchannels needs to
408 * be defined somewhere. This is a module parameter, defaulting to
409 * a reasonable value of 1024, or 32 kb of memory.
410 * Current kernels don't allow kmalloc with more than 128kb, so the
414 module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
417 * struct cmb - basic channel measurement block
418 * @ssch_rsch_count: number of ssch and rsch
419 * @sample_count: number of samples
420 * @device_connect_time: time of device connect
421 * @function_pending_time: time of function pending
422 * @device_disconnect_time: time of device disconnect
423 * @control_unit_queuing_time: time of control unit queuing
424 * @device_active_only_time: time of device active only
425 * @reserved: unused in basic measurement mode
427 * The measurement block as used by the hardware. The fields are described
428 * further in z/Architecture Principles of Operation, chapter 17.
430 * The cmb area made up from these blocks must be a contiguous array and may
431 * not be reallocated or freed.
432 * Only one cmb area can be present in the system.
437 u32 device_connect_time;
438 u32 function_pending_time;
439 u32 device_disconnect_time;
440 u32 control_unit_queuing_time;
441 u32 device_active_only_time;
446 * Insert a single device into the cmb_area list.
447 * Called with cmb_area.lock held from alloc_cmb.
449 static int alloc_cmb_single(struct ccw_device *cdev,
450 struct cmb_data *cmb_data)
453 struct ccw_device_private *node;
456 spin_lock_irq(cdev->ccwlock);
457 if (!list_empty(&cdev->private->cmb_list)) {
463 * Find first unused cmb in cmb_area.mem.
464 * This is a little tricky: cmb_area.list
465 * remains sorted by ->cmb->hw_data pointers.
468 list_for_each_entry(node, &cmb_area.list, cmb_list) {
469 struct cmb_data *data;
471 if ((struct cmb*)data->hw_block > cmb)
475 if (cmb - cmb_area.mem >= cmb_area.num_channels) {
481 list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
482 cmb_data->hw_block = cmb;
483 cdev->private->cmb = cmb_data;
486 spin_unlock_irq(cdev->ccwlock);
490 static int alloc_cmb(struct ccw_device *cdev)
495 struct cmb_data *cmb_data;
497 /* Allocate private cmb_data. */
498 cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
502 cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
503 if (!cmb_data->last_block) {
507 cmb_data->size = sizeof(struct cmb);
508 spin_lock(&cmb_area.lock);
511 /* there is no user yet, so we need a new area */
512 size = sizeof(struct cmb) * cmb_area.num_channels;
513 WARN_ON(!list_empty(&cmb_area.list));
515 spin_unlock(&cmb_area.lock);
516 mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
518 spin_lock(&cmb_area.lock);
521 /* ok, another thread was faster */
522 free_pages((unsigned long)mem, get_order(size));
529 memset(mem, 0, size);
531 cmf_activate(cmb_area.mem, CMF_ON);
535 /* do the actual allocation */
536 ret = alloc_cmb_single(cdev, cmb_data);
538 spin_unlock(&cmb_area.lock);
540 kfree(cmb_data->last_block);
546 static void free_cmb(struct ccw_device *cdev)
548 struct ccw_device_private *priv;
549 struct cmb_data *cmb_data;
551 spin_lock(&cmb_area.lock);
552 spin_lock_irq(cdev->ccwlock);
554 priv = cdev->private;
555 cmb_data = priv->cmb;
558 kfree(cmb_data->last_block);
560 list_del_init(&priv->cmb_list);
562 if (list_empty(&cmb_area.list)) {
564 size = sizeof(struct cmb) * cmb_area.num_channels;
565 cmf_activate(NULL, CMF_OFF);
566 free_pages((unsigned long)cmb_area.mem, get_order(size));
569 spin_unlock_irq(cdev->ccwlock);
570 spin_unlock(&cmb_area.lock);
573 static int set_cmb(struct ccw_device *cdev, u32 mme)
576 struct cmb_data *cmb_data;
579 spin_lock_irqsave(cdev->ccwlock, flags);
580 if (!cdev->private->cmb) {
581 spin_unlock_irqrestore(cdev->ccwlock, flags);
584 cmb_data = cdev->private->cmb;
585 offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
586 spin_unlock_irqrestore(cdev->ccwlock, flags);
588 return set_schib_wait(cdev, mme, 0, offset);
591 /* calculate utilization in 0.1 percent units */
592 static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
593 u64 device_disconnect_time, u64 start_time)
595 u64 utilization, elapsed_time;
597 utilization = time_to_nsec(device_connect_time +
598 function_pending_time +
599 device_disconnect_time);
601 elapsed_time = get_tod_clock() - start_time;
602 elapsed_time = tod_to_ns(elapsed_time);
603 elapsed_time /= 1000;
605 return elapsed_time ? (utilization / elapsed_time) : 0;
608 static u64 read_cmb(struct ccw_device *cdev, int index)
610 struct cmb_data *cmb_data;
616 spin_lock_irqsave(cdev->ccwlock, flags);
617 cmb_data = cdev->private->cmb;
621 cmb = cmb_data->hw_block;
623 case avg_utilization:
624 ret = __cmb_utilization(cmb->device_connect_time,
625 cmb->function_pending_time,
626 cmb->device_disconnect_time,
627 cdev->private->cmb_start_time);
629 case cmb_ssch_rsch_count:
630 ret = cmb->ssch_rsch_count;
632 case cmb_sample_count:
633 ret = cmb->sample_count;
635 case cmb_device_connect_time:
636 val = cmb->device_connect_time;
638 case cmb_function_pending_time:
639 val = cmb->function_pending_time;
641 case cmb_device_disconnect_time:
642 val = cmb->device_disconnect_time;
644 case cmb_control_unit_queuing_time:
645 val = cmb->control_unit_queuing_time;
647 case cmb_device_active_only_time:
648 val = cmb->device_active_only_time;
653 ret = time_to_avg_nsec(val, cmb->sample_count);
655 spin_unlock_irqrestore(cdev->ccwlock, flags);
659 static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
662 struct cmb_data *cmb_data;
667 ret = cmf_cmb_copy_wait(cdev);
670 spin_lock_irqsave(cdev->ccwlock, flags);
671 cmb_data = cdev->private->cmb;
676 if (cmb_data->last_update == 0) {
680 cmb = cmb_data->last_block;
681 time = cmb_data->last_update - cdev->private->cmb_start_time;
683 memset(data, 0, sizeof(struct cmbdata));
685 /* we only know values before device_busy_time */
686 data->size = offsetof(struct cmbdata, device_busy_time);
688 data->elapsed_time = tod_to_ns(time);
690 /* copy data to new structure */
691 data->ssch_rsch_count = cmb->ssch_rsch_count;
692 data->sample_count = cmb->sample_count;
694 /* time fields are converted to nanoseconds while copying */
695 data->device_connect_time = time_to_nsec(cmb->device_connect_time);
696 data->function_pending_time = time_to_nsec(cmb->function_pending_time);
697 data->device_disconnect_time =
698 time_to_nsec(cmb->device_disconnect_time);
699 data->control_unit_queuing_time
700 = time_to_nsec(cmb->control_unit_queuing_time);
701 data->device_active_only_time
702 = time_to_nsec(cmb->device_active_only_time);
705 spin_unlock_irqrestore(cdev->ccwlock, flags);
/* Basic-mode reset hook: shared generic implementation. */
static void reset_cmb(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
714 static int cmf_enabled(struct ccw_device *cdev)
718 spin_lock_irq(cdev->ccwlock);
719 enabled = !!cdev->private->cmb;
720 spin_unlock_irq(cdev->ccwlock);
725 static struct attribute_group cmf_attr_group;
727 static struct cmb_operations cmbops_basic = {
732 .readall = readall_cmb,
734 .attr_group = &cmf_attr_group,
737 /* ******** extended cmb handling ********/
740 * struct cmbe - extended channel measurement block
741 * @ssch_rsch_count: number of ssch and rsch
742 * @sample_count: number of samples
743 * @device_connect_time: time of device connect
744 * @function_pending_time: time of function pending
745 * @device_disconnect_time: time of device disconnect
746 * @control_unit_queuing_time: time of control unit queuing
747 * @device_active_only_time: time of device active only
748 * @device_busy_time: time of device busy
749 * @initial_command_response_time: initial command response time
752 * The measurement block as used by the hardware. May be in any 64 bit physical
754 * The fields are described further in z/Architecture Principles of Operation,
755 * third edition, chapter 17.
760 u32 device_connect_time;
761 u32 function_pending_time;
762 u32 device_disconnect_time;
763 u32 control_unit_queuing_time;
764 u32 device_active_only_time;
765 u32 device_busy_time;
766 u32 initial_command_response_time;
768 } __packed __aligned(64);
770 static struct kmem_cache *cmbe_cache;
772 static int alloc_cmbe(struct ccw_device *cdev)
774 struct cmb_data *cmb_data;
778 cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
782 cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
786 cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
787 if (!cmb_data->last_block)
790 cmb_data->size = sizeof(*cmbe);
791 cmb_data->hw_block = cmbe;
793 spin_lock(&cmb_area.lock);
794 spin_lock_irq(cdev->ccwlock);
795 if (cdev->private->cmb)
798 cdev->private->cmb = cmb_data;
800 /* activate global measurement if this is the first channel */
801 if (list_empty(&cmb_area.list))
802 cmf_activate(NULL, CMF_ON);
803 list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
805 spin_unlock_irq(cdev->ccwlock);
806 spin_unlock(&cmb_area.lock);
810 spin_unlock_irq(cdev->ccwlock);
811 spin_unlock(&cmb_area.lock);
815 kfree(cmb_data->last_block);
817 kmem_cache_free(cmbe_cache, cmbe);
822 static void free_cmbe(struct ccw_device *cdev)
824 struct cmb_data *cmb_data;
826 spin_lock(&cmb_area.lock);
827 spin_lock_irq(cdev->ccwlock);
828 cmb_data = cdev->private->cmb;
829 cdev->private->cmb = NULL;
831 kfree(cmb_data->last_block);
832 kmem_cache_free(cmbe_cache, cmb_data->hw_block);
836 /* deactivate global measurement if this is the last channel */
837 list_del_init(&cdev->private->cmb_list);
838 if (list_empty(&cmb_area.list))
839 cmf_activate(NULL, CMF_OFF);
840 spin_unlock_irq(cdev->ccwlock);
841 spin_unlock(&cmb_area.lock);
844 static int set_cmbe(struct ccw_device *cdev, u32 mme)
847 struct cmb_data *cmb_data;
850 spin_lock_irqsave(cdev->ccwlock, flags);
851 if (!cdev->private->cmb) {
852 spin_unlock_irqrestore(cdev->ccwlock, flags);
855 cmb_data = cdev->private->cmb;
856 mba = mme ? (unsigned long) cmb_data->hw_block : 0;
857 spin_unlock_irqrestore(cdev->ccwlock, flags);
859 return set_schib_wait(cdev, mme, 1, mba);
862 static u64 read_cmbe(struct ccw_device *cdev, int index)
864 struct cmb_data *cmb_data;
870 spin_lock_irqsave(cdev->ccwlock, flags);
871 cmb_data = cdev->private->cmb;
875 cmb = cmb_data->hw_block;
877 case avg_utilization:
878 ret = __cmb_utilization(cmb->device_connect_time,
879 cmb->function_pending_time,
880 cmb->device_disconnect_time,
881 cdev->private->cmb_start_time);
883 case cmb_ssch_rsch_count:
884 ret = cmb->ssch_rsch_count;
886 case cmb_sample_count:
887 ret = cmb->sample_count;
889 case cmb_device_connect_time:
890 val = cmb->device_connect_time;
892 case cmb_function_pending_time:
893 val = cmb->function_pending_time;
895 case cmb_device_disconnect_time:
896 val = cmb->device_disconnect_time;
898 case cmb_control_unit_queuing_time:
899 val = cmb->control_unit_queuing_time;
901 case cmb_device_active_only_time:
902 val = cmb->device_active_only_time;
904 case cmb_device_busy_time:
905 val = cmb->device_busy_time;
907 case cmb_initial_command_response_time:
908 val = cmb->initial_command_response_time;
913 ret = time_to_avg_nsec(val, cmb->sample_count);
915 spin_unlock_irqrestore(cdev->ccwlock, flags);
919 static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
922 struct cmb_data *cmb_data;
927 ret = cmf_cmb_copy_wait(cdev);
930 spin_lock_irqsave(cdev->ccwlock, flags);
931 cmb_data = cdev->private->cmb;
936 if (cmb_data->last_update == 0) {
940 time = cmb_data->last_update - cdev->private->cmb_start_time;
942 memset (data, 0, sizeof(struct cmbdata));
944 /* we only know values before device_busy_time */
945 data->size = offsetof(struct cmbdata, device_busy_time);
947 data->elapsed_time = tod_to_ns(time);
949 cmb = cmb_data->last_block;
950 /* copy data to new structure */
951 data->ssch_rsch_count = cmb->ssch_rsch_count;
952 data->sample_count = cmb->sample_count;
954 /* time fields are converted to nanoseconds while copying */
955 data->device_connect_time = time_to_nsec(cmb->device_connect_time);
956 data->function_pending_time = time_to_nsec(cmb->function_pending_time);
957 data->device_disconnect_time =
958 time_to_nsec(cmb->device_disconnect_time);
959 data->control_unit_queuing_time
960 = time_to_nsec(cmb->control_unit_queuing_time);
961 data->device_active_only_time
962 = time_to_nsec(cmb->device_active_only_time);
963 data->device_busy_time = time_to_nsec(cmb->device_busy_time);
964 data->initial_command_response_time
965 = time_to_nsec(cmb->initial_command_response_time);
969 spin_unlock_irqrestore(cdev->ccwlock, flags);
/* Extended-mode reset hook: shared generic implementation. */
static void reset_cmbe(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
978 static struct attribute_group cmf_attr_group_ext;
980 static struct cmb_operations cmbops_extended = {
985 .readall = readall_cmbe,
987 .attr_group = &cmf_attr_group_ext,
990 static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
992 return sprintf(buf, "%lld\n",
993 (unsigned long long) cmf_read(to_ccwdev(dev), idx));
996 static ssize_t cmb_show_avg_sample_interval(struct device *dev,
997 struct device_attribute *attr,
1000 struct ccw_device *cdev = to_ccwdev(dev);
1001 unsigned long count;
1004 count = cmf_read(cdev, cmb_sample_count);
1005 spin_lock_irq(cdev->ccwlock);
1007 interval = get_tod_clock() - cdev->private->cmb_start_time;
1008 interval = tod_to_ns(interval);
1012 spin_unlock_irq(cdev->ccwlock);
1013 return sprintf(buf, "%ld\n", interval);
1016 static ssize_t cmb_show_avg_utilization(struct device *dev,
1017 struct device_attribute *attr,
1020 unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
1022 return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
1025 #define cmf_attr(name) \
1026 static ssize_t show_##name(struct device *dev, \
1027 struct device_attribute *attr, char *buf) \
1028 { return cmb_show_attr((dev), buf, cmb_##name); } \
1029 static DEVICE_ATTR(name, 0444, show_##name, NULL);
1031 #define cmf_attr_avg(name) \
1032 static ssize_t show_avg_##name(struct device *dev, \
1033 struct device_attribute *attr, char *buf) \
1034 { return cmb_show_attr((dev), buf, cmb_##name); } \
1035 static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
1037 cmf_attr(ssch_rsch_count);
1038 cmf_attr(sample_count);
1039 cmf_attr_avg(device_connect_time);
1040 cmf_attr_avg(function_pending_time);
1041 cmf_attr_avg(device_disconnect_time);
1042 cmf_attr_avg(control_unit_queuing_time);
1043 cmf_attr_avg(device_active_only_time);
1044 cmf_attr_avg(device_busy_time);
1045 cmf_attr_avg(initial_command_response_time);
1047 static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
1049 static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
1051 static struct attribute *cmf_attributes[] = {
1052 &dev_attr_avg_sample_interval.attr,
1053 &dev_attr_avg_utilization.attr,
1054 &dev_attr_ssch_rsch_count.attr,
1055 &dev_attr_sample_count.attr,
1056 &dev_attr_avg_device_connect_time.attr,
1057 &dev_attr_avg_function_pending_time.attr,
1058 &dev_attr_avg_device_disconnect_time.attr,
1059 &dev_attr_avg_control_unit_queuing_time.attr,
1060 &dev_attr_avg_device_active_only_time.attr,
1064 static struct attribute_group cmf_attr_group = {
1066 .attrs = cmf_attributes,
1069 static struct attribute *cmf_attributes_ext[] = {
1070 &dev_attr_avg_sample_interval.attr,
1071 &dev_attr_avg_utilization.attr,
1072 &dev_attr_ssch_rsch_count.attr,
1073 &dev_attr_sample_count.attr,
1074 &dev_attr_avg_device_connect_time.attr,
1075 &dev_attr_avg_function_pending_time.attr,
1076 &dev_attr_avg_device_disconnect_time.attr,
1077 &dev_attr_avg_control_unit_queuing_time.attr,
1078 &dev_attr_avg_device_active_only_time.attr,
1079 &dev_attr_avg_device_busy_time.attr,
1080 &dev_attr_avg_initial_command_response_time.attr,
1084 static struct attribute_group cmf_attr_group_ext = {
1086 .attrs = cmf_attributes_ext,
1089 static ssize_t cmb_enable_show(struct device *dev,
1090 struct device_attribute *attr,
1093 struct ccw_device *cdev = to_ccwdev(dev);
1095 return sprintf(buf, "%d\n", cmf_enabled(cdev));
1098 static ssize_t cmb_enable_store(struct device *dev,
1099 struct device_attribute *attr, const char *buf,
1102 struct ccw_device *cdev = to_ccwdev(dev);
1106 ret = kstrtoul(buf, 16, &val);
1112 ret = disable_cmf(cdev);
1115 ret = enable_cmf(cdev);
1121 return ret ? ret : c;
1123 DEVICE_ATTR_RW(cmb_enable);
1125 int ccw_set_cmf(struct ccw_device *cdev, int enable)
1127 return cmbops->set(cdev, enable ? 2 : 0);
1131 * enable_cmf() - switch on the channel measurement for a specific device
1132 * @cdev: The ccw device to be enabled
1134 * Returns %0 for success or a negative error value.
1135 * Note: If this is called on a device for which channel measurement is already
1136 * enabled a reset of the measurement data is triggered.
1140 int enable_cmf(struct ccw_device *cdev)
1144 device_lock(&cdev->dev);
1145 if (cmf_enabled(cdev)) {
1146 cmbops->reset(cdev);
1149 get_device(&cdev->dev);
1150 ret = cmbops->alloc(cdev);
1153 cmbops->reset(cdev);
1154 ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
1159 ret = cmbops->set(cdev, 2);
1161 sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
1166 put_device(&cdev->dev);
1168 device_unlock(&cdev->dev);
1173 * __disable_cmf() - switch off the channel measurement for a specific device
1174 * @cdev: The ccw device to be disabled
1176 * Returns %0 for success or a negative error value.
1179 * non-atomic, device_lock() held.
1181 int __disable_cmf(struct ccw_device *cdev)
1185 ret = cmbops->set(cdev, 0);
1189 sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
1191 put_device(&cdev->dev);
1197 * disable_cmf() - switch off the channel measurement for a specific device
1198 * @cdev: The ccw device to be disabled
1200 * Returns %0 for success or a negative error value.
1205 int disable_cmf(struct ccw_device *cdev)
1209 device_lock(&cdev->dev);
1210 ret = __disable_cmf(cdev);
1211 device_unlock(&cdev->dev);
1217 * cmf_read() - read one value from the current channel measurement block
1218 * @cdev: the channel to be read
1219 * @index: the index of the value to be read
1221 * Returns the value read or %0 if the value cannot be read.
1226 u64 cmf_read(struct ccw_device *cdev, int index)
1228 return cmbops->read(cdev, index);
1232 * cmf_readall() - read the current channel measurement block
1233 * @cdev: the channel to be read
1234 * @data: a pointer to a data block that will be filled
1236 * Returns %0 on success, a negative error value otherwise.
1241 int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
1243 return cmbops->readall(cdev, data);
1246 /* Reenable cmf when a disconnected device becomes available again. */
1247 int cmf_reenable(struct ccw_device *cdev)
1249 cmbops->reset(cdev);
1250 return cmbops->set(cdev, 2);
1254 * cmf_reactivate() - reactivate measurement block updates
1256 * Use this during resume from hibernate.
1258 void cmf_reactivate(void)
1260 spin_lock(&cmb_area.lock);
1261 if (!list_empty(&cmb_area.list))
1262 cmf_activate(cmb_area.mem, CMF_ON);
1263 spin_unlock(&cmb_area.lock);
1266 static int __init init_cmbe(void)
1268 cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
1269 __alignof__(struct cmbe), 0, NULL);
1271 return cmbe_cache ? 0 : -ENOMEM;
1274 static int __init init_cmf(void)
1276 char *format_string;
1277 char *detect_string;
1281 * If the user did not give a parameter, see if we are running on a
1282 * machine supporting extended measurement blocks, otherwise fall back
1285 if (format == CMF_AUTODETECT) {
1286 if (!css_general_characteristics.ext_mb) {
1289 format = CMF_EXTENDED;
1291 detect_string = "autodetected";
1293 detect_string = "parameter";
1298 format_string = "basic";
1299 cmbops = &cmbops_basic;
1302 format_string = "extended";
1303 cmbops = &cmbops_extended;
1312 pr_info("Channel measurement facility initialized using format "
1313 "%s (mode %s)\n", format_string, detect_string);
1316 device_initcall(init_cmf);
1318 EXPORT_SYMBOL_GPL(enable_cmf);
1319 EXPORT_SYMBOL_GPL(disable_cmf);
1320 EXPORT_SYMBOL_GPL(cmf_read);
1321 EXPORT_SYMBOL_GPL(cmf_readall);