// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}
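/*
 * Example (hypothetical, not part of this driver): counting every
 * subchannel ID the iterator visits. The callback contract is the one
 * used above: return 0 to continue, non-zero to stop the walk.
 */
#if 0
static int count_schid(struct subchannel_id schid, void *data)
{
        (*(unsigned long *)data)++;
        return 0;       /* keep iterating */
}

static unsigned long count_all_subchannel_ids(void)
{
        unsigned long count = 0;

        for_each_subchannel(count_schid, &count);
        return count;
}
#endif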
struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        if (cb->set)
                idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                               void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        if (fn_known && !fn_unknown) {
                /* Skip idset allocation in case of known-only loop. */
                cb.set = NULL;
                return bus_for_each_dev(&css_bus_type, NULL, &cb,
                                        call_fn_known_sch);
        }

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}
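/*
 * Example (hypothetical): a staged walk that treats registered and
 * unregistered subchannels differently. The helper names are
 * illustrative only; the callback signatures are the ones required by
 * for_each_subchannel_staged() above.
 */
#if 0
static int touch_known(struct subchannel *sch, void *data)
{
        return 0;       /* registered: we have a struct subchannel */
}

static int touch_unknown(struct subchannel_id schid, void *data)
{
        return 0;       /* unregistered: only the ID is available */
}

static void walk_all_subchannels(void)
{
        for_each_subchannel_staged(touch_known, touch_unknown, NULL);
}
#endif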
static void css_sch_todo(struct work_struct *work);
static int css_sch_create_locks(struct subchannel *sch)
{
        sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
        if (!sch->lock)
                return -ENOMEM;

        spin_lock_init(sch->lock);
        mutex_init(&sch->reg_mutex);

        return 0;
}

static void css_subchannel_release(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);

        sch->config.intparm = 0;
        cio_commit_config(sch);
        kfree(sch->driver_override);
        kfree(sch->lock);
        kfree(sch);
}
static int css_validate_subchannel(struct subchannel_id schid,
                                   struct schib *schib)
{
        int err;

        switch (schib->pmcw.st) {
        case SUBCHANNEL_TYPE_IO:
        case SUBCHANNEL_TYPE_MSG:
                if (!css_sch_is_valid(schib))
                        err = -ENODEV;
                else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
                        CIO_MSG_EVENT(6, "Blacklisted device detected "
                                      "at devno %04X, subchannel set %x\n",
                                      schib->pmcw.dev, schid.ssid);
                        err = -ENODEV;
                } else
                        err = 0;
                break;
        default:
                err = 0;
        }
        if (err)
                goto out;

        CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
                      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
        return err;
}
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
                                        struct schib *schib)
{
        struct subchannel *sch;
        int ret;

        ret = css_validate_subchannel(schid, schib);
        if (ret < 0)
                return ERR_PTR(ret);

        sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (!sch)
                return ERR_PTR(-ENOMEM);

        sch->schid = schid;
        sch->schib = *schib;
        sch->st = schib->pmcw.st;

        ret = css_sch_create_locks(sch);
        if (ret)
                goto err;

        INIT_WORK(&sch->todo_work, css_sch_todo);
        sch->dev.release = &css_subchannel_release;
        sch->dev.dma_mask = &sch->dma_mask;
        device_initialize(&sch->dev);
        /*
         * The physical addresses for some of the dma structures that can
         * belong to a subchannel need to fit 31 bit width (e.g. ccw).
         */
        ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
        if (ret)
                goto err_lock;
        /*
         * But we don't have such restrictions imposed on the stuff that
         * is handled by the streaming API.
         */
        ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
        if (ret)
                goto err_lock;

        return sch;

err_lock:
        kfree(sch->lock);
err:
        kfree(sch);
        return ERR_PTR(ret);
}
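/*
 * Example (hypothetical helper name): with the two masks set up above,
 * coherent allocations against the subchannel device come back 31-bit
 * addressable (as channel programs require), while streaming mappings
 * may use the full 64-bit range.
 */
#if 0
static void *alloc_ccw_area(struct subchannel *sch, dma_addr_t *dma)
{
        /* 31-bit addressable due to the coherent mask set above. */
        return dma_alloc_coherent(&sch->dev, PAGE_SIZE, dma, GFP_KERNEL);
}
#endif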
static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                     sch->schid.sch_no);
        ret = device_add(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
        if (ret)
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

        ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct subchannel *sch = to_subchannel(dev);
        char *driver_override, *old, *cp;

        /* We need to keep extra room for a newline */
        if (count >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, count, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        cp = strchr(driver_override, '\n');
        if (cp)
                *cp = '\0';

        device_lock(dev);
        old = sch->driver_override;
        if (strlen(driver_override)) {
                sch->driver_override = driver_override;
        } else {
                kfree(driver_override);
                sch->driver_override = NULL;
        }
        device_unlock(dev);

        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct subchannel *sch = to_subchannel(dev);
        ssize_t len;

        device_lock(dev);
        len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);
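/*
 * Example (hypothetical device id 0.0.0313): userspace can use this
 * attribute to force a specific css driver to bind, e.g. before handing
 * an I/O subchannel to vfio-ccw:
 *
 *   echo vfio_ccw > /sys/bus/css/devices/0.0.0313/driver_override
 *
 * css_bus_match() further below then rejects every driver whose name
 * does not match the stored override string.
 */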
static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};
static ssize_t chpids_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct subchannel *sch = to_subchannel(dev);
        struct chsc_ssd_info *ssd = &sch->ssd_info;
        ssize_t ret = 0;
        int mask;
        int chp;

        for (chp = 0; chp < 8; chp++) {
                mask = 0x80 >> chp;
                if (ssd->path_mask & mask)
                        ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
                else
                        ret += sprintf(buf + ret, "00 ");
        }
        ret += sprintf(buf + ret, "\n");
        return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct subchannel *sch = to_subchannel(dev);
        struct pmcw *pmcw = &sch->schib.pmcw;

        return sprintf(buf, "%02x %02x %02x\n",
                       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);
static struct attribute *io_subchannel_type_attrs[] = {
        &dev_attr_chpids.attr,
        &dev_attr_pimpampom.attr,
        NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
        .groups = io_subchannel_type_groups,
};
int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.groups = default_subch_attr_groups;

        if (sch->st == SUBCHANNEL_TYPE_IO)
                sch->dev.type = &io_subchannel_type;

        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them since they will be
         * unregistered before they can be used anyway, so we delay the add
         * uevent until after device recognition was successful.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
        struct subchannel *sch;
        int ret;

        sch = css_alloc_subchannel(schid, schib);
        if (IS_ERR(sch))
                return PTR_ERR(sch);

        ret = css_register_subchannel(sch);
        if (ret)
                put_device(&sch->dev);

        return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = (void *)data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;
        int ccode;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        /*
         * The first subchannel that is not-operational (ccode==3)
         * indicates that there aren't any more devices available.
         * If stsch gets an exception, it means the current subchannel set
         * is not valid.
         */
        ccode = stsch(schid, &schib);
        if (ccode)
                return (ccode == 3) ? -ENXIO : ccode;

        return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        if (ret != 0 && ret != -EAGAIN) {
                CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
        }
        return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
                      sch->schid.ssid, sch->schid.sch_no, todo);
        if (sch->todo >= todo)
                return;
        /* Get workqueue ref. */
        if (!get_device(&sch->dev))
                return;
        sch->todo = todo;
        if (!queue_work(cio_work_q, &sch->todo_work)) {
                /* Already queued, release workqueue ref. */
                put_device(&sch->dev);
        }
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
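/*
 * Example (hypothetical): asking the slow path to unregister a
 * subchannel. The lock requirement documented above is satisfied by the
 * caller; SCH_TODO_UNREG is one of the enum sch_todo values handled by
 * css_sch_todo() below.
 */
#if 0
static void example_request_unregister(struct subchannel *sch)
{
        spin_lock_irq(sch->lock);
        css_sched_sch_todo(sch, SCH_TODO_UNREG);
        spin_unlock_irq(sch->lock);
}
#endif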
static void css_sch_todo(struct work_struct *work)
{
        struct subchannel *sch;
        enum sch_todo todo;
        int ret;

        sch = container_of(work, struct subchannel, todo_work);
        /* Find out todo. */
        spin_lock_irq(sch->lock);
        todo = sch->todo;
        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
                      sch->schid.sch_no, todo);
        sch->todo = SCH_TODO_NOTHING;
        spin_unlock_irq(sch->lock);
        /* Perform todo. */
        switch (todo) {
        case SCH_TODO_NOTHING:
                break;
        case SCH_TODO_EVAL:
                ret = css_evaluate_known_subchannel(sch, 1);
                if (ret == -EAGAIN) {
                        spin_lock_irq(sch->lock);
                        css_sched_sch_todo(sch, todo);
                        spin_unlock_irq(sch->lock);
                }
                break;
        case SCH_TODO_UNREG:
                css_sch_device_unregister(sch);
                break;
        }
        /* Release workqueue ref. */
        put_device(&sch->dev);
}
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        atomic_set(&css_eval_scheduled, 0);
        init_waitqueue_head(&css_eval_wq);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
                /*
                 * The loop might take long time for platforms with lots of
                 * known devices. Allow scheduling here.
                 */
                cond_resched();
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        spin_lock_irq(&slow_subchannel_lock);
                        idset_sch_del_subseq(slow_subchannel_set, schid);
                        spin_unlock_irq(&slow_subchannel_lock);
                        break;
                default:
                        rc = 0;
                }
                /* Allow scheduling here since the containing loop might
                 * take a while. */
                cond_resched();
        }
        return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
        unsigned long flags;

        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        if (idset_is_empty(slow_subchannel_set)) {
                atomic_set(&css_eval_scheduled, 0);
                wake_up(&css_eval_wq);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, 0);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, 0);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
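/*
 * Example (hypothetical): trigger a full re-evaluation and wait for the
 * slow path to drain, mirroring what the /proc/cio_settle write handler
 * further below does. css_complete_work() is defined later in this file.
 */
#if 0
static int example_rescan_and_wait(void)
{
        css_schedule_eval_all();
        return css_complete_work();
}
#endif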
static int __unset_registered(struct device *dev, void *data)
{
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);

        idset_sch_del(set, sch->schid);
        return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
        unsigned long flags;
        struct idset *unreg_set;

        /* Find unregistered subchannels. */
        unreg_set = idset_sch_new();
        if (!unreg_set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
        idset_fill(unreg_set);
        bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_add_set(slow_subchannel_set, unreg_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, delay);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
        flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        /* Schedule with a delay to allow merging of subsequent calls. */
        css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;
        struct subchannel *sch;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 4) & 3;

        if (crw0->erc == CRW_ERC_PMOD) {
                sch = get_subchannel_by_schid(mchk_schid);
                if (sch) {
                        css_update_ssd_info(sch);
                        put_device(&sch->dev);
                }
                return;
        }
        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        struct cpuid cpu_id;

        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid =
                        css->id_valid ? css->cssid : 0;
        } else {
                css->global_pgid.pgid_high.cpu_addr = stap();
        }
        get_cpu_id(&cpu_id);
        css->global_pgid.cpu_id = cpu_id.ident;
        css->global_pgid.cpu_model = cpu_id.machine;
        css->global_pgid.tod_high = tod_high;
}
static void channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css = to_css(dev);

        mutex_destroy(&css->mutex);
        kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
                               char *buf)
{
        struct channel_subsystem *css = to_css(dev);

        if (!css->id_valid)
                return -EINVAL;

        return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
                              char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
                               const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        unsigned long val;
        int ret;

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
                              int index)
{
        return css_chsc_characteristics.secm ? attr->mode : 0;
}
static struct attribute *cssdev_attrs[] = {
        &dev_attr_real_cssid.attr,
        NULL,
};

static struct attribute_group cssdev_attr_group = {
        .attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
        &dev_attr_cm_enable.attr,
        NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
        .attrs = cssdev_cm_attrs,
        .is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
        &cssdev_attr_group,
        &cssdev_cm_attr_group,
        NULL,
};
static int __init setup_css(int nr)
{
        struct channel_subsystem *css;
        int ret;

        css = kzalloc(sizeof(*css), GFP_KERNEL);
        if (!css)
                return -ENOMEM;

        channel_subsystems[nr] = css;
        dev_set_name(&css->device, "css%x", nr);
        css->device.groups = cssdev_attr_groups;
        css->device.release = channel_subsystem_release;
        /*
         * We currently allocate notifier bits with this (using
         * css->device as the device argument with the DMA API)
         * and are fine with 64 bit addresses.
         */
        ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
        if (ret) {
                kfree(css);
                goto out_err;
        }

        mutex_init(&css->mutex);
        ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
        if (!ret) {
                css->id_valid = true;
                pr_info("Partition identifier %01x.%01x\n", css->cssid,
                        css->iid);
        }
        css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

        ret = device_register(&css->device);
        if (ret) {
                put_device(&css->device);
                goto out_err;
        }

        css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
                                         GFP_KERNEL);
        if (!css->pseudo_subchannel) {
                device_unregister(&css->device);
                ret = -ENOMEM;
                goto out_err;
        }

        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        mutex_init(&css->pseudo_subchannel->reg_mutex);
        ret = css_sch_create_locks(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                device_unregister(&css->device);
                goto out_err;
        }

        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        ret = device_register(&css->pseudo_subchannel->dev);
        if (ret) {
                put_device(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
                goto out_err;
        }

        return ret;
out_err:
        channel_subsystems[nr] = NULL;
        return ret;
}
static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        struct channel_subsystem *css;
        int ret;

        ret = NOTIFY_DONE;
        for_each_css(css) {
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};
#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
        return &channel_subsystems[0]->device;
}
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
        struct gen_pool *gp_dma;
        void *cpu_addr;
        dma_addr_t dma_addr;
        int i;

        gp_dma = gen_pool_create(3, -1);
        if (!gp_dma)
                return NULL;
        for (i = 0; i < nr_pages; ++i) {
                cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
                                              CIO_DMA_GFP);
                if (!cpu_addr)
                        return gp_dma;
                gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
                                  dma_addr, PAGE_SIZE, -1);
        }
        return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
                              struct gen_pool_chunk *chunk, void *data)
{
        size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

        dma_free_coherent((struct device *) data, chunk_size,
                          (void *) chunk->start_addr,
                          (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
        if (!gp_dma)
                return;
        /* this is quite ugly but no better idea */
        gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
        gen_pool_destroy(gp_dma);
}
static int cio_dma_pool_init(void)
{
        /* No need to free up the resources: compiled in */
        cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
        if (!cio_dma_pool)
                return -ENOMEM;
        return 0;
}
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
                        size_t size)
{
        dma_addr_t dma_addr;
        unsigned long addr;
        size_t chunk_size;

        if (!gp_dma)
                return NULL;
        addr = gen_pool_alloc(gp_dma, size);
        while (!addr) {
                chunk_size = round_up(size, PAGE_SIZE);
                addr = (unsigned long) dma_alloc_coherent(dma_dev,
                                chunk_size, &dma_addr, CIO_DMA_GFP);
                if (!addr)
                        return NULL;
                gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
                addr = gen_pool_alloc(gp_dma, size);
        }
        return (void *) addr;
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
        if (!cpu_addr)
                return;
        memset(cpu_addr, 0, size);
        gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}
/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
        return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
        cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}
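/*
 * Example (hypothetical helper names): pairing cio_dma_zalloc() with
 * cio_dma_free() of the same size for a driver-global scratch buffer.
 * The pool grows on demand in page-sized chunks, as implemented above.
 */
#if 0
static void *scratch;

static int example_alloc_scratch(void)
{
        scratch = cio_dma_zalloc(64);
        return scratch ? 0 : -ENOMEM;
}

static void example_free_scratch(void)
{
        cio_dma_free(scratch, 64);
}
#endif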
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
        int ret, i;

        ret = chsc_init();
        if (ret)
                return ret;

        chsc_determine_css_characteristics();
        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        if (ret)
                max_ssid = 0;
        else /* Success. */
                max_ssid = __MAX_SSID;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        if ((ret = bus_register(&css_bus_type)))
                goto out;

        /* Setup css structure. */
        for (i = 0; i <= MAX_CSS_IDX; i++) {
                ret = setup_css(i);
                if (ret)
                        goto out_unregister;
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = cio_dma_pool_init();
        if (ret)
                goto out_unregister_rn;
        airq_init();
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        return 0;
out_unregister_rn:
        unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
        while (i-- > 0) {
                struct channel_subsystem *css = channel_subsystems[i];

                device_unregister(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}
static void __init css_bus_cleanup(void)
{
        struct channel_subsystem *css;

        for_each_css(css) {
                device_unregister(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
        int ret;

        ret = css_bus_init();
        if (ret)
                return ret;
        cio_work_q = create_singlethread_workqueue("cio");
        if (!cio_work_q) {
                ret = -ENOMEM;
                goto out_bus;
        }
        ret = io_subchannel_init();
        if (ret)
                goto out_wq;

        /* Register subchannels which are already in use. */
        cio_register_early_subchannels();
        /* Start initial subchannel evaluation. */
        css_schedule_eval_all();

        return ret;
out_wq:
        destroy_workqueue(cio_work_q);
out_bus:
        css_bus_cleanup();
        return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
        struct css_driver *cssdrv = to_cssdriver(drv);

        if (cssdrv->settle)
                return cssdrv->settle();
        return 0;
}

int css_complete_work(void)
{
        int ret;

        /* Wait for the evaluation of subchannels to finish. */
        ret = wait_event_interruptible(css_eval_wq,
                                       atomic_read(&css_eval_scheduled) == 0);
        if (ret)
                return -EINTR;
        flush_workqueue(cio_work_q);
        /* Wait for the subchannel type specific initialization to finish */
        return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
        css_complete_work();
        return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        int ret;

        /* Handle pending CRW's. */
        crw_wait_for_channel_report();
        ret = css_complete_work();

        return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
        .proc_open      = nonseekable_open,
        .proc_write     = cio_settle_write,
        .proc_lseek     = no_llseek,
};

static int __init cio_settle_init(void)
{
        struct proc_dir_entry *entry;

        entry = proc_create("cio_settle", S_IWUSR, NULL,
                            &cio_settle_proc_ops);
        if (!entry)
                return -ENOMEM;
        return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
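/*
 * Example (userspace view): writing anything to /proc/cio_settle, e.g.
 * "echo 1 > /proc/cio_settle", blocks until pending channel report
 * words are processed and the subchannel evaluation above has drained.
 */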
int sch_is_pseudo_sch(struct subchannel *sch)
{
        if (!sch->dev.parent)
                return 0;
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        /* When driver_override is set, only bind to the matching driver */
        if (sch->driver_override && strcmp(sch->driver_override, drv->name))
                return 0;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}
static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}
static struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets up the
 * bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.bus = &css_bus_type;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
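/*
 * Example (hypothetical): the minimal shape of a css driver. The
 * "example_*" names are illustrative only; the type table is what
 * css_bus_match() above compares against sch->st.
 */
#if 0
static struct css_device_id example_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};

static struct css_driver example_driver = {
        .drv = {
                .name  = "example",
                .owner = THIS_MODULE,
        },
        .subchannel_type = example_ids,
};

static int __init example_init(void)
{
        return css_driver_register(&example_driver);
}
#endif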
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);