dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
shost->shost_gendev.type = &scsi_host_type;
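+ /* Let the PM core suspend/resume this host asynchronously if async scanning is enabled. */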
+ scsi_enable_async_suspend(&shost->shost_gendev);
device_initialize(&shost->shost_dev);
shost->shost_dev.parent = &shost->shost_gendev;
EXPORT_SYMBOL(scsi_logging_level);
#endif
-/*
- * Domain for asynchronous system resume operations. It is marked 'exclusive'
- * to avoid being included in the async_synchronize_full() that is invoked by
- * dpm_resume().
- */
-ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
-EXPORT_SYMBOL(scsi_sd_pm_domain);
-
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err;
- /* flush pending in-flight resume operations, suspend is synchronous */
- async_synchronize_full_domain(&scsi_sd_pm_domain);
-
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
err = cb(dev, pm);
return err;
}
-static void async_sdev_resume(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_resume);
-}
-
-static void async_sdev_thaw(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_thaw);
-}
-
-static void async_sdev_restore(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_restore);
-}
-
static int scsi_bus_resume_common(struct device *dev,
int (*cb)(struct device *, const struct dev_pm_ops *))
{
- async_func_t fn;
-
- if (!scsi_is_sdev_device(dev))
- fn = NULL;
- else if (cb == do_scsi_resume)
- fn = async_sdev_resume;
- else if (cb == do_scsi_thaw)
- fn = async_sdev_thaw;
- else if (cb == do_scsi_restore)
- fn = async_sdev_restore;
- else
- fn = NULL;
-
- if (fn) {
- async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
-
- /*
- * If a user has disabled async probing a likely reason
- * is due to a storage enclosure that does not inject
- * staggered spin-ups. For safety, make resume
- * synchronous as well in that case.
- */
- if (strncmp(scsi_scan_type, "async", 5) != 0)
- async_synchronize_full_domain(&scsi_sd_pm_domain);
+ if (scsi_is_sdev_device(dev)) {
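+ /* Resume the device directly; any asynchronous ordering is now handled by the PM core. */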
+ scsi_dev_type_resume(dev, cb);
} else {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
#endif /* CONFIG_PROC_FS */
/* scsi_scan.c */
-extern char scsi_scan_type[];
+void scsi_enable_async_suspend(struct device *dev);
extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM */
-extern struct async_domain scsi_sd_pm_domain;
-
/* scsi_dh.c */
#ifdef CONFIG_SCSI_DH
void scsi_dh_add_device(struct scsi_device *sdev);
struct completion prev_finished;
};
+/**
+ * scsi_enable_async_suspend - Enable async suspend and resume
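+ * @dev: device to be enabled for asynchronous suspend and resume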
+ */
+void scsi_enable_async_suspend(struct device *dev)
+{
+ /*
+ * If a user has disabled async probing a likely reason is due to a
+ * storage enclosure that does not inject staggered spin-ups. For
+ * safety, make resume synchronous as well in that case.
+ */
+ if (strncmp(scsi_scan_type, "async", 5) != 0)
+ return;
+ /* Enable asynchronous suspend and resume. */
+ device_enable_async_suspend(dev);
+}
+
/**
* scsi_complete_async_scans - Wait for asynchronous scans to complete
*
dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
dev->bus = &scsi_bus_type;
dev->type = &scsi_target_type;
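+ /* Targets opt in to asynchronous suspend/resume under the same policy as the host. */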
+ scsi_enable_async_suspend(dev);
starget->id = id;
starget->channel = channel;
starget->can_queue = 0;
device_initialize(&sdev->sdev_gendev);
sdev->sdev_gendev.bus = &scsi_bus_type;
sdev->sdev_gendev.type = &scsi_dev_type;
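+ /* SCSI devices likewise suspend and resume asynchronously when async scanning is enabled. */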
+ scsi_enable_async_suspend(&sdev->sdev_gendev);
dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
sdkp = dev_get_drvdata(dev);
scsi_autopm_get_device(sdkp->device);
- async_synchronize_full_domain(&scsi_sd_pm_domain);
device_del(&sdkp->dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);