1 // SPDX-License-Identifier: GPL-2.0+
3 * Surface Book (gen. 2 and later) detachment system (DTX) driver.
5 * Provides a user-space interface to properly handle clipboard/tablet
6 * (containing screen and processor) detachment from the base of the device
7 * (containing the keyboard and optionally a discrete GPU). Allows to
8 * acknowledge (to speed things up), abort (e.g. in case the dGPU is still in
9 * use), or request detachment via user-space.
11 * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
15 #include <linux/input.h>
16 #include <linux/ioctl.h>
17 #include <linux/kernel.h>
18 #include <linux/kfifo.h>
19 #include <linux/kref.h>
20 #include <linux/miscdevice.h>
21 #include <linux/module.h>
22 #include <linux/mutex.h>
23 #include <linux/platform_device.h>
24 #include <linux/poll.h>
25 #include <linux/rwsem.h>
26 #include <linux/slab.h>
27 #include <linux/workqueue.h>
29 #include <linux/surface_aggregator/controller.h>
30 #include <linux/surface_aggregator/dtx.h>
33 /* -- SSAM interface. ------------------------------------------------------- */
/* SAM event command IDs for the base-attachment subsystem (BAS). */
enum sam_event_cid_bas {
	SAM_EVENT_CID_DTX_CONNECTION = 0x0c,	/* Base attached/detached. */
	SAM_EVENT_CID_DTX_REQUEST = 0x0e,	/* Detach requested (button). */
	SAM_EVENT_CID_DTX_CANCEL = 0x0f,	/* Detach process canceled. */
	SAM_EVENT_CID_DTX_LATCH_STATUS = 0x11,	/* Latch status changed. */
/* Raw base-state values as reported by the EC. */
enum ssam_bas_base_state {
	SSAM_BAS_BASE_STATE_DETACH_SUCCESS = 0x00,
	SSAM_BAS_BASE_STATE_ATTACHED = 0x01,
	SSAM_BAS_BASE_STATE_NOT_FEASIBLE = 0x02,
/* Raw latch-status values as reported by the EC. */
enum ssam_bas_latch_status {
	SSAM_BAS_LATCH_STATUS_CLOSED = 0x00,
	SSAM_BAS_LATCH_STATUS_OPENED = 0x01,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN = 0x02,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN = 0x03,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE = 0x04,
/* Raw detachment-cancel reasons as reported by the EC. */
enum ssam_bas_cancel_reason {
	SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE = 0x00, /* Low battery. */
	SSAM_BAS_CANCEL_REASON_TIMEOUT = 0x02,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN = 0x03,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN = 0x04,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE = 0x05,
64 struct ssam_bas_base_info {
69 static_assert(sizeof(struct ssam_bas_base_info) == 2);
71 SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
72 .target_category = SSAM_SSH_TC_BAS,
78 SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
79 .target_category = SSAM_SSH_TC_BAS,
85 SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
86 .target_category = SSAM_SSH_TC_BAS,
92 SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
93 .target_category = SSAM_SSH_TC_BAS,
99 SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
100 .target_category = SSAM_SSH_TC_BAS,
106 SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
107 .target_category = SSAM_SSH_TC_BAS,
113 SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
114 .target_category = SSAM_SSH_TC_BAS,
120 SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
121 .target_category = SSAM_SSH_TC_BAS,
127 SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
128 .target_category = SSAM_SSH_TC_BAS,
135 /* -- Main structures. ------------------------------------------------------ */
137 enum sdtx_device_state {
138 SDTX_DEVICE_SHUTDOWN_BIT = BIT(0),
139 SDTX_DEVICE_DIRTY_BASE_BIT = BIT(1),
140 SDTX_DEVICE_DIRTY_MODE_BIT = BIT(2),
141 SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
146 struct rw_semaphore lock; /* Guards device and controller reference. */
149 struct ssam_controller *ctrl;
152 struct miscdevice mdev;
153 wait_queue_head_t waitq;
154 struct mutex write_lock; /* Guards order of events/notifications. */
155 struct rw_semaphore client_lock; /* Guards client list. */
156 struct list_head client_list;
158 struct delayed_work state_work;
160 struct ssam_bas_base_info base;
165 struct delayed_work mode_work;
166 struct input_dev *mode_switch;
168 struct ssam_event_notifier notif;
171 enum sdtx_client_state {
172 SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
176 struct sdtx_device *ddev;
177 struct list_head node;
180 struct fasync_struct *fasync;
182 struct mutex read_lock; /* Guards FIFO buffer read access. */
183 DECLARE_KFIFO(buffer, u8, 512);
/* kref release callback: destroys device-owned locks once the last reference is dropped. */
static void __sdtx_device_release(struct kref *kref)
	struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);

	mutex_destroy(&ddev->write_lock);
/* Acquire a reference on @ddev; returns @ddev so the call can be chained. */
static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
	kref_get(&ddev->kref);
/* Drop a device reference; frees the device via __sdtx_device_release() on zero. */
static void sdtx_device_put(struct sdtx_device *ddev)
	kref_put(&ddev->kref, __sdtx_device_release);
209 /* -- Firmware value translations. ------------------------------------------ */
/*
 * Translate a raw EC base-state value into the DTX user-space ABI value.
 * Unrecognized firmware values are logged and wrapped via SDTX_UNKNOWN().
 */
static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
	case SSAM_BAS_BASE_STATE_ATTACHED:
		return SDTX_BASE_ATTACHED;

	case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
		return SDTX_BASE_DETACHED;

	case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
		return SDTX_DETACH_NOT_FEASIBLE;

	dev_err(ddev->dev, "unknown base state: %#04x\n", state);
	return SDTX_UNKNOWN(state);
/*
 * Translate a raw EC latch-status value into the DTX user-space ABI value.
 * Unrecognized firmware values are logged and wrapped via SDTX_UNKNOWN().
 */
static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
	case SSAM_BAS_LATCH_STATUS_CLOSED:
		return SDTX_LATCH_CLOSED;

	case SSAM_BAS_LATCH_STATUS_OPENED:
		return SDTX_LATCH_OPENED;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
		return SDTX_ERR_FAILED_TO_OPEN;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
		return SDTX_ERR_FAILED_TO_CLOSE;

	dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
	return SDTX_UNKNOWN(status);
/*
 * Translate a raw EC cancel-reason value into the DTX user-space ABI value.
 * Unrecognized firmware values are logged and wrapped via SDTX_UNKNOWN().
 */
static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
	case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
		return SDTX_DETACH_NOT_FEASIBLE;

	case SSAM_BAS_CANCEL_REASON_TIMEOUT:
		return SDTX_DETACH_TIMEDOUT;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
		return SDTX_ERR_FAILED_TO_OPEN;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
		return SDTX_ERR_FAILED_TO_CLOSE;

	dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
	return SDTX_UNKNOWN(reason);
278 /* -- IOCTLs. --------------------------------------------------------------- */
/*
 * Query the current base state/type from the EC and copy the translated
 * result to user space. Caller must hold ddev->lock (read).
 */
static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
				    struct sdtx_base_info __user *buf)
	struct ssam_bas_base_info raw;
	struct sdtx_base_info info;

	lockdep_assert_held_read(&ddev->lock);

	/* Fetch raw values from the EC, retrying on transient transport errors. */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);

	/* Translate firmware values into the user-space ABI representation. */
	info.state = sdtx_translate_base_state(ddev, raw.state);
	info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);

	if (copy_to_user(buf, &info, sizeof(info)))
/*
 * Query the current device mode from the EC and return it to user space.
 * Caller must hold ddev->lock (read).
 */
static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);

	return put_user(mode, buf);
/*
 * Query the current latch status from the EC, translate it to the ABI value,
 * and return it to user space. Caller must hold ddev->lock (read).
 */
static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);

	return put_user(sdtx_translate_latch_status(ddev, latch), buf);
/*
 * Dispatch a DTX ioctl. Event enable/disable only toggles per-client flags;
 * latch commands are forwarded to the EC (with retries); the GET_* commands
 * query current state. Caller must hold ddev->lock (read) — enforced below.
 */
static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
	struct sdtx_device *ddev = client->ddev;

	lockdep_assert_held_read(&ddev->lock);

	case SDTX_IOCTL_EVENTS_ENABLE:
		set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);

	case SDTX_IOCTL_EVENTS_DISABLE:
		clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);

	case SDTX_IOCTL_LATCH_LOCK:
		return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);

	case SDTX_IOCTL_LATCH_UNLOCK:
		return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);

	case SDTX_IOCTL_LATCH_REQUEST:
		return ssam_retry(ssam_bas_latch_request, ddev->ctrl);

	case SDTX_IOCTL_LATCH_CONFIRM:
		return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);

	case SDTX_IOCTL_LATCH_HEARTBEAT:
		return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);

	case SDTX_IOCTL_LATCH_CANCEL:
		return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);

	case SDTX_IOCTL_GET_BASE_INFO:
		return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);

	case SDTX_IOCTL_GET_DEVICE_MODE:
		return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);

	case SDTX_IOCTL_GET_LATCH_STATUS:
		return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
/*
 * ioctl entry point: takes the device lock (killable), bails out if the
 * device has been shut down, then forwards to __surface_dtx_ioctl().
 */
static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	struct sdtx_client *client = file->private_data;

	if (down_read_killable(&client->ddev->lock))

	/* Device gone (e.g. driver unbound): reject all further operations. */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
		up_read(&client->ddev->lock);

	status = __surface_dtx_ioctl(client, cmd, arg);

	up_read(&client->ddev->lock);
397 /* -- File operations. ------------------------------------------------------ */
/*
 * Open handler: allocates a per-client state (event FIFO + flags), takes a
 * device reference, and attaches the client to the device's client list,
 * unless the device has already been shut down.
 */
static int surface_dtx_open(struct inode *inode, struct file *file)
	struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
	struct sdtx_client *client;

	/* Initialize client. */
	client = kzalloc(sizeof(*client), GFP_KERNEL);

	client->ddev = sdtx_device_get(ddev);

	INIT_LIST_HEAD(&client->node);

	mutex_init(&client->read_lock);
	INIT_KFIFO(client->buffer);

	file->private_data = client;

	/* Attach client to the device. */
	down_write(&ddev->client_lock);

	/*
	 * Do not add a new client if the device has been shut down. Note that
	 * it's enough to hold the client_lock here as, during shutdown, we
	 * only acquire that lock and remove clients after marking the device
	 * as shut down.
	 */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
		up_write(&ddev->client_lock);
		sdtx_device_put(client->ddev);

	list_add_tail(&client->node, &ddev->client_list);
	up_write(&ddev->client_lock);

	stream_open(inode, file);
/*
 * Release handler: detaches the client from the device's client list, drops
 * the device reference, and frees per-client resources.
 */
static int surface_dtx_release(struct inode *inode, struct file *file)
	struct sdtx_client *client = file->private_data;

	/* Detach client under the client-list lock. */
	down_write(&client->ddev->client_lock);
	list_del(&client->node);
	up_write(&client->ddev->client_lock);

	/* Free client resources. */
	sdtx_device_put(client->ddev);
	mutex_destroy(&client->read_lock);
/*
 * Read handler: copies queued events from the per-client FIFO to user space.
 * Blocks (unless O_NONBLOCK) until data is available or the device is shut
 * down. The device lock is dropped while sleeping and re-validated after
 * wake-up; read_lock serializes concurrent readers on the same client FIFO.
 */
static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
	struct sdtx_client *client = file->private_data;
	struct sdtx_device *ddev = client->ddev;

	if (down_read_killable(&ddev->lock))

	/* Make sure we're not shut down. */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
		up_read(&ddev->lock);

	/* Check availability, wait if necessary. */
	if (kfifo_is_empty(&client->buffer)) {
		up_read(&ddev->lock);

		if (file->f_flags & O_NONBLOCK)

		/* Sleep without holding the device lock. */
		status = wait_event_interruptible(ddev->waitq,
						  !kfifo_is_empty(&client->buffer) ||
						  test_bit(SDTX_DEVICE_SHUTDOWN_BIT,

		if (down_read_killable(&client->ddev->lock))

		/* Need to check that we're not shut down again. */
		if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
			up_read(&ddev->lock);

	/* Try to read from FIFO. */
	if (mutex_lock_interruptible(&client->read_lock)) {
		up_read(&ddev->lock);

	status = kfifo_to_user(&client->buffer, buf, count, &copied);
	mutex_unlock(&client->read_lock);

		up_read(&ddev->lock);

	/* We might not have gotten anything, check this here. */
	if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
		up_read(&ddev->lock);

	} while (copied == 0);

	up_read(&ddev->lock);
/*
 * Poll handler: signals EPOLLIN/EPOLLRDNORM when the client FIFO has data,
 * EPOLLHUP | EPOLLERR once the device has been shut down.
 */
static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
	struct sdtx_client *client = file->private_data;

	if (down_read_killable(&client->ddev->lock))

	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
		up_read(&client->ddev->lock);
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &client->ddev->waitq, pt);

	if (!kfifo_is_empty(&client->buffer))
		events |= EPOLLIN | EPOLLRDNORM;

	up_read(&client->ddev->lock);
/* fasync handler: registers/unregisters the client for SIGIO notification. */
static int surface_dtx_fasync(int fd, struct file *file, int on)
	struct sdtx_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
/* File operations for the /dev/surface/dtx misc device. */
static const struct file_operations surface_dtx_fops = {
	.owner = THIS_MODULE,
	.open = surface_dtx_open,
	.release = surface_dtx_release,
	.read = surface_dtx_read,
	.poll = surface_dtx_poll,
	.fasync = surface_dtx_fasync,
	.unlocked_ioctl = surface_dtx_ioctl,
	.compat_ioctl = surface_dtx_ioctl,	/* All ioctl arguments are compat-safe. */
566 /* -- Event handling/forwarding. -------------------------------------------- */
569 * The device operation mode is not immediately updated on the EC when the
570 * base has been connected, i.e. querying the device mode inside the
571 * connection event callback yields an outdated value. Thus, we can only
572 * determine the new tablet-mode switch and device mode values after some
575 * These delays have been chosen by experimenting. We first delay on connect
576 * events, then check and validate the device mode against the base state and
577 * if invalid delay again by the "recheck" delay.
579 #define SDTX_DEVICE_MODE_DELAY_CONNECT msecs_to_jiffies(100)
580 #define SDTX_DEVICE_MODE_DELAY_RECHECK msecs_to_jiffies(100)
582 struct sdtx_status_event {
587 struct sdtx_base_info_event {
589 struct sdtx_base_info v;
592 union sdtx_generic_event {
593 struct sdtx_event common;
594 struct sdtx_status_event status;
595 struct sdtx_base_info_event base;
598 static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
600 /* Must be executed with ddev->write_lock held. */
/*
 * Broadcast @evt to all clients that have events enabled, then wake readers.
 * Events are dropped (with a warning) for clients whose FIFO is full.
 * Must be executed with ddev->write_lock held (keeps event order consistent).
 */
static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
	const size_t len = sizeof(struct sdtx_event) + evt->length;
	struct sdtx_client *client;

	lockdep_assert_held(&ddev->write_lock);

	down_read(&ddev->client_lock);
	list_for_each_entry(client, &ddev->client_list, node) {
		if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))

		/* Copy header + payload in one shot if it fits; otherwise drop. */
		if (likely(kfifo_avail(&client->buffer) >= len))
			kfifo_in(&client->buffer, (const u8 *)evt, len);

			dev_warn(ddev->dev, "event buffer overrun\n");

		kill_fasync(&client->fasync, SIGIO, POLL_IN);

	up_read(&ddev->client_lock);

	wake_up_interruptible(&ddev->waitq);
/*
 * SSAM event notifier: validates, translates, and forwards DTX events from
 * the EC to user space. Duplicate connection/latch states are suppressed,
 * and the corresponding dirty bits are cleared so that a concurrent state
 * refresh (sdtx_device_state_workfn) does not overwrite newer event data.
 */
static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
	struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
	union sdtx_generic_event event;

	/* Validate event payload length. */
	switch (in->command_id) {
	case SAM_EVENT_CID_DTX_CONNECTION:
		len = 2 * sizeof(u8);	/* state + base_id */

	case SAM_EVENT_CID_DTX_REQUEST:

	case SAM_EVENT_CID_DTX_CANCEL:

	case SAM_EVENT_CID_DTX_LATCH_STATUS:

	if (in->length != len) {
			"unexpected payload size for event %#04x: got %u, expected %zu\n",
			in->command_id, in->length, len);

	mutex_lock(&ddev->write_lock);

	/* Translate event. */
	switch (in->command_id) {
	case SAM_EVENT_CID_DTX_CONNECTION:
		clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);

		/* If state has not changed: do not send new event. */
		if (ddev->state.base.state == in->data[0] &&
		    ddev->state.base.base_id == in->data[1])

		ddev->state.base.state = in->data[0];
		ddev->state.base.base_id = in->data[1];

		event.base.e.length = sizeof(struct sdtx_base_info);
		event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
		event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
		event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);

	case SAM_EVENT_CID_DTX_REQUEST:
		event.common.code = SDTX_EVENT_REQUEST;
		event.common.length = 0;

	case SAM_EVENT_CID_DTX_CANCEL:
		event.status.e.length = sizeof(u16);
		event.status.e.code = SDTX_EVENT_CANCEL;
		event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);

	case SAM_EVENT_CID_DTX_LATCH_STATUS:
		clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

		/* If state has not changed: do not send new event. */
		if (ddev->state.latch_status == in->data[0])

		ddev->state.latch_status = in->data[0];

		event.status.e.length = sizeof(u16);
		event.status.e.code = SDTX_EVENT_LATCH_STATUS;
		event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);

	sdtx_push_event(ddev, &event.common);

	/* Update device mode on base connection change. */
	if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
		/* On attach, wait for the EC to settle before querying the mode. */
		delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
		sdtx_update_device_mode(ddev, delay);

	mutex_unlock(&ddev->write_lock);
	return SSAM_NOTIF_HANDLED;
723 /* -- State update functions. ----------------------------------------------- */
725 static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
727 return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
728 (mode == SDTX_DEVICE_MODE_TABLET)) ||
729 ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
730 (mode != SDTX_DEVICE_MODE_TABLET));
/*
 * Delayed work: query the current device mode, validate it against the base
 * state (retrying later if the EC has not caught up yet), and, on change,
 * push a device-mode event and update the SW_TABLET_MODE input switch.
 */
static void sdtx_device_mode_workfn(struct work_struct *work)
	struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
	struct sdtx_status_event event;
	struct ssam_bas_base_info base;

	/* Get operation mode. */
	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
		dev_err(ddev->dev, "failed to get device mode: %d\n", status);

	/* Get base info for the validity check below. */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
		dev_err(ddev->dev, "failed to get base info: %d\n", status);

	/*
	 * In some cases (specifically when attaching the base), the device
	 * mode isn't updated right away. Thus we check if the device mode
	 * makes sense for the given base state and try again later if it
	 * doesn't.
	 */
	if (sdtx_device_mode_invalid(mode, base.state)) {
		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);

	mutex_lock(&ddev->write_lock);
	clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);

	/* Avoid sending duplicate device-mode events. */
	if (ddev->state.device_mode == mode) {
		mutex_unlock(&ddev->write_lock);

	ddev->state.device_mode = mode;

	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_DEVICE_MODE;

	sdtx_push_event(ddev, &event.e);

	/* Send SW_TABLET_MODE event. */
	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
	input_sync(ddev->mode_switch);

	mutex_unlock(&ddev->write_lock);
/* Schedule a (delayed) device-mode re-query via sdtx_device_mode_workfn(). */
static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
	schedule_delayed_work(&ddev->mode_work, delay);
797 /* Must be executed with ddev->write_lock held. */
/*
 * Update cached base state from a fresh query and, on change, emit a
 * base-connection event. Must be executed with ddev->write_lock held.
 */
static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
					    struct ssam_bas_base_info info)
	struct sdtx_base_info_event event;

	lockdep_assert_held(&ddev->write_lock);

	/* Prevent duplicate events. */
	if (ddev->state.base.state == info.state &&
	    ddev->state.base.base_id == info.base_id)

	ddev->state.base = info;

	event.e.length = sizeof(struct sdtx_base_info);
	event.e.code = SDTX_EVENT_BASE_CONNECTION;
	event.v.state = sdtx_translate_base_state(ddev, info.state);
	event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);

	sdtx_push_event(ddev, &event.e);
820 /* Must be executed with ddev->write_lock held. */
/*
 * Update cached device mode from a fresh query and, on change, emit a
 * device-mode event plus a SW_TABLET_MODE input event. If the mode
 * contradicts the (already updated) base state, defer and retry instead.
 * Must be executed with ddev->write_lock held.
 */
static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
	struct sdtx_status_event event;

	/*
	 * Note: This function must be called after updating the base state
	 * via __sdtx_device_state_update_base(), as we rely on the updated
	 * base state value in the validity check below.
	 */

	lockdep_assert_held(&ddev->write_lock);

	if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);

	/* Prevent duplicate events. */
	if (ddev->state.device_mode == mode)

	ddev->state.device_mode = mode;

	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_DEVICE_MODE;

	sdtx_push_event(ddev, &event.e);

	/* Send SW_TABLET_MODE event. */
	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
	input_sync(ddev->mode_switch);
859 /* Must be executed with ddev->write_lock held. */
860 static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
862 struct sdtx_status_event event;
864 lockdep_assert_held(&ddev->write_lock);
866 /* Prevent duplicate events. */
867 if (ddev->state.latch_status == status)
870 ddev->state.latch_status = status;
872 event.e.length = sizeof(struct sdtx_base_info);
873 event.e.code = SDTX_EVENT_BASE_CONNECTION;
874 event.v = sdtx_translate_latch_status(ddev, status);
876 sdtx_push_event(ddev, &event.e);
/*
 * Delayed work: re-query base state, device mode, and latch status from the
 * EC and push events for anything that changed. Dirty bits mark which values
 * are being refreshed; a concurrent EC event clears its bit so the (possibly
 * stale) queried value is discarded in its favor.
 */
static void sdtx_device_state_workfn(struct work_struct *work)
	struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
	struct ssam_bas_base_info base;

	/* Mark everything as dirty. */
	set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
	set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
	set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

	/*
	 * Ensure that the state gets marked as dirty before continuing to
	 * query it. Necessary to ensure that clear_bit() calls in
	 * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
	 * bits if an event is received while updating the state here.
	 */
	smp_mb__after_atomic();

	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
		dev_err(ddev->dev, "failed to get base state: %d\n", status);

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
		dev_err(ddev->dev, "failed to get device mode: %d\n", status);

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
		dev_err(ddev->dev, "failed to get latch status: %d\n", status);

	mutex_lock(&ddev->write_lock);

	/*
	 * If the respective dirty-bit has been cleared, an event has been
	 * received, updating this state. The queried state may thus be out of
	 * date. At this point, we can safely assume that the state provided
	 * by the event is either up to date, or we're about to receive
	 * another event updating it.
	 */

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
		__sdtx_device_state_update_base(ddev, base);

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
		__sdtx_device_state_update_mode(ddev, mode);

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
		__sdtx_device_state_update_latch(ddev, latch);

	mutex_unlock(&ddev->write_lock);
/* Schedule a (delayed) full state refresh via sdtx_device_state_workfn(). */
static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
	schedule_delayed_work(&ddev->state_work, delay);
945 /* -- Common device initialization. ----------------------------------------- */
/*
 * Initialize a DTX device: set up locks, work items, and the misc device;
 * query the initial EC state; register the tablet-mode input switch, the
 * SSAM event notifier, and finally the misc device. Teardown on failure
 * unwinds in reverse order.
 */
static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
			    struct ssam_controller *ctrl)
	int status, tablet_mode;

	/* Basic initialization. */
	kref_init(&ddev->kref);
	init_rwsem(&ddev->lock);

	ddev->mdev.minor = MISC_DYNAMIC_MINOR;
	ddev->mdev.name = "surface_dtx";
	ddev->mdev.nodename = "surface/dtx";
	ddev->mdev.fops = &surface_dtx_fops;

	ddev->notif.base.priority = 1;
	ddev->notif.base.fn = sdtx_notifier;
	ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
	ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
	ddev->notif.event.id.instance = 0;
	ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
	ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;

	init_waitqueue_head(&ddev->waitq);
	mutex_init(&ddev->write_lock);
	init_rwsem(&ddev->client_lock);
	INIT_LIST_HEAD(&ddev->client_list);

	INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
	INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);

	/*
	 * Get current device state. We want to guarantee that events are only
	 * sent when state actually changes. Thus we cannot use special
	 * "uninitialized" values, as that would cause problems when manually
	 * querying the state in surface_dtx_pm_complete(). I.e. we would not
	 * be able to detect state changes there if no change event has been
	 * received between driver initialization and first device suspension.
	 *
	 * Note that we also need to do this before registering the event
	 * notifier, as that may access the state values.
	 */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);

	/* Set up tablet mode switch. */
	ddev->mode_switch = input_allocate_device();
	if (!ddev->mode_switch)

	ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
	ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
	ddev->mode_switch->id.bustype = BUS_HOST;
	ddev->mode_switch->dev.parent = ddev->dev;

	tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
	input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);

	status = input_register_device(ddev->mode_switch);
		input_free_device(ddev->mode_switch);

	/* Set up event notifier. */
	status = ssam_notifier_register(ddev->ctrl, &ddev->notif);

	/* Register miscdevice. */
	status = misc_register(&ddev->mdev);

	/*
	 * Update device state in case it has changed between getting the
	 * initial mode and registering the event notifier.
	 */
	sdtx_update_device_state(ddev, 0);

	/* Error unwind path. */
	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
	cancel_delayed_work_sync(&ddev->mode_work);
	input_unregister_device(ddev->mode_switch);
/*
 * Allocate and initialize a new DTX device. Returns the device on success
 * or an ERR_PTR; on init failure the allocation is released via the kref.
 */
static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
	struct sdtx_device *ddev;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	status = sdtx_device_init(ddev, dev, ctrl);
		/* Init failed: drop our reference (frees ddev) and propagate. */
		sdtx_device_put(ddev);
		return ERR_PTR(status);
/*
 * Tear down a DTX device: mark it as shut down, stop event sources and work
 * items, unregister the input device, wake all clients, wait for in-flight
 * operations to drain, deregister the misc device, and drop our reference.
 * Ordering matters throughout — see the step comments below.
 */
static void sdtx_device_destroy(struct sdtx_device *ddev)
	struct sdtx_client *client;

	/*
	 * Mark device as shut-down. Prevent new clients from being added and
	 * new operations from being executed.
	 */
	set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);

	/* Disable notifiers, prevent new events from arriving. */
	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);

	/* Stop mode_work, prevent access to mode_switch. */
	cancel_delayed_work_sync(&ddev->mode_work);

	/* Stop state_work. */
	cancel_delayed_work_sync(&ddev->state_work);

	/* With mode_work canceled, we can unregister the mode_switch. */
	input_unregister_device(ddev->mode_switch);

	/* Wake up async clients. */
	down_write(&ddev->client_lock);
	list_for_each_entry(client, &ddev->client_list, node) {
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	up_write(&ddev->client_lock);

	/* Wake up blocking clients. */
	wake_up_interruptible(&ddev->waitq);

	/*
	 * Wait for clients to finish their current operation. After this, the
	 * controller and device references are guaranteed to be no longer in
	 * use.
	 */
	down_write(&ddev->lock);
	up_write(&ddev->lock);

	/* Finally remove the misc-device. */
	misc_deregister(&ddev->mdev);

	/*
	 * We're now guaranteed that sdtx_device_open() won't be called any
	 * more, so we can now drop our reference.
	 */
	sdtx_device_put(ddev);
1118 /* -- PM ops. --------------------------------------------------------------- */
1120 #ifdef CONFIG_PM_SLEEP
/*
 * PM complete callback: schedule a delayed state refresh after resume, as
 * events may have been lost while suspended (see comment below).
 */
static void surface_dtx_pm_complete(struct device *dev)
	struct sdtx_device *ddev = dev_get_drvdata(dev);

	/*
	 * Normally, the EC will store events while suspended (i.e. in
	 * display-off state) and release them when resumed (i.e. transitioned
	 * to display-on state). During hibernation, however, the EC will be
	 * shut down and does not store events. Furthermore, events might be
	 * dropped during prolonged suspension (it is currently unknown how
	 * big this event buffer is and how it behaves on overruns).
	 *
	 * To prevent any problems, we update the device state here. We do
	 * this delayed to ensure that any events sent by the EC directly
	 * after resuming will be handled first. The delay below has been
	 * chosen (experimentally), so that there should be ample time for
	 * these events to be handled, before we check and, if necessary,
	 * update the state.
	 */
	sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
1144 static const struct dev_pm_ops surface_dtx_pm_ops = {
1145 .complete = surface_dtx_pm_complete,
1148 #else /* CONFIG_PM_SLEEP */
1150 static const struct dev_pm_ops surface_dtx_pm_ops = {};
1152 #endif /* CONFIG_PM_SLEEP */
1155 /* -- Platform driver. ------------------------------------------------------ */
/*
 * Platform probe: bind to the SSAM controller (deferring if it is not up
 * yet), create the DTX device, and stash it as driver data.
 */
static int surface_dtx_platform_probe(struct platform_device *pdev)
	struct ssam_controller *ctrl;
	struct sdtx_device *ddev;

	/* Link with EC; defer probe if it hasn't been registered yet. */
	ctrl = ssam_client_bind(&pdev->dev);
		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

	ddev = sdtx_device_create(&pdev->dev, ctrl);
		return PTR_ERR(ddev);

	platform_set_drvdata(pdev, ddev);
/* Platform remove: tear down the DTX device created in probe. */
static int surface_dtx_platform_remove(struct platform_device *pdev)
	sdtx_device_destroy(platform_get_drvdata(pdev));
1181 static const struct acpi_device_id surface_dtx_acpi_match[] = {
1185 MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
/* Platform driver glue: ACPI-matched, asynchronous probe preferred. */
static struct platform_driver surface_dtx_platform_driver = {
	.probe = surface_dtx_platform_probe,
	.remove = surface_dtx_platform_remove,
		.name = "surface_dtx_pltf",
		.acpi_match_table = surface_dtx_acpi_match,
		.pm = &surface_dtx_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1197 module_platform_driver(surface_dtx_platform_driver);
1199 MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
1200 MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
1201 MODULE_LICENSE("GPL");