1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
4 #include <linux/acpi.h>
5 #include <linux/delay.h>
6 #include <linux/mod_devicetable.h>
7 #include <linux/pm_runtime.h>
8 #include <linux/soundwire/sdw_registers.h>
9 #include <linux/soundwire/sdw.h>
10 #include <linux/soundwire/sdw_type.h>
12 #include "sysfs_local.h"
14 static DEFINE_IDA(sdw_bus_ida);
15 static DEFINE_IDA(sdw_peripheral_ida);
17 static int sdw_get_id(struct sdw_bus *bus)
19 int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
29 * sdw_bus_master_add() - add a bus Master instance
31 * @parent: parent device
32 * @fwnode: firmware node handle
34 * Initializes the bus instance, reads properties and creates child
37 int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
38 struct fwnode_handle *fwnode)
40 struct sdw_master_prop *prop = NULL;
44 pr_err("SoundWire parent device is not set\n");
48 ret = sdw_get_id(bus);
50 dev_err(parent, "Failed to get bus id\n");
54 ret = sdw_master_device_add(bus, parent, fwnode);
56 dev_err(parent, "Failed to add master device at link %d\n",
62 dev_err(bus->dev, "SoundWire Bus ops are not set\n");
66 if (!bus->compute_params) {
68 "Bandwidth allocation not configured, compute_params no set\n");
73 * Give each bus_lock and msg_lock a unique key so that lockdep won't
74 * trigger a deadlock warning when the locks of several buses are
75 * grabbed during configuration of a multi-bus stream.
77 lockdep_register_key(&bus->msg_lock_key);
78 __mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);
80 lockdep_register_key(&bus->bus_lock_key);
81 __mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);
83 INIT_LIST_HEAD(&bus->slaves);
84 INIT_LIST_HEAD(&bus->m_rt_list);
87 * Initialize multi_link flag
89 bus->multi_link = false;
90 if (bus->ops->read_prop) {
91 ret = bus->ops->read_prop(bus);
94 "Bus read properties failed:%d\n", ret);
99 sdw_bus_debugfs_init(bus);
102 * Device numbers in SoundWire are 0 through 15. Enumeration device
103 * number (0), Broadcast device number (15), Group numbers (12 and
104 * 13) and Master device number (14) are not used for assignment so
105 * mask these and other higher bits.
108 /* Set higher order bits */
109 *bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
111 /* Set enumeration device number and broadcast device number */
112 set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
113 set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
115 /* Set group device numbers and master device number */
116 set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
117 set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
118 set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
121 * SDW is an enumerable bus, but devices can be powered off, so
122 * they may not be able to report themselves as present.
124 * Create Slave devices based on Slaves described in
125 * the respective firmware (ACPI/DT)
127 if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
128 ret = sdw_acpi_find_slaves(bus);
129 else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
130 ret = sdw_of_find_slaves(bus);
132 ret = -ENOTSUPP; /* No ACPI/DT so error out */
135 dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
140 * Initialize clock values based on Master properties. The max
141 * frequency is read from max_clk_freq property. Current assumption
142 * is that the bus will start at the highest clock frequency when powered on.
145 * Default active bank will be 0 as out of reset the Slaves have
146 * to start with bank 0 (Table 40 of Spec)
149 bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
150 bus->params.curr_dr_freq = bus->params.max_dr_freq;
151 bus->params.curr_bank = SDW_BANK0;
152 bus->params.next_bank = SDW_BANK1;
156 EXPORT_SYMBOL(sdw_bus_master_add);
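/*
 * Usage sketch (illustrative only, not part of this driver): a controller
 * driver typically embeds a struct sdw_bus in its own link structure, fills
 * in ops/compute_params, registers the bus from probe() and deletes it from
 * remove(). The "my_link" structure, ops and callbacks below are hypothetical.
 *
 *	struct my_link {
 *		struct sdw_bus bus;
 *	};
 *
 *	static int my_link_probe(struct platform_device *pdev)
 *	{
 *		struct my_link *link;
 *
 *		link = devm_kzalloc(&pdev->dev, sizeof(*link), GFP_KERNEL);
 *		if (!link)
 *			return -ENOMEM;
 *
 *		link->bus.ops = &my_link_ops;			// hypothetical
 *		link->bus.compute_params = my_compute_params;	// hypothetical
 *
 *		return sdw_bus_master_add(&link->bus, &pdev->dev,
 *					  pdev->dev.fwnode);
 *	}
 *
 *	static void my_link_remove(struct my_link *link)
 *	{
 *		sdw_bus_master_delete(&link->bus);
 *	}
 */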
158 static int sdw_delete_slave(struct device *dev, void *data)
160 struct sdw_slave *slave = dev_to_sdw_dev(dev);
161 struct sdw_bus *bus = slave->bus;
163 pm_runtime_disable(dev);
165 sdw_slave_debugfs_exit(slave);
167 mutex_lock(&bus->bus_lock);
169 if (slave->dev_num) { /* clear dev_num if assigned */
170 clear_bit(slave->dev_num, bus->assigned);
171 if (bus->dev_num_ida_min)
172 ida_free(&sdw_peripheral_ida, slave->dev_num);
174 list_del_init(&slave->node);
175 mutex_unlock(&bus->bus_lock);
177 device_unregister(dev);
182 * sdw_bus_master_delete() - delete the bus master instance
183 * @bus: bus to be deleted
185 * Remove the instance, delete the child devices.
187 void sdw_bus_master_delete(struct sdw_bus *bus)
189 device_for_each_child(bus->dev, NULL, sdw_delete_slave);
190 sdw_master_device_del(bus);
192 sdw_bus_debugfs_exit(bus);
193 lockdep_unregister_key(&bus->bus_lock_key);
194 lockdep_unregister_key(&bus->msg_lock_key);
195 ida_free(&sdw_bus_ida, bus->id);
197 EXPORT_SYMBOL(sdw_bus_master_delete);
203 static inline int find_response_code(enum sdw_command_response resp)
209 case SDW_CMD_IGNORED:
212 case SDW_CMD_TIMEOUT:
220 static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
222 int retry = bus->prop.err_threshold;
223 enum sdw_command_response resp;
226 for (i = 0; i <= retry; i++) {
227 resp = bus->ops->xfer_msg(bus, msg);
228 ret = find_response_code(resp);
230 /* if cmd is ok or ignored return */
231 if (ret == 0 || ret == -ENODATA)
238 static inline int do_transfer_defer(struct sdw_bus *bus,
241 struct sdw_defer *defer = &bus->defer_msg;
242 int retry = bus->prop.err_threshold;
243 enum sdw_command_response resp;
247 defer->length = msg->len;
248 init_completion(&defer->complete);
250 for (i = 0; i <= retry; i++) {
251 resp = bus->ops->xfer_msg_defer(bus);
252 ret = find_response_code(resp);
253 /* if cmd is ok or ignored return */
254 if (ret == 0 || ret == -ENODATA)
261 static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
265 ret = do_transfer(bus, msg);
266 if (ret != 0 && ret != -ENODATA)
267 dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
269 (msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
270 msg->addr, msg->len);
276 * sdw_transfer() - Synchronously transfer a message to an SDW Slave device
278 * @msg: SDW message to be transferred
280 int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
284 mutex_lock(&bus->msg_lock);
286 ret = sdw_transfer_unlocked(bus, msg);
288 mutex_unlock(&bus->msg_lock);
294 * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
296 * @sync_delay: Delay before reading status
298 void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
302 if (!bus->ops->read_ping_status)
306 * wait for peripheral to sync if desired. 10-15ms should be more than
307 * enough in most cases.
310 usleep_range(10000, 15000);
312 mutex_lock(&bus->msg_lock);
314 status = bus->ops->read_ping_status(bus);
316 mutex_unlock(&bus->msg_lock);
319 dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
321 dev_dbg(bus->dev, "PING status: %#x\n", status);
323 EXPORT_SYMBOL(sdw_show_ping_status);
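/*
 * Usage sketch (illustrative only): a peripheral driver that suspects it has
 * lost sync can dump the PING status from its resume path; passing true adds
 * the 10-15ms settling delay mentioned above. "slave" is assumed to be the
 * driver's struct sdw_slave pointer.
 *
 *	sdw_show_ping_status(slave->bus, true);
 */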
326 * sdw_transfer_defer() - Asynchronously transfer a message to an SDW Slave device
328 * @msg: SDW message to be transferred
330 * Caller must hold the bus msg_lock while calling this
332 int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
336 if (!bus->ops->xfer_msg_defer)
339 ret = do_transfer_defer(bus, msg);
340 if (ret != 0 && ret != -ENODATA)
341 dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
347 int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
348 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
350 memset(msg, 0, sizeof(*msg));
351 msg->addr = addr; /* addr is 16 bit and truncated here */
353 msg->dev_num = dev_num;
357 if (addr < SDW_REG_NO_PAGE) /* no paging area */
360 if (addr >= SDW_REG_MAX) { /* illegal addr */
361 pr_err("SDW: Invalid address %x passed\n", addr);
365 if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
366 if (slave && !slave->prop.paging_support)
368 /* no need for else as that will fall-through to paging */
371 /* paging mandatory */
372 if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
373 pr_err("SDW: Invalid device for paging :%d\n", dev_num);
378 pr_err("SDW: No slave for paging addr\n");
382 if (!slave->prop.paging_support) {
384 "address %x needs paging but no support\n", addr);
388 msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
389 msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
390 msg->addr |= BIT(15);
397 * Read/Write IO functions.
400 static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
401 size_t count, u8 *val)
408 // Only handle bytes up to next page boundary
409 size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));
411 ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
415 ret = sdw_transfer(slave->bus, &msg);
416 if (ret < 0 && !slave->is_mockup_device)
428 * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
430 * @addr: Register address
432 * @val: Buffer for values to be read
434 * Note that if the message crosses a page boundary each page will be
435 * transferred under a separate invocation of the msg_lock.
437 int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
439 return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
441 EXPORT_SYMBOL(sdw_nread_no_pm);
444 * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
446 * @addr: Register address
448 * @val: Buffer for values to be written
450 * Note that if the message crosses a page boundary each page will be
451 * transferred under a separate invocation of the msg_lock.
453 int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
455 return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
457 EXPORT_SYMBOL(sdw_nwrite_no_pm);
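/*
 * Usage sketch (illustrative only): the _no_pm helpers are meant for contexts
 * where the device is already pm_runtime active, e.g. a codec driver's
 * update_status()/io_init path. "MY_INIT_REG" and the coefficient table are
 * hypothetical.
 *
 *	static const u8 coeffs[3] = { 0x01, 0x02, 0x03 };
 *	int ret;
 *
 *	ret = sdw_nwrite_no_pm(slave, MY_INIT_REG, sizeof(coeffs), coeffs);
 *	if (ret < 0)
 *		return ret;
 *	return sdw_write_no_pm(slave, MY_INIT_REG + 4, 0x10);
 */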
460 * sdw_write_no_pm() - Write a SDW Slave register with no PM
462 * @addr: Register address
463 * @value: Register value
465 int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
467 return sdw_nwrite_no_pm(slave, addr, 1, &value);
469 EXPORT_SYMBOL(sdw_write_no_pm);
472 sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
478 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
479 SDW_MSG_FLAG_READ, &buf);
483 ret = sdw_transfer(bus, &msg);
491 sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
496 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
497 SDW_MSG_FLAG_WRITE, &value);
501 return sdw_transfer(bus, &msg);
504 int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
510 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
511 SDW_MSG_FLAG_READ, &buf);
515 ret = sdw_transfer_unlocked(bus, &msg);
521 EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);
523 int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
528 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
529 SDW_MSG_FLAG_WRITE, &value);
533 return sdw_transfer_unlocked(bus, &msg);
535 EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
538 * sdw_read_no_pm() - Read a SDW Slave register with no PM
540 * @addr: Register address
542 int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
547 ret = sdw_nread_no_pm(slave, addr, 1, &buf);
553 EXPORT_SYMBOL(sdw_read_no_pm);
555 int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
559 tmp = sdw_read_no_pm(slave, addr);
563 tmp = (tmp & ~mask) | val;
564 return sdw_write_no_pm(slave, addr, tmp);
566 EXPORT_SYMBOL(sdw_update_no_pm);
568 /* Read-Modify-Write Slave register */
569 int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
573 tmp = sdw_read(slave, addr);
577 tmp = (tmp & ~mask) | val;
578 return sdw_write(slave, addr, tmp);
580 EXPORT_SYMBOL(sdw_update);
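/*
 * Usage sketch (illustrative only): read-modify-write of a single register
 * with PM handling done by the helper. "MY_CTRL_REG" and "MY_CTRL_EN" are
 * hypothetical peripheral-specific definitions.
 *
 *	ret = sdw_update(slave, MY_CTRL_REG, MY_CTRL_EN, enable ? MY_CTRL_EN : 0);
 *	if (ret < 0)
 *		dev_err(&slave->dev, "MY_CTRL_REG update failed: %d\n", ret);
 */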
583 * sdw_nread() - Read "n" contiguous SDW Slave registers
585 * @addr: Register address
587 * @val: Buffer for values to be read
589 * This version of the function will take a PM reference to the slave
591 * Note that if the message crosses a page boundary each page will be
592 * transferred under a separate invocation of the msg_lock.
594 int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
598 ret = pm_runtime_get_sync(&slave->dev);
599 if (ret < 0 && ret != -EACCES) {
600 pm_runtime_put_noidle(&slave->dev);
604 ret = sdw_nread_no_pm(slave, addr, count, val);
606 pm_runtime_mark_last_busy(&slave->dev);
607 pm_runtime_put(&slave->dev);
611 EXPORT_SYMBOL(sdw_nread);
614 * sdw_nwrite() - Write "n" contiguous SDW Slave registers
616 * @addr: Register address
618 * @val: Buffer for values to be written
620 * This version of the function will take a PM reference to the slave
622 * Note that if the message crosses a page boundary each page will be
623 * transferred under a separate invocation of the msg_lock.
625 int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
629 ret = pm_runtime_get_sync(&slave->dev);
630 if (ret < 0 && ret != -EACCES) {
631 pm_runtime_put_noidle(&slave->dev);
635 ret = sdw_nwrite_no_pm(slave, addr, count, val);
637 pm_runtime_mark_last_busy(&slave->dev);
638 pm_runtime_put(&slave->dev);
642 EXPORT_SYMBOL(sdw_nwrite);
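/*
 * Usage sketch (illustrative only): bulk transfers with the PM-aware helpers;
 * messages crossing a page boundary are split internally. The register bases
 * and block size are hypothetical.
 *
 *	u8 block[64];
 *	int ret;
 *
 *	ret = sdw_nread(slave, MY_STATUS_BASE, sizeof(block), block);
 *	if (ret < 0)
 *		return ret;
 *	return sdw_nwrite(slave, MY_LOAD_BASE, sizeof(block), block);
 */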
645 * sdw_read() - Read a SDW Slave register
647 * @addr: Register address
649 * This version of the function will take a PM reference to the slave
652 int sdw_read(struct sdw_slave *slave, u32 addr)
657 ret = sdw_nread(slave, addr, 1, &buf);
663 EXPORT_SYMBOL(sdw_read);
666 * sdw_write() - Write a SDW Slave register
668 * @addr: Register address
669 * @value: Register value
671 * This version of the function will take a PM reference to the slave
674 int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
676 return sdw_nwrite(slave, addr, 1, &value);
678 EXPORT_SYMBOL(sdw_write);
684 /* called with bus_lock held */
685 static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
687 struct sdw_slave *slave;
689 list_for_each_entry(slave, &bus->slaves, node) {
690 if (slave->dev_num == i)
697 int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
699 if (slave->id.mfg_id != id.mfg_id ||
700 slave->id.part_id != id.part_id ||
701 slave->id.class_id != id.class_id ||
702 (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
703 slave->id.unique_id != id.unique_id))
708 EXPORT_SYMBOL(sdw_compare_devid);
710 /* called with bus_lock held */
711 static int sdw_get_device_num(struct sdw_slave *slave)
715 if (slave->bus->dev_num_ida_min) {
716 bit = ida_alloc_range(&sdw_peripheral_ida,
717 slave->bus->dev_num_ida_min, SDW_MAX_DEVICES,
722 bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
723 if (bit == SDW_MAX_DEVICES) {
730 * Do not update dev_num in the Slave data structure here;
731 * update it only once programming the dev_num has succeeded.
733 set_bit(bit, slave->bus->assigned);
739 static int sdw_assign_device_num(struct sdw_slave *slave)
741 struct sdw_bus *bus = slave->bus;
743 bool new_device = false;
745 /* check first if device number is assigned, if so reuse that */
746 if (!slave->dev_num) {
747 if (!slave->dev_num_sticky) {
748 mutex_lock(&slave->bus->bus_lock);
749 dev_num = sdw_get_device_num(slave);
750 mutex_unlock(&slave->bus->bus_lock);
752 dev_err(bus->dev, "Get dev_num failed: %d\n",
756 slave->dev_num = dev_num;
757 slave->dev_num_sticky = dev_num;
760 slave->dev_num = slave->dev_num_sticky;
766 "Slave already registered, reusing dev_num:%d\n",
769 /* Clear the slave->dev_num to transfer message on device 0 */
770 dev_num = slave->dev_num;
773 ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
775 dev_err(bus->dev, "Program device_num %d failed: %d\n",
780 /* After xfer of msg, restore dev_num */
781 slave->dev_num = slave->dev_num_sticky;
783 if (bus->ops && bus->ops->new_peripheral_assigned)
784 bus->ops->new_peripheral_assigned(bus, dev_num);
789 void sdw_extract_slave_id(struct sdw_bus *bus,
790 u64 addr, struct sdw_slave_id *id)
792 dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
794 id->sdw_version = SDW_VERSION(addr);
795 id->unique_id = SDW_UNIQUE_ID(addr);
796 id->mfg_id = SDW_MFG_ID(addr);
797 id->part_id = SDW_PART_ID(addr);
798 id->class_id = SDW_CLASS_ID(addr);
801 "SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
802 id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
804 EXPORT_SYMBOL(sdw_extract_slave_id);
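/*
 * For reference, the 48-bit DevId address decoded above follows the layout of
 * the masks in sdw_registers.h: bits [47:44] SoundWire version, [43:40]
 * unique ID, [39:24] manufacturer ID, [23:8] part ID, [7:0] class ID. As a
 * worked example, addr 0x30025d071101 decodes to version 0x3, unique_id 0x0,
 * mfg_id 0x025d, part_id 0x0711, class_id 0x01.
 */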
806 static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
808 u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
809 struct sdw_slave *slave, *_s;
810 struct sdw_slave_id id;
818 /* No Slave, so use raw xfer api */
819 ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
820 SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
825 ret = sdw_transfer(bus, &msg);
826 if (ret == -ENODATA) { /* end of device id reads */
827 dev_dbg(bus->dev, "No more devices to enumerate\n");
832 dev_err(bus->dev, "DEVID read fail:%d\n", ret);
837 * Construct the 48-bit DevID address and extract the ID fields. Cast
838 * the bytes used in the higher shifts to u64 to avoid truncation.
840 addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
841 ((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
844 sdw_extract_slave_id(bus, addr, &id);
847 /* Now compare with entries */
848 list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
849 if (sdw_compare_devid(slave, id) == 0) {
853 * To prevent skipping state-machine stages, don't
854 * program a device until we've seen it report UNATTACHED.
855 * Must return here because no other device on #0
856 * can be detected until this one has been
857 * assigned a device ID.
859 if (slave->status != SDW_SLAVE_UNATTACHED)
863 * Assign a new dev_num to this Slave but do
864 * not mark it present. It will be marked
865 * present after it reports ATTACHED on the new
868 ret = sdw_assign_device_num(slave);
871 "Assign dev_num failed:%d\n",
883 /* TODO: Park this device in Group 13 */
886 * add Slave device even if there is no platform
887 * firmware description. There will be no driver probe
888 * but the user/integration will be able to see the
889 * device, enumeration status and device number in sysfs
891 sdw_slave_add(bus, &id, NULL);
893 dev_err(bus->dev, "Slave Entry not found\n");
899 * Loop until an error occurs or the retry count is exhausted.
900 * A device can drop off and rejoin during enumeration,
901 * so count up to twice the bound.
904 } while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
909 static void sdw_modify_slave_status(struct sdw_slave *slave,
910 enum sdw_slave_status status)
912 struct sdw_bus *bus = slave->bus;
914 mutex_lock(&bus->bus_lock);
917 "changing status slave %d status %d new status %d\n",
918 slave->dev_num, slave->status, status);
920 if (status == SDW_SLAVE_UNATTACHED) {
922 "initializing enumeration and init completion for Slave %d\n",
925 reinit_completion(&slave->enumeration_complete);
926 reinit_completion(&slave->initialization_complete);
928 } else if ((status == SDW_SLAVE_ATTACHED) &&
929 (slave->status == SDW_SLAVE_UNATTACHED)) {
931 "signaling enumeration completion for Slave %d\n",
934 complete_all(&slave->enumeration_complete);
936 slave->status = status;
937 mutex_unlock(&bus->bus_lock);
940 static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
941 enum sdw_clk_stop_mode mode,
942 enum sdw_clk_stop_type type)
946 mutex_lock(&slave->sdw_dev_lock);
949 struct device *dev = &slave->dev;
950 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
952 if (drv->ops && drv->ops->clk_stop)
953 ret = drv->ops->clk_stop(slave, mode, type);
956 mutex_unlock(&slave->sdw_dev_lock);
961 static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
962 enum sdw_clk_stop_mode mode,
969 wake_en = slave->prop.wake_capable;
972 val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
974 if (mode == SDW_CLK_STOP_MODE1)
975 val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
978 val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
980 ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
983 dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
987 val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
990 ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
992 if (ret < 0 && ret != -ENODATA)
993 dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);
998 static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
1000 int retry = bus->clk_stop_timeout;
1004 val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
1006 if (val != -ENODATA)
1007 dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
1010 val &= SDW_SCP_STAT_CLK_STP_NF;
1012 dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d\n",
1017 usleep_range(1000, 1500);
1021 dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d\n",
1028 * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
1030 * @bus: SDW bus instance
1032 * Query Slave for clock stop mode and prepare for that mode.
1034 int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
1036 bool simple_clk_stop = true;
1037 struct sdw_slave *slave;
1038 bool is_slave = false;
1042 * In order to save on transition time, prepare
1043 * each Slave and then wait for all Slave(s) to be
1044 * prepared for clock stop.
1045 * If one of the Slave devices has lost sync and
1046 * replies with Command Ignored/-ENODATA, we continue
1049 list_for_each_entry(slave, &bus->slaves, node) {
1050 if (!slave->dev_num)
1053 if (slave->status != SDW_SLAVE_ATTACHED &&
1054 slave->status != SDW_SLAVE_ALERT)
1057 /* Identify if Slave(s) are available on Bus */
1060 ret = sdw_slave_clk_stop_callback(slave,
1062 SDW_CLK_PRE_PREPARE);
1063 if (ret < 0 && ret != -ENODATA) {
1064 dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
1068 /* Only prepare a Slave device if needed */
1069 if (!slave->prop.simple_clk_stop_capable) {
1070 simple_clk_stop = false;
1072 ret = sdw_slave_clk_stop_prepare(slave,
1075 if (ret < 0 && ret != -ENODATA) {
1076 dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
1082 /* Skip remaining clock stop preparation if no Slave is attached */
1087 * Don't wait for all Slaves to be ready if they follow the simple
1090 if (!simple_clk_stop) {
1091 ret = sdw_bus_wait_for_clk_prep_deprep(bus,
1092 SDW_BROADCAST_DEV_NUM);
1094 * if there are no Slave devices present and the reply is
1095 * Command_Ignored/-ENODATA, we don't need to continue with the
1096 * flow and can just return here. The error code is not modified
1097 * and its handling left as an exercise for the caller.
1103 /* Inform slaves that prep is done */
1104 list_for_each_entry(slave, &bus->slaves, node) {
1105 if (!slave->dev_num)
1108 if (slave->status != SDW_SLAVE_ATTACHED &&
1109 slave->status != SDW_SLAVE_ALERT)
1112 ret = sdw_slave_clk_stop_callback(slave,
1114 SDW_CLK_POST_PREPARE);
1116 if (ret < 0 && ret != -ENODATA) {
1117 dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
1124 EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
1127 * sdw_bus_clk_stop: stop bus clock
1129 * @bus: SDW bus instance
1131 * After preparing the Slaves for clock stop, stop the clock by broadcasting
1132 * a write to the SCP_CTRL register.
1134 int sdw_bus_clk_stop(struct sdw_bus *bus)
1139 * broadcast clock stop now, attached Slaves will ACK this,
1140 * unattached will ignore
1142 ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
1143 SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
1145 if (ret != -ENODATA)
1146 dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
1152 EXPORT_SYMBOL(sdw_bus_clk_stop);
1155 * sdw_bus_exit_clk_stop: Exit clock stop mode
1157 * @bus: SDW bus instance
1159 * This de-prepares the Slaves by exiting Clock Stop Mode 0. Slaves
1160 * exiting Clock Stop Mode 1 are de-prepared after they enumerate
1163 int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
1165 bool simple_clk_stop = true;
1166 struct sdw_slave *slave;
1167 bool is_slave = false;
1171 * In order to save on transition time, de-prepare
1172 * each Slave and then wait for all Slave(s) to be
1173 * de-prepared after clock resume.
1175 list_for_each_entry(slave, &bus->slaves, node) {
1176 if (!slave->dev_num)
1179 if (slave->status != SDW_SLAVE_ATTACHED &&
1180 slave->status != SDW_SLAVE_ALERT)
1183 /* Identify if Slave(s) are available on Bus */
1186 ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1187 SDW_CLK_PRE_DEPREPARE);
1189 dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);
1191 /* Only de-prepare a Slave device if needed */
1192 if (!slave->prop.simple_clk_stop_capable) {
1193 simple_clk_stop = false;
1195 ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
1199 dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
1203 /* Skip remaining clock stop de-preparation if no Slave is attached */
1208 * Don't wait for all Slaves to be ready if they follow the simple
1211 if (!simple_clk_stop) {
1212 ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
1214 dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
1217 list_for_each_entry(slave, &bus->slaves, node) {
1218 if (!slave->dev_num)
1221 if (slave->status != SDW_SLAVE_ATTACHED &&
1222 slave->status != SDW_SLAVE_ALERT)
1225 ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1226 SDW_CLK_POST_DEPREPARE);
1228 dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
1233 EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
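/*
 * Usage sketch (illustrative only): a manager driver's suspend path typically
 * chains the clock-stop helpers above, and the resume path undoes the clock
 * stop; error handling is reduced to the minimum here. "bus" is the
 * controller's struct sdw_bus.
 *
 *	// suspend
 *	ret = sdw_bus_prep_clk_stop(bus);
 *	if (ret < 0 && ret != -ENODATA)
 *		return ret;
 *	ret = sdw_bus_clk_stop(bus);
 *
 *	// resume
 *	ret = sdw_bus_exit_clk_stop(bus);
 */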
1235 int sdw_configure_dpn_intr(struct sdw_slave *slave,
1236 int port, bool enable, int mask)
1242 if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
1243 dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
1244 enable ? "on" : "off");
1245 mask |= SDW_DPN_INT_TEST_FAIL;
1248 addr = SDW_DPN_INTMASK(port);
1250 /* Set/Clear port ready interrupt mask */
1253 val |= SDW_DPN_INT_PORT_READY;
1256 val &= ~SDW_DPN_INT_PORT_READY;
1259 ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
1261 dev_err(&slave->dev,
1262 "SDW_DPN_INTMASK write failed:%d\n", val);
1267 static int sdw_slave_set_frequency(struct sdw_slave *slave)
1269 u32 mclk_freq = slave->bus->prop.mclk_freq;
1270 u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
1277 * frequency base and scale registers are required for SDCA
1278 * devices. They may also be used for 1.2+/non-SDCA devices.
1279 * A driver can set the property; we will need a DisCo property
1280 * to discover this case from platform firmware.
1282 if (!slave->id.class_id && !slave->prop.clock_reg_supported)
1286 dev_err(&slave->dev,
1287 "no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
1292 * map base frequency using Table 89 of SoundWire 1.2 spec.
1293 * The order of the tests just follows the specification; this
1294 * is not a selection between possible values or a search for
1295 * the best value but just a mapping. Only one case per platform
1297 * Some BIOSes have inconsistent values for mclk_freq but a
1298 * correct root, so we force the mclk_freq to avoid variations.
1300 if (!(19200000 % mclk_freq)) {
1301 mclk_freq = 19200000;
1302 base = SDW_SCP_BASE_CLOCK_19200000_HZ;
1303 } else if (!(24000000 % mclk_freq)) {
1304 mclk_freq = 24000000;
1305 base = SDW_SCP_BASE_CLOCK_24000000_HZ;
1306 } else if (!(24576000 % mclk_freq)) {
1307 mclk_freq = 24576000;
1308 base = SDW_SCP_BASE_CLOCK_24576000_HZ;
1309 } else if (!(22579200 % mclk_freq)) {
1310 mclk_freq = 22579200;
1311 base = SDW_SCP_BASE_CLOCK_22579200_HZ;
1312 } else if (!(32000000 % mclk_freq)) {
1313 mclk_freq = 32000000;
1314 base = SDW_SCP_BASE_CLOCK_32000000_HZ;
1316 dev_err(&slave->dev,
1317 "Unsupported clock base, mclk %d\n",
1322 if (mclk_freq % curr_freq) {
1323 dev_err(&slave->dev,
1324 "mclk %d is not multiple of bus curr_freq %d\n",
1325 mclk_freq, curr_freq);
1329 scale = mclk_freq / curr_freq;
1332 * map the scale to Table 90 of the SoundWire 1.2 spec and check
1333 * that the scale is a power of two, at most 64
1335 scale_index = ilog2(scale);
1337 if (BIT(scale_index) != scale || scale_index > 6) {
1338 dev_err(&slave->dev,
1339 "No match found for scale %d, bus mclk %d curr_freq %d\n",
1340 scale, mclk_freq, curr_freq);
1345 ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
1347 dev_err(&slave->dev,
1348 "SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
1352 /* initialize scale for both banks */
1353 ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
1355 dev_err(&slave->dev,
1356 "SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
1359 ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
1361 dev_err(&slave->dev,
1362 "SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
1364 dev_dbg(&slave->dev,
1365 "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
1366 base, scale_index, mclk_freq, curr_freq);
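/*
 * Worked example (illustrative): with mclk_freq = 19.2 MHz and the bus
 * running at curr_dr_freq = 9.6 MHz (so curr_freq = 4.8 MHz), base maps to
 * SDW_SCP_BASE_CLOCK_19200000_HZ, scale = 19200000 / 4800000 = 4 and
 * scale_index = ilog2(4) = 2, which is the value written to both bank
 * scale registers above.
 */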
1371 static int sdw_initialize_slave(struct sdw_slave *slave)
1373 struct sdw_slave_prop *prop = &slave->prop;
1378 ret = sdw_slave_set_frequency(slave);
1382 if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
1383 /* Clear bus clash interrupt before enabling interrupt mask */
1384 status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1386 dev_err(&slave->dev,
1387 "SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
1390 if (status & SDW_SCP_INT1_BUS_CLASH) {
1391 dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
1392 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
1394 dev_err(&slave->dev,
1395 "SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
1400 if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
1401 !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
1402 /* Clear parity interrupt before enabling interrupt mask */
1403 status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1405 dev_err(&slave->dev,
1406 "SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
1409 if (status & SDW_SCP_INT1_PARITY) {
1410 dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
1411 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
1413 dev_err(&slave->dev,
1414 "SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
1421 * Set SCP_INT1_MASK register, typically bus clash and
1422 * implementation-defined interrupt mask. The Parity detection
1423 * may not always be correct on startup so its use is
1424 * device-dependent; it might e.g. only be enabled in
1425 * steady-state after a couple of frames.
1427 val = slave->prop.scp_int1_mask;
1429 /* Enable SCP interrupts */
1430 ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
1432 dev_err(&slave->dev,
1433 "SDW_SCP_INTMASK1 write failed:%d\n", ret);
1437 /* No need to continue if DP0 is not present */
1438 if (!slave->prop.dp0_prop)
1441 /* Enable DP0 interrupts */
1442 val = prop->dp0_prop->imp_def_interrupts;
1443 val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
1445 ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
1447 dev_err(&slave->dev,
1448 "SDW_DP0_INTMASK read failed:%d\n", ret);
1452 static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
1454 u8 clear, impl_int_mask;
1455 int status, status2, ret, count = 0;
1457 status = sdw_read_no_pm(slave, SDW_DP0_INT);
1459 dev_err(&slave->dev,
1460 "SDW_DP0_INT read failed:%d\n", status);
1465 clear = status & ~SDW_DP0_INTERRUPTS;
1467 if (status & SDW_DP0_INT_TEST_FAIL) {
1468 dev_err(&slave->dev, "Test fail for port 0\n");
1469 clear |= SDW_DP0_INT_TEST_FAIL;
1473 * Assumption: PORT_READY interrupt will be received only for
1474 * ports implementing Channel Prepare state machine (CP_SM)
1477 if (status & SDW_DP0_INT_PORT_READY) {
1478 complete(&slave->port_ready[0]);
1479 clear |= SDW_DP0_INT_PORT_READY;
1482 if (status & SDW_DP0_INT_BRA_FAILURE) {
1483 dev_err(&slave->dev, "BRA failed\n");
1484 clear |= SDW_DP0_INT_BRA_FAILURE;
1487 impl_int_mask = SDW_DP0_INT_IMPDEF1 |
1488 SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
1490 if (status & impl_int_mask) {
1491 clear |= impl_int_mask;
1492 *slave_status = clear;
1495 /* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
1496 ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
1498 dev_err(&slave->dev,
1499 "SDW_DP0_INT write failed:%d\n", ret);
1503 /* Read DP0 interrupt again */
1504 status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
1506 dev_err(&slave->dev,
1507 "SDW_DP0_INT read failed:%d\n", status2);
1510 /* filter to limit loop to interrupts identified in the first status read */
1515 /* we can get alerts while processing so keep retrying */
1516 } while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1518 if (count == SDW_READ_INTR_CLEAR_RETRY)
1519 dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");
1524 static int sdw_handle_port_interrupt(struct sdw_slave *slave,
1525 int port, u8 *slave_status)
1527 u8 clear, impl_int_mask;
1528 int status, status2, ret, count = 0;
1532 return sdw_handle_dp0_interrupt(slave, slave_status);
1534 addr = SDW_DPN_INT(port);
1535 status = sdw_read_no_pm(slave, addr);
1537 dev_err(&slave->dev,
1538 "SDW_DPN_INT read failed:%d\n", status);
1544 clear = status & ~SDW_DPN_INTERRUPTS;
1546 if (status & SDW_DPN_INT_TEST_FAIL) {
1547 dev_err(&slave->dev, "Test fail for port:%d\n", port);
1548 clear |= SDW_DPN_INT_TEST_FAIL;
1552 * Assumption: PORT_READY interrupt will be received only
1553 * for ports implementing CP_SM.
1555 if (status & SDW_DPN_INT_PORT_READY) {
1556 complete(&slave->port_ready[port]);
1557 clear |= SDW_DPN_INT_PORT_READY;
1560 impl_int_mask = SDW_DPN_INT_IMPDEF1 |
1561 SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
1563 if (status & impl_int_mask) {
1564 clear |= impl_int_mask;
1565 *slave_status = clear;
1568 /* clear the interrupt but don't touch reserved fields */
1569 ret = sdw_write_no_pm(slave, addr, clear);
1571 dev_err(&slave->dev,
1572 "SDW_DPN_INT write failed:%d\n", ret);
1576 /* Read DPN interrupt again */
1577 status2 = sdw_read_no_pm(slave, addr);
1579 dev_err(&slave->dev,
1580 "SDW_DPN_INT read failed:%d\n", status2);
1583 /* filter to limit loop to interrupts identified in the first status read */
1588 /* we can get alerts while processing so keep retrying */
1589 } while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1591 if (count == SDW_READ_INTR_CLEAR_RETRY)
1592 dev_warn(&slave->dev, "Reached MAX_RETRY on port read");
1597 static int sdw_handle_slave_alerts(struct sdw_slave *slave)
1599 struct sdw_slave_intr_status slave_intr;
1600 u8 clear = 0, bit, port_status[15] = {0};
1601 int port_num, stat, ret, count = 0;
1604 u8 sdca_cascade = 0;
1609 sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
1611 ret = pm_runtime_get_sync(&slave->dev);
1612 if (ret < 0 && ret != -EACCES) {
1613 dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
1614 pm_runtime_put_noidle(&slave->dev);
1618 /* Read Intstat 1, Intstat 2 and Intstat 3 registers */
1619 ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1621 dev_err(&slave->dev,
1622 "SDW_SCP_INT1 read failed:%d\n", ret);
1627 ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1629 dev_err(&slave->dev,
1630 "SDW_SCP_INT2/3 read failed:%d\n", ret);
1634 if (slave->id.class_id) {
1635 ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1637 dev_err(&slave->dev,
1638 "SDW_DP0_INT read failed:%d\n", ret);
1641 sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1645 slave_notify = false;
1648 * Check parity, bus clash and Slave (impl defined)
1651 if (buf & SDW_SCP_INT1_PARITY) {
1652 parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
1653 parity_quirk = !slave->first_interrupt_done &&
1654 (slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);
1656 if (parity_check && !parity_quirk)
1657 dev_err(&slave->dev, "Parity error detected\n");
1658 clear |= SDW_SCP_INT1_PARITY;
1661 if (buf & SDW_SCP_INT1_BUS_CLASH) {
1662 if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
1663 dev_err(&slave->dev, "Bus clash detected\n");
1664 clear |= SDW_SCP_INT1_BUS_CLASH;
1668 * When bus clash or parity errors are detected, such errors
1669 * are unlikely to be recoverable.
1670 * TODO: In such a scenario, reset the bus. Make this configurable
1671 * via a sysfs property, with bus reset being the default.
1674 if (buf & SDW_SCP_INT1_IMPL_DEF) {
1675 if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
1676 dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
1677 slave_notify = true;
1679 clear |= SDW_SCP_INT1_IMPL_DEF;
1682 /* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
1684 slave_notify = true;
1686 /* Check port 0 - 3 interrupts */
1687 port = buf & SDW_SCP_INT1_PORT0_3;
1689 /* Shift the field down so bit positions map to port numbers */
1690 port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
1691 for_each_set_bit(bit, &port, 8) {
1692 sdw_handle_port_interrupt(slave, bit,
1696 /* Check if cascade 2 interrupt is present */
1697 if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
1698 port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
1699 for_each_set_bit(bit, &port, 8) {
1700 /* scp2 ports start from 4 */
1702 sdw_handle_port_interrupt(slave,
1704 &port_status[port_num]);
1708 /* now check last cascade */
1709 if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
1710 port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
1711 for_each_set_bit(bit, &port, 8) {
1712 /* scp3 ports start from 11 */
1713 port_num = bit + 11;
1714 sdw_handle_port_interrupt(slave,
1716 &port_status[port_num]);
1720 /* Update the Slave driver */
1722 mutex_lock(&slave->sdw_dev_lock);
1724 if (slave->probed) {
1725 struct device *dev = &slave->dev;
1726 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1728 if (drv->ops && drv->ops->interrupt_callback) {
1729 slave_intr.sdca_cascade = sdca_cascade;
1730 slave_intr.control_port = clear;
1731 memcpy(slave_intr.port, &port_status,
1732 sizeof(slave_intr.port));
1734 drv->ops->interrupt_callback(slave, &slave_intr);
1738 mutex_unlock(&slave->sdw_dev_lock);
1742 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
1744 dev_err(&slave->dev,
1745 "SDW_SCP_INT1 write failed:%d\n", ret);
1749 /* at this point all initial interrupt sources were handled */
1750 slave->first_interrupt_done = true;
1753 * Read status again to ensure no new interrupts arrived
1754 * while servicing interrupts.
1756 ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1758 dev_err(&slave->dev,
1759 "SDW_SCP_INT1 recheck read failed:%d\n", ret);
1764 ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1766 dev_err(&slave->dev,
1767 "SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
1771 if (slave->id.class_id) {
1772 ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1774 dev_err(&slave->dev,
1775 "SDW_DP0_INT recheck read failed:%d\n", ret);
1778 sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1782 * Make sure no interrupts are pending
1784 stat = buf || buf2[0] || buf2[1] || sdca_cascade;
1787 * Exit loop if Slave is continuously in ALERT state even
1788 * after servicing the interrupt multiple times.
1792 /* we can get alerts while processing so keep retrying */
1793 } while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
1795 if (count == SDW_READ_INTR_CLEAR_RETRY)
1796 dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");
1799 pm_runtime_mark_last_busy(&slave->dev);
1800 pm_runtime_put_autosuspend(&slave->dev);
1805 static int sdw_update_slave_status(struct sdw_slave *slave,
1806 enum sdw_slave_status status)
1810 mutex_lock(&slave->sdw_dev_lock);
1812 if (slave->probed) {
1813 struct device *dev = &slave->dev;
1814 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1816 if (drv->ops && drv->ops->update_status)
1817 ret = drv->ops->update_status(slave, status);
1820 mutex_unlock(&slave->sdw_dev_lock);
1826 * sdw_handle_slave_status() - Handle Slave status
1827 * @bus: SDW bus instance
1828 * @status: Status for all Slave(s)
1830 int sdw_handle_slave_status(struct sdw_bus *bus,
1831 enum sdw_slave_status status[])
1833 enum sdw_slave_status prev_status;
1834 struct sdw_slave *slave;
1835 bool attached_initializing, id_programmed;
1838 /* first check if any Slaves fell off the bus */
1839 for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1840 mutex_lock(&bus->bus_lock);
1841 if (test_bit(i, bus->assigned) == false) {
1842 mutex_unlock(&bus->bus_lock);
1845 mutex_unlock(&bus->bus_lock);
1847 slave = sdw_get_slave(bus, i);
1851 if (status[i] == SDW_SLAVE_UNATTACHED &&
1852 slave->status != SDW_SLAVE_UNATTACHED) {
1853 dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
1855 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1857 /* Ensure the driver knows that the peripheral is unattached */
1858 ret = sdw_update_slave_status(slave, status[i]);
1860 dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
1864 if (status[0] == SDW_SLAVE_ATTACHED) {
1865 dev_dbg(bus->dev, "Slave attached, programming device number\n");
1868 * Programming a device number will have side effects,
1869 * so we deal with other devices at a later time.
1870 * This relies on those devices reporting ATTACHED, which will
1871 * trigger another call to this function. This will only
1872 * happen if at least one device ID was programmed.
1873 * Error returns from sdw_program_device_num() are currently
1874 * ignored because there's no useful recovery that can be done.
1875 * Returning the error here could result in the current status
1876 * of other devices not being handled, because if no device IDs
1877 * were programmed there's nothing to guarantee a status change
1878 * to trigger another call to this function.
1880 sdw_program_device_num(bus, &id_programmed);
1885 /* Continue to check other slave statuses */
1886 for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1887 mutex_lock(&bus->bus_lock);
1888 if (test_bit(i, bus->assigned) == false) {
1889 mutex_unlock(&bus->bus_lock);
1892 mutex_unlock(&bus->bus_lock);
1894 slave = sdw_get_slave(bus, i);
1898 attached_initializing = false;
1900 switch (status[i]) {
1901 case SDW_SLAVE_UNATTACHED:
1902 if (slave->status == SDW_SLAVE_UNATTACHED)
1905 dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
1908 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1911 case SDW_SLAVE_ALERT:
1912 ret = sdw_handle_slave_alerts(slave);
1914 dev_err(&slave->dev,
1915 "Slave %d alert handling failed: %d\n",
1919 case SDW_SLAVE_ATTACHED:
1920 if (slave->status == SDW_SLAVE_ATTACHED)
1923 prev_status = slave->status;
1924 sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);
1926 if (prev_status == SDW_SLAVE_ALERT)
1929 attached_initializing = true;
1931 ret = sdw_initialize_slave(slave);
1933 dev_err(&slave->dev,
1934 "Slave %d initialization failed: %d\n",
1940 dev_err(&slave->dev, "Invalid slave %d status:%d\n",
1945 ret = sdw_update_slave_status(slave, status[i]);
1947 dev_err(&slave->dev,
1948 "Update Slave status failed:%d\n", ret);
1949 if (attached_initializing) {
1950 dev_dbg(&slave->dev,
1951 "signaling initialization completion for Slave %d\n",
1954 complete_all(&slave->initialization_complete);
1957 * If the manager became pm_runtime active, the peripherals will be
1958 * restarted and attach, but their pm_runtime status may remain
1959 * suspended. If the 'update_slave_status' callback initiates
1960 * any sort of deferred processing, this processing would not be
1961 * cancelled on pm_runtime suspend.
1962 * To avoid such zombie states, we queue a request to resume.
1963 * This would be a no-op in case the peripheral was being resumed
1964 * by e.g. the ALSA/ASoC framework.
1966 pm_request_resume(&slave->dev);
1972 EXPORT_SYMBOL(sdw_handle_slave_status);
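/*
 * Usage sketch (illustrative only): a manager driver decodes its hardware
 * status registers into an sdw_slave_status array indexed by device number
 * and hands it to the core, typically from an interrupt thread or workqueue.
 * "ctrl" and the decode helper are hypothetical.
 *
 *	enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
 *
 *	my_hw_decode_peripheral_status(ctrl, status);	// hypothetical
 *	sdw_handle_slave_status(&ctrl->bus, status);
 */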
1974 void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
1976 struct sdw_slave *slave;
1979 /* Check all non-zero devices */
1980 for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1981 mutex_lock(&bus->bus_lock);
1982 if (test_bit(i, bus->assigned) == false) {
1983 mutex_unlock(&bus->bus_lock);
1986 mutex_unlock(&bus->bus_lock);
1988 slave = sdw_get_slave(bus, i);
1992 if (slave->status != SDW_SLAVE_UNATTACHED) {
1993 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1994 slave->first_interrupt_done = false;
1995 sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
1998 /* keep track of request, used in pm_runtime resume */
1999 slave->unattach_request = request;
2002 EXPORT_SYMBOL(sdw_clear_slave_status);
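/*
 * Usage sketch (illustrative only): a manager driver calls this when it knows
 * the peripherals will lose their state, e.g. before a link reset in its
 * suspend path; the request value (here the reset flag defined in sdw.h) is
 * later checked by peripheral drivers on resume.
 *
 *	sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
 */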