1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3
4 /*
5  * SoundWire Intel Master Driver
6  */
7
8 #include <linux/acpi.h>
9 #include <linux/debugfs.h>
10 #include <linux/delay.h>
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/auxiliary_bus.h>
15 #include <sound/pcm_params.h>
16 #include <linux/pm_runtime.h>
17 #include <sound/soc.h>
18 #include <linux/soundwire/sdw_registers.h>
19 #include <linux/soundwire/sdw.h>
20 #include <linux/soundwire/sdw_intel.h>
21 #include "cadence_master.h"
22 #include "bus.h"
23 #include "intel.h"
24
25 #define INTEL_MASTER_SUSPEND_DELAY_MS   3000
26 #define INTEL_MASTER_RESET_ITERATIONS   10
27
28 /*
29  * debug/config flags for the Intel SoundWire Master.
30  *
31  * Since we may have multiple masters active, up to 8 flags are reserved per master,
32  * one byte per master, with master0 using the least-significant byte (see the example below).
33  */
34
35 #define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME             BIT(0)
36 #define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP             BIT(1)
37 #define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE        BIT(2)
38 #define SDW_INTEL_MASTER_DISABLE_MULTI_LINK             BIT(3)
39
40 static int md_flags;
41 module_param_named(sdw_md_flags, md_flags, int, 0444);
42 MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
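/*
 * Illustrative example only (not a recommended setting): sdw_md_flags=0x0802
 * would set SDW_INTEL_MASTER_DISABLE_CLOCK_STOP (BIT(1)) for master0 and
 * SDW_INTEL_MASTER_DISABLE_MULTI_LINK (BIT(3)) for master1.
 */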
43
44 enum intel_pdi_type {
45         INTEL_PDI_IN = 0,
46         INTEL_PDI_OUT = 1,
47         INTEL_PDI_BD = 2,
48 };
49
50 #define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
51
52 /*
53  * Read, write helpers for HW registers
54  */
55 static inline int intel_readl(void __iomem *base, int offset)
56 {
57         return readl(base + offset);
58 }
59
60 static inline void intel_writel(void __iomem *base, int offset, int value)
61 {
62         writel(value, base + offset);
63 }
64
65 static inline u16 intel_readw(void __iomem *base, int offset)
66 {
67         return readw(base + offset);
68 }
69
70 static inline void intel_writew(void __iomem *base, int offset, u16 value)
71 {
72         writew(value, base + offset);
73 }
74
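/*
 * Poll a register until (value & mask) == target. The loop retries up to
 * 10 times with a 50-100us sleep between reads (roughly 0.5-1 ms total)
 * and returns -EAGAIN on timeout.
 */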
75 static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
76 {
77         int timeout = 10;
78         u32 reg_read;
79
80         do {
81                 reg_read = readl(base + offset);
82                 if ((reg_read & mask) == target)
83                         return 0;
84
85                 timeout--;
86                 usleep_range(50, 100);
87         } while (timeout != 0);
88
89         return -EAGAIN;
90 }
91
92 static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
93 {
94         writel(value, base + offset);
95         return intel_wait_bit(base, offset, mask, 0);
96 }
97
98 static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
99 {
100         writel(value, base + offset);
101         return intel_wait_bit(base, offset, mask, mask);
102 }
103
104 /*
105  * debugfs
106  */
107 #ifdef CONFIG_DEBUG_FS
108
109 #define RD_BUF (2 * PAGE_SIZE)
110
111 static ssize_t intel_sprintf(void __iomem *mem, bool l,
112                              char *buf, size_t pos, unsigned int reg)
113 {
114         int value;
115
116         if (l)
117                 value = intel_readl(mem, reg);
118         else
119                 value = intel_readw(mem, reg);
120
121         return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
122 }
123
124 static int intel_reg_show(struct seq_file *s_file, void *data)
125 {
126         struct sdw_intel *sdw = s_file->private;
127         void __iomem *s = sdw->link_res->shim;
128         void __iomem *a = sdw->link_res->alh;
129         char *buf;
130         ssize_t ret;
131         int i, j;
132         unsigned int links, reg;
133
134         buf = kzalloc(RD_BUF, GFP_KERNEL);
135         if (!buf)
136                 return -ENOMEM;
137
138         links = intel_readl(s, SDW_SHIM_LCAP) & GENMASK(2, 0);
139
140         ret = scnprintf(buf, RD_BUF, "Register  Value\n");
141         ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
142
143         for (i = 0; i < links; i++) {
144                 reg = SDW_SHIM_LCAP + i * 4;
145                 ret += intel_sprintf(s, true, buf, ret, reg);
146         }
147
148         for (i = 0; i < links; i++) {
149                 ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
150                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
151                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
152                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
153                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
154                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
155                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
156
157                 ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
158
159                 /*
160                  * the value 10 is the number of PDIs. We will need a
161                  * cleanup to remove hard-coded Intel configurations
162                  * from cadence_master.c
163                  */
164                 for (j = 0; j < 10; j++) {
165                         ret += intel_sprintf(s, false, buf, ret,
166                                         SDW_SHIM_PCMSYCHM(i, j));
167                         ret += intel_sprintf(s, false, buf, ret,
168                                         SDW_SHIM_PCMSYCHC(i, j));
169                 }
170                 ret += scnprintf(buf + ret, RD_BUF - ret, "\n PDMSCAP, IOCTL, CTMCTL\n");
171
172                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PDMSCAP(i));
173                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
174                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
175         }
176
177         ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
178         ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
179         ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
180
181         ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
182         for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
183                 ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
184
185         seq_printf(s_file, "%s", buf);
186         kfree(buf);
187
188         return 0;
189 }
190 DEFINE_SHOW_ATTRIBUTE(intel_reg);
191
192 static int intel_set_m_datamode(void *data, u64 value)
193 {
194         struct sdw_intel *sdw = data;
195         struct sdw_bus *bus = &sdw->cdns.bus;
196
197         if (value > SDW_PORT_DATA_MODE_STATIC_1)
198                 return -EINVAL;
199
200         /* Userspace changed the hardware state behind the kernel's back */
201         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
202
203         bus->params.m_data_mode = value;
204
205         return 0;
206 }
207 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
208                          intel_set_m_datamode, "%llu\n");
209
210 static int intel_set_s_datamode(void *data, u64 value)
211 {
212         struct sdw_intel *sdw = data;
213         struct sdw_bus *bus = &sdw->cdns.bus;
214
215         if (value > SDW_PORT_DATA_MODE_STATIC_1)
216                 return -EINVAL;
217
218         /* Userspace changed the hardware state behind the kernel's back */
219         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
220
221         bus->params.s_data_mode = value;
222
223         return 0;
224 }
225 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
226                          intel_set_s_datamode, "%llu\n");
227
228 static void intel_debugfs_init(struct sdw_intel *sdw)
229 {
230         struct dentry *root = sdw->cdns.bus.debugfs;
231
232         if (!root)
233                 return;
234
235         sdw->debugfs = debugfs_create_dir("intel-sdw", root);
236
237         debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
238                             &intel_reg_fops);
239
240         debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
241                             &intel_set_m_datamode_fops);
242
243         debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
244                             &intel_set_s_datamode_fops);
245
246         sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
247 }
248
249 static void intel_debugfs_exit(struct sdw_intel *sdw)
250 {
251         debugfs_remove_recursive(sdw->debugfs);
252 }
253 #else
254 static void intel_debugfs_init(struct sdw_intel *sdw) {}
255 static void intel_debugfs_exit(struct sdw_intel *sdw) {}
256 #endif /* CONFIG_DEBUG_FS */
257
258 /*
259  * shim ops
260  */
261
262 static int intel_link_power_up(struct sdw_intel *sdw)
263 {
264         unsigned int link_id = sdw->instance;
265         void __iomem *shim = sdw->link_res->shim;
266         u32 *shim_mask = sdw->link_res->shim_mask;
267         struct sdw_bus *bus = &sdw->cdns.bus;
268         struct sdw_master_prop *prop = &bus->prop;
269         u32 spa_mask, cpa_mask;
270         u32 link_control;
271         int ret = 0;
272         u32 syncprd;
273         u32 sync_reg;
274
275         mutex_lock(sdw->link_res->shim_lock);
276
277         /*
278          * The hardware relies on an internal counter, typically 4kHz,
279          * to generate the SoundWire SSP - which defines a 'safe'
280          * synchronization point between commands and audio transport
281          * and allows for multi link synchronization. The SYNCPRD value
282          * is only dependent on the oscillator clock provided to
283          * the IP, so adjust based on _DSD properties reported in DSDT
284          * tables. The values reported are based on either 24MHz
285          * (CNL/CML) or 38.4 MHz (ICL/TGL+).
286          */
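        /*
         * With the divide-by-two applied in sdw_master_read_intel_prop(), a
         * 24 MHz oscillator yields mclk_freq = 12 MHz (an exact multiple of
         * 6 MHz) while 38.4 MHz yields 19.2 MHz (not a multiple), which is
         * what the test below distinguishes.
         */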
287         if (prop->mclk_freq % 6000000)
288                 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
289         else
290                 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
291
292         if (!*shim_mask) {
293                 dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__);
294
295                 /* we first need to program the SyncPRD/CPU registers */
296                 dev_dbg(sdw->cdns.dev,
297                         "%s: first link up, programming SYNCPRD\n", __func__);
298
299                 /* set SyncPRD period */
300                 sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
301                 u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
302
303                 /* Set SyncCPU bit */
304                 sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
305                 intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
306
307                 /* Link power up sequence */
308                 link_control = intel_readl(shim, SDW_SHIM_LCTL);
309
310                 /* only power-up enabled links */
311                 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
312                 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
313
314                 link_control |=  spa_mask;
315
316                 ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
317                 if (ret < 0) {
318                         dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
319                         goto out;
320                 }
321
322                 /* SyncCPU will change once link is active */
323                 ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
324                                      SDW_SHIM_SYNC_SYNCCPU, 0);
325                 if (ret < 0) {
326                         dev_err(sdw->cdns.dev,
327                                 "Failed to set SHIM_SYNC: %d\n", ret);
328                         goto out;
329                 }
330         }
331
332         *shim_mask |= BIT(link_id);
333
334         sdw->cdns.link_up = true;
335 out:
336         mutex_unlock(sdw->link_res->shim_lock);
337
338         return ret;
339 }
340
341 /* this needs to be called with shim_lock held */
342 static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
343 {
344         void __iomem *shim = sdw->link_res->shim;
345         unsigned int link_id = sdw->instance;
346         u16 ioctl;
347
348         /* Switch to MIP from Glue logic */
349         ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
350
351         ioctl &= ~(SDW_SHIM_IOCTL_DOE);
352         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
353         usleep_range(10, 15);
354
355         ioctl &= ~(SDW_SHIM_IOCTL_DO);
356         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
357         usleep_range(10, 15);
358
359         ioctl |= (SDW_SHIM_IOCTL_MIF);
360         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
361         usleep_range(10, 15);
362
363         ioctl &= ~(SDW_SHIM_IOCTL_BKE);
364         ioctl &= ~(SDW_SHIM_IOCTL_COE);
365         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
366         usleep_range(10, 15);
367
368         /* at this point Master IP has full control of the I/Os */
369 }
370
371 /* this needs to be called with shim_lock held */
372 static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
373 {
374         unsigned int link_id = sdw->instance;
375         void __iomem *shim = sdw->link_res->shim;
376         u16 ioctl;
377
378         /* Glue logic */
379         ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
380         ioctl |= SDW_SHIM_IOCTL_BKE;
381         ioctl |= SDW_SHIM_IOCTL_COE;
382         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
383         usleep_range(10, 15);
384
385         ioctl &= ~(SDW_SHIM_IOCTL_MIF);
386         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
387         usleep_range(10, 15);
388
389         /* at this point Integration Glue has full control of the I/Os */
390 }
391
392 static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
393 {
394         void __iomem *shim = sdw->link_res->shim;
395         unsigned int link_id = sdw->instance;
396         int ret = 0;
397         u16 ioctl = 0, act = 0;
398
399         mutex_lock(sdw->link_res->shim_lock);
400
401         /* Initialize Shim */
402         ioctl |= SDW_SHIM_IOCTL_BKE;
403         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
404         usleep_range(10, 15);
405
406         ioctl |= SDW_SHIM_IOCTL_WPDD;
407         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
408         usleep_range(10, 15);
409
410         ioctl |= SDW_SHIM_IOCTL_DO;
411         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
412         usleep_range(10, 15);
413
414         ioctl |= SDW_SHIM_IOCTL_DOE;
415         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
416         usleep_range(10, 15);
417
418         intel_shim_glue_to_master_ip(sdw);
419
420         u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
421         act |= SDW_SHIM_CTMCTL_DACTQE;
422         act |= SDW_SHIM_CTMCTL_DODS;
423         intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
424         usleep_range(10, 15);
425
426         mutex_unlock(sdw->link_res->shim_lock);
427
428         return ret;
429 }
430
431 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
432 {
433         void __iomem *shim = sdw->link_res->shim;
434         unsigned int link_id = sdw->instance;
435         u16 wake_en, wake_sts;
436
437         mutex_lock(sdw->link_res->shim_lock);
438         wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
439
440         if (wake_enable) {
441                 /* Enable the wakeup */
442                 wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
443                 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
444         } else {
445                 /* Disable the wake up interrupt */
446                 wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
447                 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
448
449                 /* Clear wake status */
450                 wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
451                 wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
452                 intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
453         }
454         mutex_unlock(sdw->link_res->shim_lock);
455 }
456
457 static int intel_link_power_down(struct sdw_intel *sdw)
458 {
459         u32 link_control, spa_mask, cpa_mask;
460         unsigned int link_id = sdw->instance;
461         void __iomem *shim = sdw->link_res->shim;
462         u32 *shim_mask = sdw->link_res->shim_mask;
463         int ret = 0;
464
465         mutex_lock(sdw->link_res->shim_lock);
466
467         if (!(*shim_mask & BIT(link_id)))
468                 dev_err(sdw->cdns.dev,
469                         "%s: Unbalanced power-up/down calls\n", __func__);
470
471         sdw->cdns.link_up = false;
472
473         intel_shim_master_ip_to_glue(sdw);
474
475         *shim_mask &= ~BIT(link_id);
476
477         if (!*shim_mask) {
478
479                 dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__);
480
481                 /* Link power down sequence */
482                 link_control = intel_readl(shim, SDW_SHIM_LCTL);
483
484                 /* only power-down enabled links */
485                 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
486                 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
487
488                 link_control &=  spa_mask;
489
490                 ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
491                 if (ret < 0) {
492                         dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
493
494                         /*
495                          * we leave the sdw->cdns.link_up flag as false since we've disabled
496                          * the link at this point and cannot handle interrupts any longer.
497                          */
498                 }
499         }
500
501         mutex_unlock(sdw->link_res->shim_lock);
502
503         return ret;
504 }
505
506 static void intel_shim_sync_arm(struct sdw_intel *sdw)
507 {
508         void __iomem *shim = sdw->link_res->shim;
509         u32 sync_reg;
510
511         mutex_lock(sdw->link_res->shim_lock);
512
513         /* update SYNC register */
514         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
515         sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
516         intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
517
518         mutex_unlock(sdw->link_res->shim_lock);
519 }
520
521 static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
522 {
523         void __iomem *shim = sdw->link_res->shim;
524         u32 sync_reg;
525         int ret;
526
527         /* Read SYNC register */
528         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
529
530         /*
531          * Set SyncGO bit to synchronously trigger a bank switch for
532          * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
533          * the Masters.
534          */
535         sync_reg |= SDW_SHIM_SYNC_SYNCGO;
536
537         ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
538                               SDW_SHIM_SYNC_SYNCGO);
539
540         if (ret < 0)
541                 dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
542
543         return ret;
544 }
545
546 static int intel_shim_sync_go(struct sdw_intel *sdw)
547 {
548         int ret;
549
550         mutex_lock(sdw->link_res->shim_lock);
551
552         ret = intel_shim_sync_go_unlocked(sdw);
553
554         mutex_unlock(sdw->link_res->shim_lock);
555
556         return ret;
557 }
558
559 /*
560  * PDI routines
561  */
562 static void intel_pdi_init(struct sdw_intel *sdw,
563                            struct sdw_cdns_stream_config *config)
564 {
565         void __iomem *shim = sdw->link_res->shim;
566         unsigned int link_id = sdw->instance;
567         int pcm_cap;
568
569         /* PCM Stream Capability */
570         pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
571
572         config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
573         config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
574         config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
575
576         dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
577                 config->pcm_bd, config->pcm_in, config->pcm_out);
578 }
579
580 static int
581 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
582 {
583         void __iomem *shim = sdw->link_res->shim;
584         unsigned int link_id = sdw->instance;
585         int count;
586
587         count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
588
589         /*
590          * WORKAROUND: on all existing Intel controllers, PDI
591          * number 2 reports a channel count of 1 even though it
592          * supports 8 channels, so hard-code the value for PDI
593          * number 2.
594          */
595         if (pdi_num == 2)
596                 count = 7;
597
598         /* the register reports a zero-based channel count (7 means 8 channels), so add one */
599         count++;
600
601         return count;
602 }
603
604 static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
605                                    struct sdw_cdns_pdi *pdi,
606                                    unsigned int num_pdi,
607                                    unsigned int *num_ch)
608 {
609         int i, ch_count = 0;
610
611         for (i = 0; i < num_pdi; i++) {
612                 pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
613                 ch_count += pdi->ch_count;
614                 pdi++;
615         }
616
617         *num_ch = ch_count;
618         return 0;
619 }
620
621 static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
622                                       struct sdw_cdns_streams *stream)
623 {
624         intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
625                                 &stream->num_ch_bd);
626
627         intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
628                                 &stream->num_ch_in);
629
630         intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
631                                 &stream->num_ch_out);
632
633         return 0;
634 }
635
636 static int intel_pdi_ch_update(struct sdw_intel *sdw)
637 {
638         intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
639
640         return 0;
641 }
642
643 static void
644 intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
645 {
646         void __iomem *shim = sdw->link_res->shim;
647         unsigned int link_id = sdw->instance;
648         int pdi_conf = 0;
649
650         /* the Bulk and PCM streams are not contiguous */
651         pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
652         if (pdi->num >= 2)
653                 pdi->intel_alh_id += 2;
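        /*
         * e.g. on link 0 this yields PDI0 -> ALH stream 3, PDI1 -> 4,
         * PDI2 -> 7, PDI3 -> 8 (PDIs >= 2 get an extra offset of 2).
         */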
654
655         /*
656          * Program stream parameters to stream SHIM register
657          * This is applicable for PCM stream only.
658          */
659         if (pdi->type != SDW_STREAM_PCM)
660                 return;
661
662         if (pdi->dir == SDW_DATA_DIR_RX)
663                 pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
664         else
665                 pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
666
667         u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
668         u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
669         u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
670
671         intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
672 }
673
674 static void
675 intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
676 {
677         void __iomem *alh = sdw->link_res->alh;
678         unsigned int link_id = sdw->instance;
679         unsigned int conf;
680
681         /* the Bulk and PCM streams are not contiguous */
682         pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
683         if (pdi->num >= 2)
684                 pdi->intel_alh_id += 2;
685
686         /* Program Stream config ALH register */
687         conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
688
689         u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
690         u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
691
692         intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
693 }
694
695 static int intel_params_stream(struct sdw_intel *sdw,
696                                int stream,
697                                struct snd_soc_dai *dai,
698                                struct snd_pcm_hw_params *hw_params,
699                                int link_id, int alh_stream_id)
700 {
701         struct sdw_intel_link_res *res = sdw->link_res;
702         struct sdw_intel_stream_params_data params_data;
703
704         params_data.stream = stream; /* direction */
705         params_data.dai = dai;
706         params_data.hw_params = hw_params;
707         params_data.link_id = link_id;
708         params_data.alh_stream_id = alh_stream_id;
709
710         if (res->ops && res->ops->params_stream && res->dev)
711                 return res->ops->params_stream(res->dev,
712                                                &params_data);
713         return -EIO;
714 }
715
716 static int intel_free_stream(struct sdw_intel *sdw,
717                              int stream,
718                              struct snd_soc_dai *dai,
719                              int link_id)
720 {
721         struct sdw_intel_link_res *res = sdw->link_res;
722         struct sdw_intel_stream_free_data free_data;
723
724         free_data.stream = stream; /* direction */
725         free_data.dai = dai;
726         free_data.link_id = link_id;
727
728         if (res->ops && res->ops->free_stream && res->dev)
729                 return res->ops->free_stream(res->dev,
730                                              &free_data);
731
732         return 0;
733 }
734
735 /*
736  * bank switch routines
737  */
738
739 static int intel_pre_bank_switch(struct sdw_bus *bus)
740 {
741         struct sdw_cdns *cdns = bus_to_cdns(bus);
742         struct sdw_intel *sdw = cdns_to_intel(cdns);
743
744         /* Write to register only for multi-link */
745         if (!bus->multi_link)
746                 return 0;
747
748         intel_shim_sync_arm(sdw);
749
750         return 0;
751 }
752
753 static int intel_post_bank_switch(struct sdw_bus *bus)
754 {
755         struct sdw_cdns *cdns = bus_to_cdns(bus);
756         struct sdw_intel *sdw = cdns_to_intel(cdns);
757         void __iomem *shim = sdw->link_res->shim;
758         int sync_reg, ret;
759
760         /* Write to register only for multi-link */
761         if (!bus->multi_link)
762                 return 0;
763
764         mutex_lock(sdw->link_res->shim_lock);
765
766         /* Read SYNC register */
767         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
768
769         /*
770          * post_bank_switch() ops is called from the bus in a loop for
771          * all the Masters in the stream, with the expectation that
772          * we trigger the bank switch only for the first Master in the list
773          * and do nothing for the other Masters.
774          *
775          * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
776          */
777         if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
778                 ret = 0;
779                 goto unlock;
780         }
781
782         ret = intel_shim_sync_go_unlocked(sdw);
783 unlock:
784         mutex_unlock(sdw->link_res->shim_lock);
785
786         if (ret < 0)
787                 dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
788
789         return ret;
790 }
791
792 /*
793  * DAI routines
794  */
795
796 static int intel_startup(struct snd_pcm_substream *substream,
797                          struct snd_soc_dai *dai)
798 {
799         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
800         int ret;
801
802         ret = pm_runtime_resume_and_get(cdns->dev);
803         if (ret < 0 && ret != -EACCES) {
804                 dev_err_ratelimited(cdns->dev,
805                                     "pm_runtime_resume_and_get failed in %s, ret %d\n",
806                                     __func__, ret);
807                 return ret;
808         }
809         return 0;
810 }
811
812 static int intel_hw_params(struct snd_pcm_substream *substream,
813                            struct snd_pcm_hw_params *params,
814                            struct snd_soc_dai *dai)
815 {
816         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
817         struct sdw_intel *sdw = cdns_to_intel(cdns);
818         struct sdw_cdns_dma_data *dma;
819         struct sdw_cdns_pdi *pdi;
820         struct sdw_stream_config sconfig;
821         struct sdw_port_config *pconfig;
822         int ch, dir;
823         int ret;
824
825         dma = snd_soc_dai_get_dma_data(dai, substream);
826         if (!dma)
827                 return -EIO;
828
829         ch = params_channels(params);
830         if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
831                 dir = SDW_DATA_DIR_RX;
832         else
833                 dir = SDW_DATA_DIR_TX;
834
835         pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
836
837         if (!pdi) {
838                 ret = -EINVAL;
839                 goto error;
840         }
841
842         /* do run-time configurations for SHIM, ALH and PDI/PORT */
843         intel_pdi_shim_configure(sdw, pdi);
844         intel_pdi_alh_configure(sdw, pdi);
845         sdw_cdns_config_stream(cdns, ch, dir, pdi);
846
847         /* store pdi and hw_params, may be needed in prepare step */
848         dma->paused = false;
849         dma->suspended = false;
850         dma->pdi = pdi;
851         dma->hw_params = params;
852
853         /* Inform DSP about PDI stream number */
854         ret = intel_params_stream(sdw, substream->stream, dai, params,
855                                   sdw->instance,
856                                   pdi->intel_alh_id);
857         if (ret)
858                 goto error;
859
860         sconfig.direction = dir;
861         sconfig.ch_count = ch;
862         sconfig.frame_rate = params_rate(params);
863         sconfig.type = dma->stream_type;
864
865         sconfig.bps = snd_pcm_format_width(params_format(params));
866
867         /* Port configuration */
868         pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
869         if (!pconfig) {
870                 ret =  -ENOMEM;
871                 goto error;
872         }
873
874         pconfig->num = pdi->num;
875         pconfig->ch_mask = (1 << ch) - 1;
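        /* e.g. ch = 2 gives ch_mask = 0x3, i.e. the two lowest channels */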
876
877         ret = sdw_stream_add_master(&cdns->bus, &sconfig,
878                                     pconfig, 1, dma->stream);
879         if (ret)
880                 dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
881
882         kfree(pconfig);
883 error:
884         return ret;
885 }
886
887 static int intel_prepare(struct snd_pcm_substream *substream,
888                          struct snd_soc_dai *dai)
889 {
890         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
891         struct sdw_intel *sdw = cdns_to_intel(cdns);
892         struct sdw_cdns_dma_data *dma;
893         int ch, dir;
894         int ret = 0;
895
896         dma = snd_soc_dai_get_dma_data(dai, substream);
897         if (!dma) {
898                 dev_err(dai->dev, "failed to get dma data in %s\n",
899                         __func__);
900                 return -EIO;
901         }
902
903         if (dma->suspended) {
904                 dma->suspended = false;
905
906                 /*
907                  * .prepare() is called after system resume, where we
908                  * need to reinitialize the SHIM/ALH/Cadence IP.
909                  * .prepare() is also called to deal with underflows,
910                  * but in those cases we cannot touch ALH/SHIM
911                  * registers
912                  */
913
914                 /* configure stream */
915                 ch = params_channels(dma->hw_params);
916                 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
917                         dir = SDW_DATA_DIR_RX;
918                 else
919                         dir = SDW_DATA_DIR_TX;
920
921                 intel_pdi_shim_configure(sdw, dma->pdi);
922                 intel_pdi_alh_configure(sdw, dma->pdi);
923                 sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
924
925                 /* Inform DSP about PDI stream number */
926                 ret = intel_params_stream(sdw, substream->stream, dai,
927                                           dma->hw_params,
928                                           sdw->instance,
929                                           dma->pdi->intel_alh_id);
930         }
931
932         return ret;
933 }
934
935 static int
936 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
937 {
938         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
939         struct sdw_intel *sdw = cdns_to_intel(cdns);
940         struct sdw_cdns_dma_data *dma;
941         int ret;
942
943         dma = snd_soc_dai_get_dma_data(dai, substream);
944         if (!dma)
945                 return -EIO;
946
947         /*
948          * The sdw stream state will transition to RELEASED when stream->
949          * master_list is empty. So the stream state will transition to
950          * DEPREPARED for the first cpu-dai and to RELEASED for the last
951          * cpu-dai.
952          */
953         ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
954         if (ret < 0) {
955                 dev_err(dai->dev, "remove master from stream %s failed: %d\n",
956                         dma->stream->name, ret);
957                 return ret;
958         }
959
960         ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
961         if (ret < 0) {
962                 dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
963                 return ret;
964         }
965
966         dma->hw_params = NULL;
967         dma->pdi = NULL;
968
969         return 0;
970 }
971
972 static void intel_shutdown(struct snd_pcm_substream *substream,
973                            struct snd_soc_dai *dai)
974 {
975         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
976
977         pm_runtime_mark_last_busy(cdns->dev);
978         pm_runtime_put_autosuspend(cdns->dev);
979 }
980
981 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
982                                     void *stream, int direction)
983 {
984         return cdns_set_sdw_stream(dai, stream, direction);
985 }
986
987 static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
988                                   int direction)
989 {
990         struct sdw_cdns_dma_data *dma;
991
992         if (direction == SNDRV_PCM_STREAM_PLAYBACK)
993                 dma = dai->playback_dma_data;
994         else
995                 dma = dai->capture_dma_data;
996
997         if (!dma)
998                 return ERR_PTR(-EINVAL);
999
1000         return dma->stream;
1001 }
1002
1003 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
1004 {
1005         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1006         struct sdw_intel *sdw = cdns_to_intel(cdns);
1007         struct sdw_intel_link_res *res = sdw->link_res;
1008         struct sdw_cdns_dma_data *dma;
1009         int ret = 0;
1010
1011         /*
1012          * The .trigger callback is used to send required IPC to audio
1013          * firmware. The .free_stream callback will still be called
1014          * by intel_free_stream() in the TRIGGER_SUSPEND case.
1015          */
1016         if (res->ops && res->ops->trigger)
1017                 res->ops->trigger(dai, cmd, substream->stream);
1018
1019         dma = snd_soc_dai_get_dma_data(dai, substream);
1020         if (!dma) {
1021                 dev_err(dai->dev, "failed to get dma data in %s\n",
1022                         __func__);
1023                 return -EIO;
1024         }
1025
1026         switch (cmd) {
1027         case SNDRV_PCM_TRIGGER_SUSPEND:
1028
1029                 /*
1030                  * The .prepare callback is used to deal with xruns and resume operations.
1031                  * In the case of xruns, the DMAs and SHIM registers cannot be touched,
1032                  * but for resume operations the DMAs and SHIM registers need to be initialized.
1033                  * The .trigger callback is used to track the suspend case only.
1034                  */
1035
1036                 dma->suspended = true;
1037
1038                 ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
1039                 break;
1040
1041         case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1042                 dma->paused = true;
1043                 break;
1044         case SNDRV_PCM_TRIGGER_STOP:
1045         case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1046                 dma->paused = false;
1047                 break;
1048         default:
1049                 break;
1050         }
1051
1052         return ret;
1053 }
1054
1055 static int intel_component_probe(struct snd_soc_component *component)
1056 {
1057         int ret;
1058
1059         /*
1060          * Make sure the device is pm_runtime_active before initiating
1061          * bus transactions during the card registration.
1062          * We use pm_runtime_resume() here instead of taking a reference
1063          * and releasing it immediately.
1064          */
1065         ret = pm_runtime_resume(component->dev);
1066         if (ret < 0 && ret != -EACCES)
1067                 return ret;
1068
1069         return 0;
1070 }
1071
1072 static int intel_component_dais_suspend(struct snd_soc_component *component)
1073 {
1074         struct snd_soc_dai *dai;
1075
1076         /*
1077          * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
1078          * does not issue the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
1079          * Since the component suspend is called last, we can trap this corner case
1080          * and force the DAIs to release their resources.
1081          */
1082         for_each_component_dais(component, dai) {
1083                 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1084                 struct sdw_intel *sdw = cdns_to_intel(cdns);
1085                 struct sdw_cdns_dma_data *dma;
1086                 int stream;
1087                 int ret;
1088
1089                 dma = dai->playback_dma_data;
1090                 stream = SNDRV_PCM_STREAM_PLAYBACK;
1091                 if (!dma) {
1092                         dma = dai->capture_dma_data;
1093                         stream = SNDRV_PCM_STREAM_CAPTURE;
1094                 }
1095
1096                 if (!dma)
1097                         continue;
1098
1099                 if (dma->suspended)
1100                         continue;
1101
1102                 if (dma->paused) {
1103                         dma->suspended = true;
1104
1105                         ret = intel_free_stream(sdw, stream, dai, sdw->instance);
1106                         if (ret < 0)
1107                                 return ret;
1108                 }
1109         }
1110
1111         return 0;
1112 }
1113
1114 static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
1115         .startup = intel_startup,
1116         .hw_params = intel_hw_params,
1117         .prepare = intel_prepare,
1118         .hw_free = intel_hw_free,
1119         .trigger = intel_trigger,
1120         .shutdown = intel_shutdown,
1121         .set_stream = intel_pcm_set_sdw_stream,
1122         .get_stream = intel_get_sdw_stream,
1123 };
1124
1125 static const struct snd_soc_component_driver dai_component = {
1126         .name                   = "soundwire",
1127         .probe                  = intel_component_probe,
1128         .suspend                = intel_component_dais_suspend,
1129         .legacy_dai_naming      = 1,
1130 };
1131
1132 static int intel_create_dai(struct sdw_cdns *cdns,
1133                             struct snd_soc_dai_driver *dais,
1134                             enum intel_pdi_type type,
1135                             u32 num, u32 off, u32 max_ch)
1136 {
1137         int i;
1138
1139         if (num == 0)
1140                 return 0;
1141
1142          /* TODO: Read supported rates/formats from hardware */
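        /*
         * DAI names follow the "SDW<instance> Pin<index>" convention,
         * e.g. "SDW0 Pin2" for DAI index 2 on the first instance.
         */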
1143         for (i = off; i < (off + num); i++) {
1144                 dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1145                                               "SDW%d Pin%d",
1146                                               cdns->instance, i);
1147                 if (!dais[i].name)
1148                         return -ENOMEM;
1149
1150                 if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1151                         dais[i].playback.channels_min = 1;
1152                         dais[i].playback.channels_max = max_ch;
1153                         dais[i].playback.rates = SNDRV_PCM_RATE_48000;
1154                         dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
1155                 }
1156
1157                 if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1158                         dais[i].capture.channels_min = 1;
1159                         dais[i].capture.channels_max = max_ch;
1160                         dais[i].capture.rates = SNDRV_PCM_RATE_48000;
1161                         dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
1162                 }
1163
1164                 dais[i].ops = &intel_pcm_dai_ops;
1165         }
1166
1167         return 0;
1168 }
1169
1170 static int intel_register_dai(struct sdw_intel *sdw)
1171 {
1172         struct sdw_cdns *cdns = &sdw->cdns;
1173         struct sdw_cdns_streams *stream;
1174         struct snd_soc_dai_driver *dais;
1175         int num_dai, ret, off = 0;
1176
1177         /* DAIs are created based on total number of PDIs supported */
1178         num_dai = cdns->pcm.num_pdi;
1179
1180         dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1181         if (!dais)
1182                 return -ENOMEM;
1183
1184         /* Create PCM DAIs */
1185         stream = &cdns->pcm;
1186
1187         ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1188                                off, stream->num_ch_in);
1189         if (ret)
1190                 return ret;
1191
1192         off += cdns->pcm.num_in;
1193         ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1194                                off, stream->num_ch_out);
1195         if (ret)
1196                 return ret;
1197
1198         off += cdns->pcm.num_out;
1199         ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1200                                off, stream->num_ch_bd);
1201         if (ret)
1202                 return ret;
1203
1204         return snd_soc_register_component(cdns->dev, &dai_component,
1205                                           dais, num_dai);
1206 }
1207
1208 static int sdw_master_read_intel_prop(struct sdw_bus *bus)
1209 {
1210         struct sdw_master_prop *prop = &bus->prop;
1211         struct fwnode_handle *link;
1212         char name[32];
1213         u32 quirk_mask;
1214
1215         /* Find master handle */
1216         snprintf(name, sizeof(name),
1217                  "mipi-sdw-link-%d-subproperties", bus->link_id);
1218
1219         link = device_get_named_child_node(bus->dev, name);
1220         if (!link) {
1221                 dev_err(bus->dev, "Master node %s not found\n", name);
1222                 return -EIO;
1223         }
1224
1225         fwnode_property_read_u32(link,
1226                                  "intel-sdw-ip-clock",
1227                                  &prop->mclk_freq);
1228
1229         /* the values reported by BIOS are the 2x clock, not the bus clock */
1230         prop->mclk_freq /= 2;
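        /*
         * e.g. a reported 38.4 MHz IP clock yields mclk_freq = 19.2 MHz and a
         * 24 MHz IP clock yields 12 MHz; intel_link_power_up() uses this value
         * to select the SYNCPRD setting.
         */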
1231
1232         fwnode_property_read_u32(link,
1233                                  "intel-quirk-mask",
1234                                  &quirk_mask);
1235
1236         if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
1237                 prop->hw_disabled = true;
1238
1239         prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
1240                 SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
1241
1242         return 0;
1243 }
1244
1245 static int intel_prop_read(struct sdw_bus *bus)
1246 {
1247         /* Initialize with default handler to read all DisCo properties */
1248         sdw_master_read_prop(bus);
1249
1250         /* read Intel-specific properties */
1251         sdw_master_read_intel_prop(bus);
1252
1253         return 0;
1254 }
1255
1256 static struct sdw_master_ops sdw_intel_ops = {
1257         .read_prop = sdw_master_read_prop,
1258         .override_adr = sdw_dmi_override_adr,
1259         .xfer_msg = cdns_xfer_msg,
1260         .xfer_msg_defer = cdns_xfer_msg_defer,
1261         .reset_page_addr = cdns_reset_page_addr,
1262         .set_bus_conf = cdns_bus_conf,
1263         .pre_bank_switch = intel_pre_bank_switch,
1264         .post_bank_switch = intel_post_bank_switch,
1265 };
1266
1267 static int intel_init(struct sdw_intel *sdw)
1268 {
1269         bool clock_stop;
1270
1271         /* Initialize shim and controller */
1272         intel_link_power_up(sdw);
1273
1274         clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);
1275
1276         intel_shim_init(sdw, clock_stop);
1277
1278         return 0;
1279 }
1280
1281 /*
1282  * probe and init (aux_dev_id argument is required by function prototype but not used)
1283  */
1284 static int intel_link_probe(struct auxiliary_device *auxdev,
1285                             const struct auxiliary_device_id *aux_dev_id)
1286
1287 {
1288         struct device *dev = &auxdev->dev;
1289         struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev);
1290         struct sdw_intel *sdw;
1291         struct sdw_cdns *cdns;
1292         struct sdw_bus *bus;
1293         int ret;
1294
1295         sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
1296         if (!sdw)
1297                 return -ENOMEM;
1298
1299         cdns = &sdw->cdns;
1300         bus = &cdns->bus;
1301
1302         sdw->instance = auxdev->id;
1303         sdw->link_res = &ldev->link_res;
1304         cdns->dev = dev;
1305         cdns->registers = sdw->link_res->registers;
1306         cdns->instance = sdw->instance;
1307         cdns->msg_count = 0;
1308
1309         bus->link_id = auxdev->id;
1310
1311         sdw_cdns_probe(cdns);
1312
1313         /* Set property read ops */
1314         sdw_intel_ops.read_prop = intel_prop_read;
1315         bus->ops = &sdw_intel_ops;
1316
1317         /* set driver data, accessed by snd_soc_dai_get_drvdata() */
1318         auxiliary_set_drvdata(auxdev, cdns);
1319
1320         /* use generic bandwidth allocation algorithm */
1321         sdw->cdns.bus.compute_params = sdw_compute_params;
1322
1323         /* avoid resuming from pm_runtime suspend if it's not required */
1324         dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
1325
1326         ret = sdw_bus_master_add(bus, dev, dev->fwnode);
1327         if (ret) {
1328                 dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
1329                 return ret;
1330         }
1331
1332         if (bus->prop.hw_disabled)
1333                 dev_info(dev,
1334                          "SoundWire master %d is disabled, will be ignored\n",
1335                          bus->link_id);
1336         /*
1337          * Ignore BIOS err_threshold, it's a really bad idea when dealing
1338          * with multiple hardware synchronized links
1339          */
1340         bus->prop.err_threshold = 0;
1341
1342         return 0;
1343 }
1344
1345 int intel_link_startup(struct auxiliary_device *auxdev)
1346 {
1347         struct sdw_cdns_stream_config config;
1348         struct device *dev = &auxdev->dev;
1349         struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
1350         struct sdw_intel *sdw = cdns_to_intel(cdns);
1351         struct sdw_bus *bus = &cdns->bus;
1352         int link_flags;
1353         bool multi_link;
1354         u32 clock_stop_quirks;
1355         int ret;
1356
1357         if (bus->prop.hw_disabled) {
1358                 dev_info(dev,
1359                          "SoundWire master %d is disabled, ignoring\n",
1360                          sdw->instance);
1361                 return 0;
1362         }
1363
1364         link_flags = md_flags >> (bus->link_id * 8);
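        /* each link consumes one byte of the sdw_md_flags module parameter */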
1365         multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1366         if (!multi_link) {
1367                 dev_dbg(dev, "Multi-link is disabled\n");
1368                 bus->multi_link = false;
1369         } else {
1370                 /*
1371                  * hardware-based synchronization is required regardless
1372                  * of the number of segments used by a stream: SSP-based
1373                  * synchronization is gated by gsync when the multi-master
1374                  * mode is set.
1375                  */
1376                 bus->multi_link = true;
1377                 bus->hw_sync_min_links = 1;
1378         }
1379
1380         /* Initialize shim, controller */
1381         ret = intel_init(sdw);
1382         if (ret)
1383                 goto err_init;
1384
1385         /* Read the PDI config and initialize cadence PDI */
1386         intel_pdi_init(sdw, &config);
1387         ret = sdw_cdns_pdi_init(cdns, config);
1388         if (ret)
1389                 goto err_init;
1390
1391         intel_pdi_ch_update(sdw);
1392
1393         ret = sdw_cdns_enable_interrupt(cdns, true);
1394         if (ret < 0) {
1395                 dev_err(dev, "cannot enable interrupts\n");
1396                 goto err_init;
1397         }
1398
1399         /*
1400          * follow recommended programming flows to avoid timeouts when
1401          * gsync is enabled
1402          */
1403         if (multi_link)
1404                 intel_shim_sync_arm(sdw);
1405
1406         ret = sdw_cdns_init(cdns);
1407         if (ret < 0) {
1408                 dev_err(dev, "unable to initialize Cadence IP\n");
1409                 goto err_interrupt;
1410         }
1411
1412         ret = sdw_cdns_exit_reset(cdns);
1413         if (ret < 0) {
1414                 dev_err(dev, "unable to exit bus reset sequence\n");
1415                 goto err_interrupt;
1416         }
1417
1418         if (multi_link) {
1419                 ret = intel_shim_sync_go(sdw);
1420                 if (ret < 0) {
1421                         dev_err(dev, "sync go failed: %d\n", ret);
1422                         goto err_interrupt;
1423                 }
1424         }
1425         sdw_cdns_check_self_clearing_bits(cdns, __func__,
1426                                           true, INTEL_MASTER_RESET_ITERATIONS);
1427
1428         /* Register DAIs */
1429         ret = intel_register_dai(sdw);
1430         if (ret) {
1431                 dev_err(dev, "DAI registration failed: %d\n", ret);
1432                 snd_soc_unregister_component(dev);
1433                 goto err_interrupt;
1434         }
1435
1436         intel_debugfs_init(sdw);
1437
1438         /* Enable runtime PM */
1439         if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
1440                 pm_runtime_set_autosuspend_delay(dev,
1441                                                  INTEL_MASTER_SUSPEND_DELAY_MS);
1442                 pm_runtime_use_autosuspend(dev);
1443                 pm_runtime_mark_last_busy(dev);
1444
1445                 pm_runtime_set_active(dev);
1446                 pm_runtime_enable(dev);
1447         }
1448
1449         clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1450         if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) {
1451                 /*
1452                  * To keep the clock running we need to prevent
1453                  * pm_runtime suspend from happening by increasing the
1454                  * reference count.
1455                  * This quirk is specified by the parent PCI device in
1456                  * case of specific latency requirements. It will have
1457                  * no effect if pm_runtime is disabled by the user via
1458                  * a module parameter for testing purposes.
1459                  */
1460                 pm_runtime_get_noresume(dev);
1461         }
1462
1463         /*
1464          * The runtime PM status of Slave devices is "Unsupported"
1465          * until they report as ATTACHED. If they don't, e.g. because
1466          * there are no Slave devices populated or if the power-on is
1467          * delayed or dependent on a power switch, the Master will
1468          * remain active and prevent its parent from suspending.
1469          *
1470          * Conditionally force the pm_runtime core to re-evaluate the
1471          * Master status in the absence of any Slave activity. A quirk
1472          * is provided to e.g. deal with Slaves that may be powered on
1473          * with a delay. A more complete solution would require the
1474          * definition of Master properties.
1475          */
1476         if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1477                 pm_runtime_idle(dev);
1478
1479         sdw->startup_done = true;
1480         return 0;
1481
1482 err_interrupt:
1483         sdw_cdns_enable_interrupt(cdns, false);
1484 err_init:
1485         return ret;
1486 }
1487
1488 static void intel_link_remove(struct auxiliary_device *auxdev)
1489 {
1490         struct device *dev = &auxdev->dev;
1491         struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
1492         struct sdw_intel *sdw = cdns_to_intel(cdns);
1493         struct sdw_bus *bus = &cdns->bus;
1494
1495         /*
1496          * Since pm_runtime is already disabled, we don't decrease
1497          * the refcount when the clock_stop_quirk is
1498          * SDW_INTEL_CLK_STOP_NOT_ALLOWED
1499          */
1500         if (!bus->prop.hw_disabled) {
1501                 intel_debugfs_exit(sdw);
1502                 sdw_cdns_enable_interrupt(cdns, false);
1503                 snd_soc_unregister_component(dev);
1504         }
1505         sdw_bus_master_delete(bus);
1506 }
1507
1508 int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
1509 {
1510         struct device *dev = &auxdev->dev;
1511         struct sdw_intel *sdw;
1512         struct sdw_bus *bus;
1513         void __iomem *shim;
1514         u16 wake_sts;
1515
1516         sdw = auxiliary_get_drvdata(auxdev);
1517         bus = &sdw->cdns.bus;
1518
1519         if (bus->prop.hw_disabled || !sdw->startup_done) {
1520                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1521                         bus->link_id);
1522                 return 0;
1523         }
1524
1525         shim = sdw->link_res->shim;
1526         wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
1527
1528         if (!(wake_sts & BIT(sdw->instance)))
1529                 return 0;
1530
1531         /* disable WAKEEN interrupt ASAP to prevent interrupt flood */
1532         intel_shim_wake(sdw, false);
1533
1534         /*
1535          * resume the Master, which will generate a bus reset and result in
1536          * Slaves re-attaching and being re-enumerated. The SoundWire physical
1537          * device which generated the wake will trigger an interrupt, which
1538          * will in turn cause the corresponding Linux Slave device to be
1539          * resumed and the Slave codec driver to check the status.
1540          */
1541         pm_request_resume(dev);
1542
1543         return 0;
1544 }
1545
1546 /*
1547  * PM calls
1548  */
1549
1550 static int intel_resume_child_device(struct device *dev, void *data)
1551 {
1552         int ret;
1553         struct sdw_slave *slave = dev_to_sdw_dev(dev);
1554
1555         if (!slave->probed) {
1556                 dev_dbg(dev, "%s: skipping device, no probed driver\n", __func__);
1557                 return 0;
1558         }
1559         if (!slave->dev_num_sticky) {
1560                 dev_dbg(dev, "%s: skipping device, never detected on bus\n", __func__);
1561                 return 0;
1562         }
1563
1564         ret = pm_request_resume(dev);
1565         if (ret < 0)
1566                 dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);
1567
1568         return ret;
1569 }
1570
1571 static int __maybe_unused intel_pm_prepare(struct device *dev)
1572 {
1573         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1574         struct sdw_intel *sdw = cdns_to_intel(cdns);
1575         struct sdw_bus *bus = &cdns->bus;
1576         u32 clock_stop_quirks;
1577         int ret;
1578
1579         if (bus->prop.hw_disabled || !sdw->startup_done) {
1580                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1581                         bus->link_id);
1582                 return 0;
1583         }
1584
1585         clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1586
1587         if (pm_runtime_suspended(dev) &&
1588             pm_runtime_suspended(dev->parent) &&
1589             ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) ||
1590              !clock_stop_quirks)) {
1591                 /*
1592                  * If clock stop is enabled and the parent is suspended, the SHIM
1593                  * registers are not accessible and the shim wake cannot be disabled.
1594                  * The only solution is to resume the entire bus to full power.
1595                  */
1596
1597                 /*
1598                  * If any operation in this block fails, we keep going since we don't want
1599                  * to prevent system suspend from happening and errors should be recoverable
1600                  * on resume.
1601                  */
1602
1603                 /*
1604                  * First resume the device for this link; by construction this will
1605                  * also resume the PCI parent device.
1606                  */
1607                 ret = pm_request_resume(dev);
1608                 if (ret < 0) {
1609                         dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);
1610                         return 0;
1611                 }
1612
1613                 /*
1614                  * Continue resuming the entire bus (parent + child devices) to exit
1615                  * the clock stop mode. If there are no devices connected on this
1616                  * link, this is a no-op.
1617                  * The resume to full power could have been implemented with a .prepare
1618                  * step in SoundWire codec drivers. This would however require a lot
1619                  * of code to handle an Intel-specific corner case. It is simpler in
1620                  * practice to add a loop at the link level.
1621                  */
1622                 ret = device_for_each_child(bus->dev, NULL, intel_resume_child_device);
1623
1624                 if (ret < 0)
1625                         dev_err(dev, "%s: intel_resume_child_device failed: %d\n", __func__, ret);
1626         }
1627
1628         return 0;
1629 }
1630
1631 static int __maybe_unused intel_suspend(struct device *dev)
1632 {
1633         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1634         struct sdw_intel *sdw = cdns_to_intel(cdns);
1635         struct sdw_bus *bus = &cdns->bus;
1636         u32 clock_stop_quirks;
1637         int ret;
1638
1639         if (bus->prop.hw_disabled || !sdw->startup_done) {
1640                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1641                         bus->link_id);
1642                 return 0;
1643         }
1644
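        /*
         * If the link is already runtime-suspended there is nothing to power
         * down; only make sure the WAKEEN interrupt that runtime suspend may
         * have left enabled is disabled before entering system suspend.
         */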
1645         if (pm_runtime_suspended(dev)) {
1646                 dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__);
1647
1648                 clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1649
1650                 if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) ||
1651                     !clock_stop_quirks) {
1652
1653                         if (pm_runtime_suspended(dev->parent)) {
1654                                 /*
1655                                  * paranoia check: this should not happen with the .prepare
1656                                  * resume to full power
1657                                  */
1658                                 dev_err(dev, "%s: invalid config: parent is suspended\n", __func__);
1659                         } else {
1660                                 intel_shim_wake(sdw, false);
1661                         }
1662                 }
1663
1664                 return 0;
1665         }
1666
1667         ret = sdw_cdns_enable_interrupt(cdns, false);
1668         if (ret < 0) {
1669                 dev_err(dev, "cannot disable interrupts on suspend\n");
1670                 return ret;
1671         }
1672
1673         ret = intel_link_power_down(sdw);
1674         if (ret) {
1675                 dev_err(dev, "Link power down failed: %d\n", ret);
1676                 return ret;
1677         }
1678
1679         intel_shim_wake(sdw, false);
1680
1681         return 0;
1682 }
1683
1684 static int __maybe_unused intel_suspend_runtime(struct device *dev)
1685 {
1686         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1687         struct sdw_intel *sdw = cdns_to_intel(cdns);
1688         struct sdw_bus *bus = &cdns->bus;
1689         u32 clock_stop_quirks;
1690         int ret;
1691
1692         if (bus->prop.hw_disabled || !sdw->startup_done) {
1693                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1694                         bus->link_id);
1695                 return 0;
1696         }
1697
1698         clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1699
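        /*
         * Suspend flows mirror the resume ones:
         * - TEARDOWN: power the link down completely, leaving the wake disabled;
         * - BUS_RESET or no quirk: stop the bus clock, power the link down and
         *   keep the WAKEEN interrupt enabled so Slaves can wake the link.
         */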
1700         if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1701
1702                 ret = sdw_cdns_enable_interrupt(cdns, false);
1703                 if (ret < 0) {
1704                         dev_err(dev, "cannot disable interrupts on suspend\n");
1705                         return ret;
1706                 }
1707
1708                 ret = intel_link_power_down(sdw);
1709                 if (ret) {
1710                         dev_err(dev, "Link power down failed: %d\n", ret);
1711                         return ret;
1712                 }
1713
1714                 intel_shim_wake(sdw, false);
1715
1716         } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
1717                    !clock_stop_quirks) {
1718                 bool wake_enable = true;
1719
1720                 ret = sdw_cdns_clock_stop(cdns, true);
1721                 if (ret < 0) {
1722                         dev_err(dev, "cannot enable clock stop on suspend\n");
1723                         wake_enable = false;
1724                 }
1725
1726                 ret = sdw_cdns_enable_interrupt(cdns, false);
1727                 if (ret < 0) {
1728                         dev_err(dev, "cannot disable interrupts on suspend\n");
1729                         return ret;
1730                 }
1731
1732                 ret = intel_link_power_down(sdw);
1733                 if (ret) {
1734                         dev_err(dev, "Link power down failed: %d\n", ret);
1735                         return ret;
1736                 }
1737
1738                 intel_shim_wake(sdw, wake_enable);
1739         } else {
1740                 dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
1741                         __func__, clock_stop_quirks);
1742                 ret = -EINVAL;
1743         }
1744
1745         return ret;
1746 }
1747
1748 static int __maybe_unused intel_resume(struct device *dev)
1749 {
1750         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1751         struct sdw_intel *sdw = cdns_to_intel(cdns);
1752         struct sdw_bus *bus = &cdns->bus;
1753         int link_flags;
1754         bool multi_link;
1755         int ret;
1756
1757         if (bus->prop.hw_disabled || !sdw->startup_done) {
1758                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1759                         bus->link_id);
1760                 return 0;
1761         }
1762
1763         link_flags = md_flags >> (bus->link_id * 8);
1764         multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1765
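        /*
         * The device may have been runtime-suspended before system suspend;
         * force its runtime PM status back to active since the hardware is
         * fully re-initialized below.
         */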
1766         if (pm_runtime_suspended(dev)) {
1767                 dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__);
1768
1769                 /* follow required sequence from runtime_pm.rst */
1770                 pm_runtime_disable(dev);
1771                 pm_runtime_set_active(dev);
1772                 pm_runtime_mark_last_busy(dev);
1773                 pm_runtime_enable(dev);
1774
1775                 link_flags = md_flags >> (bus->link_id * 8);
1776
1777                 if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1778                         pm_runtime_idle(dev);
1779         }
1780
1781         ret = intel_init(sdw);
1782         if (ret) {
1783                 dev_err(dev, "%s failed: %d\n", __func__, ret);
1784                 return ret;
1785         }
1786
1787         /*
1788          * make sure all Slaves are tagged as UNATTACHED and provide
1789          * reason for reinitialization
1790          */
1791         sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1792
1793         ret = sdw_cdns_enable_interrupt(cdns, true);
1794         if (ret < 0) {
1795                 dev_err(dev, "cannot enable interrupts during resume\n");
1796                 return ret;
1797         }
1798
1799         /*
1800          * follow recommended programming flows to avoid timeouts when
1801          * gsync is enabled
1802          */
1803         if (multi_link)
1804                 intel_shim_sync_arm(sdw);
1805
1806         ret = sdw_cdns_init(&sdw->cdns);
1807         if (ret < 0) {
1808                 dev_err(dev, "unable to initialize Cadence IP during resume\n");
1809                 return ret;
1810         }
1811
1812         ret = sdw_cdns_exit_reset(cdns);
1813         if (ret < 0) {
1814                 dev_err(dev, "unable to exit bus reset sequence during resume\n");
1815                 return ret;
1816         }
1817
1818         if (multi_link) {
1819                 ret = intel_shim_sync_go(sdw);
1820                 if (ret < 0) {
1821                         dev_err(dev, "sync go failed during resume\n");
1822                         return ret;
1823                 }
1824         }
1825         sdw_cdns_check_self_clearing_bits(cdns, __func__,
1826                                           true, INTEL_MASTER_RESET_ITERATIONS);
1827
1828         /*
1829          * After system resume, pm_runtime suspend() may kick in during
1830          * enumeration, before any child device forces the master device
1831          * to remain active. Using the pm_runtime_get() routines is not
1832          * really possible, since that would prevent the master from
1833          * suspending.
1834          * A reasonable compromise is to update the pm_runtime counters
1835          * and delay the pm_runtime suspend by several seconds, by which
1836          * time all enumeration should be complete.
1837          */
1838         pm_runtime_mark_last_busy(dev);
1839
1840         return ret;
1841 }
1842
1843 static int __maybe_unused intel_resume_runtime(struct device *dev)
1844 {
1845         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1846         struct sdw_intel *sdw = cdns_to_intel(cdns);
1847         struct sdw_bus *bus = &cdns->bus;
1848         u32 clock_stop_quirks;
1849         bool clock_stop0;
1850         int link_flags;
1851         bool multi_link;
1852         int status;
1853         int ret;
1854
1855         if (bus->prop.hw_disabled || !sdw->startup_done) {
1856                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1857                         bus->link_id);
1858                 return 0;
1859         }
1860
1861         /* unconditionally disable WAKEEN interrupt */
1862         intel_shim_wake(sdw, false);
1863
1864         link_flags = md_flags >> (bus->link_id * 8);
1865         multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1866
1867         clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1868
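        /*
         * Three resume flows depending on how the link was suspended:
         * - TEARDOWN: the link was fully powered off, so re-initialize the IP
         *   and perform a complete bus reset/re-enumeration;
         * - BUS_RESET: restart the clock; a bus reset is only needed if the IP
         *   did not remain in clock-stop mode 0 (context lost);
         * - no quirk: the clock is expected to be stopped, simply restart it.
         */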
1869         if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1870                 ret = intel_init(sdw);
1871                 if (ret) {
1872                         dev_err(dev, "%s failed: %d\n", __func__, ret);
1873                         return ret;
1874                 }
1875
1876                 /*
1877                  * make sure all Slaves are tagged as UNATTACHED and provide
1878                  * reason for reinitialization
1879                  */
1880                 sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1881
1882                 ret = sdw_cdns_enable_interrupt(cdns, true);
1883                 if (ret < 0) {
1884                         dev_err(dev, "cannot enable interrupts during resume\n");
1885                         return ret;
1886                 }
1887
1888                 /*
1889                  * follow recommended programming flows to avoid
1890                  * timeouts when gsync is enabled
1891                  */
1892                 if (multi_link)
1893                         intel_shim_sync_arm(sdw);
1894
1895                 ret = sdw_cdns_init(&sdw->cdns);
1896                 if (ret < 0) {
1897                         dev_err(dev, "unable to initialize Cadence IP during resume\n");
1898                         return ret;
1899                 }
1900
1901                 ret = sdw_cdns_exit_reset(cdns);
1902                 if (ret < 0) {
1903                         dev_err(dev, "unable to exit bus reset sequence during resume\n");
1904                         return ret;
1905                 }
1906
1907                 if (multi_link) {
1908                         ret = intel_shim_sync_go(sdw);
1909                         if (ret < 0) {
1910                                 dev_err(dev, "sync go failed during resume\n");
1911                                 return ret;
1912                         }
1913                 }
1914                 sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime TEARDOWN",
1915                                                   true, INTEL_MASTER_RESET_ITERATIONS);
1916
1917         } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
1918                 ret = intel_init(sdw);
1919                 if (ret) {
1920                         dev_err(dev, "%s failed: %d\n", __func__, ret);
1921                         return ret;
1922                 }
1923
1924                 /*
1925                  * An exception occurs for the CLK_STOP_BUS_RESET case if one or
1926                  * more masters remain active: since all the masters share the
1927                  * same power domain, they are all powered on. A Master can
1928                  * preserve its context in clock stop mode 0, so there is no
1929                  * need to clear the slave status or reset the bus.
1930                  */
1931                 clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1932
1933                 if (!clock_stop0) {
1934
1935                         /*
1936                          * make sure all Slaves are tagged as UNATTACHED and
1937                          * provide reason for reinitialization
1938                          */
1939
1940                         status = SDW_UNATTACH_REQUEST_MASTER_RESET;
1941                         sdw_clear_slave_status(bus, status);
1942
1943                         ret = sdw_cdns_enable_interrupt(cdns, true);
1944                         if (ret < 0) {
1945                                 dev_err(dev, "cannot enable interrupts during resume\n");
1946                                 return ret;
1947                         }
1948
1949                         /*
1950                          * follow recommended programming flows to avoid
1951                          * timeouts when gsync is enabled
1952                          */
1953                         if (multi_link)
1954                                 intel_shim_sync_arm(sdw);
1955
1956                         /* re-initialize the IP since it was powered off */
1957                         ret = sdw_cdns_init(&sdw->cdns);
1958                         if (ret < 0)
1959                                 dev_err(dev, "unable to re-initialize Cadence IP during resume\n");
1960
1961                 } else {
1962                         ret = sdw_cdns_enable_interrupt(cdns, true);
1963                         if (ret < 0) {
1964                                 dev_err(dev, "cannot enable interrupts during resume\n");
1965                                 return ret;
1966                         }
1967                 }
1968
1969                 ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
1970                 if (ret < 0) {
1971                         dev_err(dev, "unable to restart clock during resume\n");
1972                         return ret;
1973                 }
1974
1975                 if (!clock_stop0) {
1976                         ret = sdw_cdns_exit_reset(cdns);
1977                         if (ret < 0) {
1978                                 dev_err(dev, "unable to exit bus reset sequence during resume\n");
1979                                 return ret;
1980                         }
1981
1982                         if (multi_link) {
1983                                 ret = intel_shim_sync_go(sdw);
1984                                 if (ret < 0) {
1985                                         dev_err(sdw->cdns.dev, "sync go failed during resume\n");
1986                                         return ret;
1987                                 }
1988                         }
1989                 }
1990                 sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime BUS_RESET",
1991                                                   true, INTEL_MASTER_RESET_ITERATIONS);
1992
1993         } else if (!clock_stop_quirks) {
1994
1995                 clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1996                 if (!clock_stop0)
1997                         dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
1998
1999                 ret = intel_init(sdw);
2000                 if (ret) {
2001                         dev_err(dev, "%s failed: %d\n", __func__, ret);
2002                         return ret;
2003                 }
2004
2005                 ret = sdw_cdns_enable_interrupt(cdns, true);
2006                 if (ret < 0) {
2007                         dev_err(dev, "cannot enable interrupts during resume\n");
2008                         return ret;
2009                 }
2010
2011                 ret = sdw_cdns_clock_restart(cdns, false);
2012                 if (ret < 0) {
2013                         dev_err(dev, "unable to resume master during resume\n");
2014                         return ret;
2015                 }
2016
2017                 sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
2018                                                   true, INTEL_MASTER_RESET_ITERATIONS);
2019         } else {
2020                 dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
2021                         __func__, clock_stop_quirks);
2022                 ret = -EINVAL;
2023         }
2024
2025         return ret;
2026 }
2027
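/*
 * System sleep uses intel_suspend()/intel_resume() plus a .prepare step that
 * exits clock-stop mode when needed; runtime PM uses the _runtime variants,
 * which select their behavior based on the link's clock_stop_quirks.
 */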
2028 static const struct dev_pm_ops intel_pm = {
2029         .prepare = intel_pm_prepare,
2030         SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
2031         SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
2032 };
2033
2034 static const struct auxiliary_device_id intel_link_id_table[] = {
2035         { .name = "soundwire_intel.link" },
2036         {},
2037 };
2038 MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table);
2039
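/*
 * The "soundwire_intel.link" auxiliary devices are created by the SoundWire
 * Intel init code on behalf of the parent controller; this driver binds to
 * them and handles per-link initialization, wake events and power management.
 */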
2040 static struct auxiliary_driver sdw_intel_drv = {
2041         .probe = intel_link_probe,
2042         .remove = intel_link_remove,
2043         .driver = {
2044                 /* auxiliary_driver_register() sets .name to be the modname */
2045                 .pm = &intel_pm,
2046         },
2047         .id_table = intel_link_id_table
2048 };
2049 module_auxiliary_driver(sdw_intel_drv);
2050
2051 MODULE_LICENSE("Dual BSD/GPL");
2052 MODULE_DESCRIPTION("Intel SoundWire Link Driver");