drivers/misc/mei/pci-me.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_3, MEI_ME_PCH8_ITOUCH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_3, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_ITOUCH_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},

        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
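
/*
 * Each entry above pairs an Intel MEI device ID with a per-generation
 * configuration selector; mei_me_probe() hands the selector
 * (ent->driver_data) to mei_me_get_cfg() to look up the matching
 * struct mei_cfg.
 *
 * Illustrative sketch only (the real MEI_PCI_DEVICE() macro lives in
 * hw-me-regs.h): an entry is assumed to expand roughly to
 *
 *	{ .vendor = PCI_VENDOR_ID_INTEL, .device = MEI_DEV_ID_ADP_P,
 *	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 *	  .driver_data = MEI_ME_PCH15_CFG },
 */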

#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

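/**
 * mei_me_read_fws - read a dword from the device's PCI config space
 *	(hooked up below as the hw layer's firmware status read-back)
 *
 * @dev: mei device
 * @where: offset in PCI config space
 * @val: the read value is stored here
 *
 * Return: 0 on success, an error code otherwise
 */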
static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        return pci_read_config_dword(pdev, where, val);
}

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if the ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
                                const struct mei_cfg *cfg)
{
        if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
                dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
                return false;
        }

        return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        const struct mei_cfg *cfg;
        struct mei_device *dev;
        struct mei_me_hw *hw;
        unsigned int irqflags;
        int err;

        cfg = mei_me_get_cfg(ent->driver_data);
        if (!cfg)
                return -ENODEV;

        if (!mei_me_quirk_probe(pdev, cfg))
                return -ENODEV;

        /* enable pci dev */
        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* enable PCI bus mastering */
        pci_set_master(pdev);
        /* request PCI regions and map IO device memory for the mei driver */
        err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto end;
        }

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                goto end;
        }

        /* allocate and initialize the mei dev structure */
        dev = mei_me_dev_init(&pdev->dev, cfg);
        if (!dev) {
                err = -ENOMEM;
                goto end;
        }
        hw = to_me_hw(dev);
        hw->mem_addr = pcim_iomap_table(pdev)[0];
        hw->read_fws = mei_me_read_fws;

        pci_enable_msi(pdev);

        hw->irq = pdev->irq;

        /* request and enable interrupt */
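        /*
         * MSI vectors are not shared, and IRQF_ONESHOT keeps the
         * interrupt masked until the threaded handler has run;
         * a legacy INTx line may be shared with other devices,
         * hence IRQF_SHARED.
         */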
        irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

        err = request_threaded_irq(pdev->irq,
                        mei_me_irq_quick_handler,
                        mei_me_irq_thread_handler,
                        irqflags, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                       pdev->irq);
                goto end;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);

        err = mei_register(dev, &pdev->dev);
        if (err)
                goto stop;

        pci_set_drvdata(pdev, dev);

        /*
         * MEI has to resume from runtime suspend mode
         * in order to perform the link reset flow upon system suspend.
         */
        dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

        /*
         * ME maps runtime suspend/resume to D0i states,
         * hence we need to go around the native PCI runtime service
         * which eventually brings the device into D3cold/hot state,
         * but the mei device cannot wake up from D3 unlike from D0i3.
         * To get around the PCI device native runtime pm,
         * ME uses runtime pm domain handlers which take precedence
         * over the driver's pm handlers.
         */
        mei_me_set_pm_domain(dev);

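        /*
         * With power gating enabled, let the device runtime suspend:
         * pm_runtime_put_noidle() drops a usage-count reference without
         * scheduling an idle check, and on D0i3-capable hardware
         * pm_runtime_allow() lifts the PCI core's default
         * pm_runtime_forbid() so autosuspend does not need a userspace
         * opt-in.
         */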
        if (mei_pg_is_enabled(dev)) {
                pm_runtime_put_noidle(&pdev->dev);
                if (hw->d0i3_supported)
                        pm_runtime_allow(&pdev->dev);
        }

        dev_dbg(&pdev->dev, "initialization successful.\n");

        return 0;

stop:
        mei_stop(dev);
release_irq:
        mei_cancel_work(dev);
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}

/**
 * mei_me_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier;
 * it is a simplified version of remove so we go down
 * faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

        dev_dbg(&pdev->dev, "shutdown\n");
        mei_stop(dev);

        mei_me_unset_pm_domain(dev);

        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

        if (mei_pg_is_enabled(dev))
                pm_runtime_get_noresume(&pdev->dev);

        dev_dbg(&pdev->dev, "stop\n");
        mei_stop(dev);

        mei_me_unset_pm_domain(dev);

        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);

        mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);

        if (!dev)
                return -ENODEV;

        dev_dbg(&pdev->dev, "suspend\n");

        mei_stop(dev);

        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        return 0;
}

static int mei_me_pci_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        unsigned int irqflags;
        int err;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        pci_enable_msi(pdev);

        irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

        /* request and enable interrupt */
        err = request_threaded_irq(pdev->irq,
                        mei_me_irq_quick_handler,
                        mei_me_irq_thread_handler,
                        irqflags, KBUILD_MODNAME, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                                pdev->irq);
                return err;
        }

        err = mei_restart(dev);
        if (err)
                return err;

        /* Start timer if stopped in suspend */
        schedule_delayed_work(&dev->timer_work, HZ);

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
        struct mei_device *dev;

        dev_dbg(device, "rpm: me: runtime_idle\n");

        dev = dev_get_drvdata(device);
        if (!dev)
                return -ENODEV;
        if (mei_write_is_idle(dev))
                pm_runtime_autosuspend(device);

        return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
        struct mei_device *dev;
        int ret;

        dev_dbg(device, "rpm: me: runtime suspend\n");

        dev = dev_get_drvdata(device);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        if (mei_write_is_idle(dev))
                ret = mei_me_pg_enter_sync(dev);
        else
                ret = -EAGAIN;

        mutex_unlock(&dev->device_lock);

        dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);

        if (ret && ret != -EAGAIN)
                schedule_work(&dev->reset_work);

        return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
        struct mei_device *dev;
        int ret;

        dev_dbg(device, "rpm: me: runtime resume\n");

        dev = dev_get_drvdata(device);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        ret = mei_me_pg_exit_sync(dev);

        mutex_unlock(&dev->device_lock);

        dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);

        if (ret)
                schedule_work(&dev->reset_work);

        return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        if (pdev->dev.bus && pdev->dev.bus->pm) {
                dev->pg_domain.ops = *pdev->dev.bus->pm;

                dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
                dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
                dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

                dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
        }
}
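
/*
 * Note: both the runtime and the system sleep PM cores look for callbacks
 * in dev->pm_domain before falling back to the device type, class, bus and
 * finally the driver, so the ops installed above shadow the PCI bus runtime
 * PM callbacks for this device, while the remaining (copied) bus callbacks
 * stay in effect.
 */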

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
        /* stop using pm callbacks if any */
        dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_me_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
                                mei_me_pci_resume)
        SET_RUNTIME_PM_OPS(
                mei_me_pm_runtime_suspend,
                mei_me_pm_runtime_resume,
                mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS   (&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS   NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
static struct pci_driver mei_me_driver = {
        .name = KBUILD_MODNAME,
        .id_table = mei_me_pci_tbl,
        .probe = mei_me_probe,
        .remove = mei_me_remove,
        .shutdown = mei_me_shutdown,
        .driver.pm = MEI_ME_PM_OPS,
        .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");