linux-2.6-microblaze.git: drivers/dma/idxd/irq.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

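/*
 * Mark the driver state of every workqueue on the device as disabled.
 * The caller must hold idxd->dev_lock.
 */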
void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = &idxd->wqs[i];

                wq->state = IDXD_WQ_DISABLED;
        }
}

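/*
 * Work item used to recover a halted device: reset and reconfigure the
 * device, then re-enable every workqueue that was enabled before the
 * halt. On any failure the workqueue states are simply cleared.
 */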
static void idxd_device_reinit(struct work_struct *work)
{
        struct idxd_device *idxd = container_of(work, struct idxd_device, work);
        struct device *dev = &idxd->pdev->dev;
        int rc, i;

        idxd_device_reset(idxd);
        rc = idxd_device_config(idxd);
        if (rc < 0)
                goto out;

        rc = idxd_device_enable(idxd);
        if (rc < 0)
                goto out;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = &idxd->wqs[i];

                if (wq->state == IDXD_WQ_ENABLED) {
                        rc = idxd_wq_enable(wq);
                        if (rc < 0) {
                                dev_warn(dev, "Unable to re-enable wq %s\n",
                                         dev_name(&wq->conf_dev));
                        }
                }
        }

        return;

 out:
        idxd_device_wqs_clear_state(idxd);
}

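/*
 * Top-half interrupt handler: mask the MSI-X vector that fired and defer
 * all processing to the threaded handler.
 */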
irqreturn_t idxd_irq_handler(int vec, void *data)
{
        struct idxd_irq_entry *irq_entry = data;
        struct idxd_device *idxd = irq_entry->idxd;

        idxd_mask_msix_vector(idxd, irq_entry->id);
        return IRQ_WAKE_THREAD;
}

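/*
 * Threaded handler for the device's miscellaneous event interrupt.
 * Latches and acknowledges software errors, wakes any user-space waiters
 * on affected workqueues, completes outstanding device commands, and, if
 * the device reports a halt, either schedules a reinit (software reset)
 * or marks all workqueues disabled (FLR or system reset required).
 */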
irqreturn_t idxd_misc_thread(int vec, void *data)
{
        struct idxd_irq_entry *irq_entry = data;
        struct idxd_device *idxd = irq_entry->idxd;
        struct device *dev = &idxd->pdev->dev;
        union gensts_reg gensts;
        u32 cause, val = 0;
        int i;
        bool err = false;

        cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);

        if (cause & IDXD_INTC_ERR) {
                spin_lock_bh(&idxd->dev_lock);
                for (i = 0; i < 4; i++)
                        idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
                                        IDXD_SWERR_OFFSET + i * sizeof(u64));
                iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

                if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
                        int id = idxd->sw_err.wq_idx;
                        struct idxd_wq *wq = &idxd->wqs[id];

                        if (wq->type == IDXD_WQT_USER)
                                wake_up_interruptible(&wq->idxd_cdev.err_queue);
                } else {
                        int i;

                        for (i = 0; i < idxd->max_wqs; i++) {
                                struct idxd_wq *wq = &idxd->wqs[i];

                                if (wq->type == IDXD_WQT_USER)
                                        wake_up_interruptible(&wq->idxd_cdev.err_queue);
                        }
                }

                spin_unlock_bh(&idxd->dev_lock);
                val |= IDXD_INTC_ERR;

                for (i = 0; i < 4; i++)
                        dev_warn(dev, "err[%d]: %#16.16llx\n",
                                 i, idxd->sw_err.bits[i]);
                err = true;
        }

        if (cause & IDXD_INTC_CMD) {
                val |= IDXD_INTC_CMD;
                complete(idxd->cmd_done);
        }

        if (cause & IDXD_INTC_OCCUPY) {
                /* Driver does not utilize occupancy interrupt */
                val |= IDXD_INTC_OCCUPY;
        }

        if (cause & IDXD_INTC_PERFMON_OVFL) {
                /*
                 * Driver does not utilize perfmon counter overflow interrupt
                 * yet.
                 */
                val |= IDXD_INTC_PERFMON_OVFL;
        }

        val ^= cause;
        if (val)
                dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
                              val);

        iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
        if (!err)
                goto out;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
        if (gensts.state == IDXD_DEVICE_STATE_HALT) {
                idxd->state = IDXD_DEV_HALTED;
                if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
                        /*
                         * A software reset is required. Defer it to the
                         * device's workqueue so that interrupts stay
                         * enabled for the device command completions the
                         * reset sequence waits on.
                         */
                        INIT_WORK(&idxd->work, idxd_device_reinit);
                        queue_work(idxd->wq, &idxd->work);
                } else {
                        spin_lock_bh(&idxd->dev_lock);
                        idxd_device_wqs_clear_state(idxd);
                        dev_err(&idxd->pdev->dev,
                                "idxd halted, need %s.\n",
                                gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
                                "FLR" : "system reset");
                        spin_unlock_bh(&idxd->dev_lock);
                }
        }

 out:
        idxd_unmask_msix_vector(idxd, irq_entry->id);
        return IRQ_HANDLED;
}

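/*
 * Drain the lockless pending list. Completed descriptors are handed to
 * the dmaengine layer and freed; descriptors still in flight are moved
 * to the ordered work_list. The number of completions is returned via
 * @processed, and the return value is the number of descriptors still
 * queued.
 */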
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                                     int *processed)
{
        struct idxd_desc *desc, *t;
        struct llist_node *head;
        int queued = 0;

        *processed = 0;
        head = llist_del_all(&irq_entry->pending_llist);
        if (!head)
                return 0;

        llist_for_each_entry_safe(desc, t, head, llnode) {
                if (desc->completion->status) {
                        idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
                        idxd_free_desc(desc->wq, desc);
                        (*processed)++;
                } else {
                        list_add_tail(&desc->list, &irq_entry->work_list);
                        queued++;
                }
        }

        return queued;
}

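/*
 * Walk the work_list and complete any descriptor whose completion record
 * has been written by the hardware. The number of completions is returned
 * via @processed, and the return value is the number of descriptors still
 * waiting.
 */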
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
                                 int *processed)
{
        struct list_head *node, *next;
        int queued = 0;

        *processed = 0;
        if (list_empty(&irq_entry->work_list))
                return 0;

        list_for_each_safe(node, next, &irq_entry->work_list) {
                struct idxd_desc *desc =
                        container_of(node, struct idxd_desc, list);

                if (desc->completion->status) {
                        list_del(&desc->list);
                        /* process and callback */
                        idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
                        idxd_free_desc(desc->wq, desc);
                        (*processed)++;
                } else {
                        queued++;
                }
        }

        return queued;
}

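/*
 * Process all completions for one interrupt vector, alternating between
 * the work_list and the pending llist until both are drained. See the
 * comment below for the reasoning behind the two-list scheme. Returns the
 * total number of descriptors completed.
 */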
static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
        int rc, processed, total = 0;

        /*
         * There are two lists to process. The pending_llist is where a
         * submitter adds each descriptor after sending it to the
         * workqueue; it is a lockless singly linked list. The work_list
         * is an ordinary doubly linked list. This is a multiple-producer,
         * single-consumer scenario: the producers are the kernel
         * submitters of descriptors, and the consumer is the threaded irq
         * handler for the MSI-X vector. To stay within the lockless
         * restrictions of llist, the following steps are taken:
         * 1. Iterate through the work_list and process any completed
         *    descriptor, deleting completed entries during iteration.
         * 2. llist_del_all() from the pending list.
         * 3. Iterate through the llist that was detached from the pending
         *    list and process the completed entries.
         * 4. If an entry is still waiting on hardware, list_add_tail() it
         *    to the work_list.
         * 5. Repeat until no more descriptors.
         */
        do {
                rc = irq_process_work_list(irq_entry, &processed);
                total += processed;
                if (rc != 0)
                        continue;

                rc = irq_process_pending_llist(irq_entry, &processed);
                total += processed;
        } while (rc != 0);

        return total;
}

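/*
 * Threaded handler for a workqueue completion interrupt: complete all
 * finished descriptors, then unmask the vector. IRQ_NONE is returned when
 * nothing was processed so spurious interrupts can be detected.
 */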
irqreturn_t idxd_wq_thread(int irq, void *data)
{
        struct idxd_irq_entry *irq_entry = data;
        int processed;

        processed = idxd_desc_process(irq_entry);
        idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

        if (processed == 0)
                return IRQ_NONE;

        return IRQ_HANDLED;
}