// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

enum irq_work_type {
	IRQ_WORK_NORMAL = 0,
	IRQ_WORK_PROCESS_FAULT,
};

struct idxd_fault {
	struct work_struct work;
	u64 addr;
	struct idxd_device *idxd;
};

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 enum irq_work_type wtype,
				 int *processed, u64 data);
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data);
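
/*
 * Recovery work for a device halt that permits a software reset: reset the
 * device, restore its configuration, and re-enable every workqueue that was
 * enabled before the halt. Any failure falls back to clearing the wq state.
 */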
static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0)
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
		}
	}

	return;

out:
	idxd_device_wqs_clear_state(idxd);
}
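
/*
 * Deferred handling of a descriptor fault: walk the work list and then the
 * pending llist of every completion vector (vector 0 is the misc interrupt)
 * until the descriptor matching the fault address has been processed.
 */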
static void idxd_device_fault_work(struct work_struct *work)
{
	struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
	struct idxd_irq_entry *ie;
	int i, processed;
	int irqcnt = fault->idxd->num_wq_irqs + 1;

	for (i = 1; i < irqcnt; i++) {
		ie = &fault->idxd->irq_entries[i];
		irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
				      &processed, fault->addr);
		if (processed)
			break;

		irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
					  &processed, fault->addr);
		if (processed)
			break;
	}

	kfree(fault);
}
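
/*
 * Schedule deferred processing of the fault address reported in the SWERR
 * register; the actual work runs on the device workqueue.
 */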
static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
					      u64 fault_addr)
{
	struct idxd_fault *fault;

	fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
	if (!fault)
		return -ENOMEM;

	fault->addr = fault_addr;
	fault->idxd = idxd;
	INIT_WORK(&fault->work, idxd_device_fault_work);
	queue_work(idxd->wq, &fault->work);
	return 0;
}
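
/*
 * Hard interrupt handler for the device's MSI-X vectors: mask the vector
 * and punt all real work to the threaded handler.
 */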
irqreturn_t idxd_irq_handler(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;

	idxd_mask_msix_vector(idxd, irq_entry->id);
	return IRQ_WAKE_THREAD;
}
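
/*
 * Handle the non-completion interrupt causes: software error reporting,
 * device command completion, and the (unused) occupancy and perfmon
 * overflow interrupts. Returns a negative errno when the device has halted
 * and cannot be recovered by a software reset.
 */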
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 val = 0;
	int i;
	bool err = false;

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));

		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
			  idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = &idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->idxd_cdev.err_queue);
		} else {
			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = &idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->idxd_cdev.err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/* Driver does not utilize perfmon counter overflow interrupt yet */
		val |= IDXD_INTC_PERFMON_OVFL;
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	if (!err)
		return 0;

	/*
	 * This case should rarely happen and typically is due to a software
	 * programming error by the driver.
	 */
	if (idxd->sw_err.valid &&
	    idxd->sw_err.desc_valid &&
	    idxd->sw_err.fault_addr)
		idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * A software reset is needed; queue the reinit work on
			 * the device workqueue so interrupts for device
			 * command completions can still be serviced.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock_bh(&idxd->dev_lock);
			idxd_device_wqs_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
			return -ENXIO;
		}
	}

	return 0;
}
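
/*
 * Threaded handler for the misc interrupt vector: acknowledge and process
 * INTCAUSE bits in a loop, re-reading the register after each pass until no
 * cause bits remain or the device halts, then unmask the vector.
 */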
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	int rc;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}
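
/* Report a descriptor whose address matches the SWERR fault address. */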
static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
{
	/*
	 * The completion record address can be bad as well, so check the
	 * fault address against both the descriptor and completion address.
	 */
	if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
		struct idxd_device *idxd = desc->wq->idxd;
		struct device *dev = &idxd->pdev->dev;

		dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
		return true;
	}

	return false;
}

static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
	idxd_dma_complete_txd(desc, reason);
	idxd_free_desc(desc->wq, desc);
}
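
/*
 * Drain the lockless pending list. Completed descriptors are retired;
 * descriptors still owned by hardware are moved to the work_list for a
 * later pass. Returns the number of descriptors moved to the work_list.
 */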
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;
	unsigned long flags;
	enum idxd_complete_type reason;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return 0;

	if (wtype == IRQ_WORK_NORMAL)
		reason = IDXD_COMPLETE_NORMAL;
	else
		reason = IDXD_COMPLETE_DEV_FAIL;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
				match_fault(desc, data);
			complete_desc(desc, reason);
			(*processed)++;
		} else {
			spin_lock_irqsave(&irq_entry->list_lock, flags);
			list_add_tail(&desc->list,
				      &irq_entry->work_list);
			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
			queued++;
		}
	}

	return queued;
}
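
/*
 * Scan the work_list under the list lock, splice completed descriptors onto
 * a private list, and retire them only after the lock is dropped so the
 * completion callbacks never run with the lock held. Returns the number of
 * descriptors still pending.
 */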
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 enum irq_work_type wtype,
				 int *processed, u64 data)
{
	int queued = 0;
	unsigned long flags;
	LIST_HEAD(flist);
	struct idxd_desc *desc, *n;
	enum idxd_complete_type reason;

	*processed = 0;
	if (wtype == IRQ_WORK_NORMAL)
		reason = IDXD_COMPLETE_NORMAL;
	else
		reason = IDXD_COMPLETE_DEV_FAIL;

	/*
	 * This lock protects the work_list from concurrent access outside
	 * of the irq handler thread.
	 */
	spin_lock_irqsave(&irq_entry->list_lock, flags);
	if (list_empty(&irq_entry->work_list)) {
		spin_unlock_irqrestore(&irq_entry->list_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
		if (desc->completion->status) {
			list_del(&desc->list);
			(*processed)++;
			list_add_tail(&desc->list, &flist);
		} else {
			queued++;
		}
	}

	spin_unlock_irqrestore(&irq_entry->list_lock, flags);

	list_for_each_entry(desc, &flist, list) {
		if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
			match_fault(desc, data);
		complete_desc(desc, reason);
	}

	return queued;
}

static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all the submitted descriptors after sending
	 * them to the workqueue. It's a lockless singly linked list. The
	 * work_list is the common Linux doubly linked list. We are in a
	 * scenario of multiple producers and a single consumer. The
	 * producers are all the kernel submitters of descriptors, and the
	 * consumer is the kernel irq handler thread for the MSI-X vector
	 * when using threaded irq. To work within the restrictions of llist
	 * and remain lockless, we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptors. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending
	 *    list and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
					   &processed, 0);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
					       &processed, 0);
		total += processed;
	} while (rc != 0);

	return total;
}
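
/*
 * Threaded handler for a work queue completion vector: reap descriptors,
 * then unmask the vector that was masked in the hard handler. Returning
 * IRQ_NONE when nothing was processed lets the core flag spurious
 * interrupts.
 */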
irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

	return processed ? IRQ_HANDLED : IRQ_NONE;
}