drivers/dma/idxd/submit.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"

static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
        struct idxd_desc *desc;
        struct idxd_device *idxd = wq->idxd;

        desc = wq->descs[idx];
        memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
        memset(desc->completion, 0, idxd->data->compl_size);
        desc->cpu = cpu;

        if (device_pasid_enabled(idxd))
                desc->hw->pasid = idxd->pasid;

        /*
         * Descriptor completion vectors are 1...N for MSIX. We will round
         * robin through the N vectors.
         */
        wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
        if (!idxd->int_handles) {
                desc->hw->int_handle = wq->vec_ptr;
        } else {
                /*
                 * int_handles are only for descriptor completion. However, for
                 * device MSIX enumeration, vec 0 is used for misc interrupts.
                 * Therefore, even though we are rotating through 1...N for
                 * descriptor interrupts, we need to acquire the int_handles
                 * from 0..N-1.
                 */
                desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
        }

        return desc;
}

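/*
 * Allocate a free descriptor slot from the wq's sbitmap. With
 * IDXD_OP_NONBLOCK the caller gets -EAGAIN when the wq is full;
 * otherwise the caller sleeps until idxd_free_desc() releases a slot.
 */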
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
        int cpu, idx;
        struct idxd_device *idxd = wq->idxd;
        DEFINE_SBQ_WAIT(wait);
        struct sbq_wait_state *ws;
        struct sbitmap_queue *sbq;

        if (idxd->state != IDXD_DEV_ENABLED)
                return ERR_PTR(-EIO);

        sbq = &wq->sbq;
        idx = sbitmap_queue_get(sbq, &cpu);
        if (idx >= 0)
                return __get_desc(wq, idx, cpu);
        if (optype == IDXD_OP_NONBLOCK)
                return ERR_PTR(-EAGAIN);

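        /*
         * No free slot and the caller may block: wait on the sbitmap
         * waitqueue until idxd_free_desc() releases a descriptor, or
         * until a signal arrives.
         */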
        ws = &sbq->ws[0];
        for (;;) {
                sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
                if (signal_pending_state(TASK_INTERRUPTIBLE, current))
                        break;
                idx = sbitmap_queue_get(sbq, &cpu);
                if (idx >= 0)
                        break;
                schedule();
        }

        sbitmap_finish_wait(sbq, ws, &wait);
        if (idx < 0)
                return ERR_PTR(-EAGAIN);

        return __get_desc(wq, idx, cpu);
}

void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        int cpu = desc->cpu;

        desc->cpu = -1;
        sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}

static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
                                         struct idxd_desc *desc)
{
        struct idxd_desc *d, *n;

        lockdep_assert_held(&ie->list_lock);
        list_for_each_entry_safe(d, n, &ie->work_list, list) {
                if (d == desc) {
                        list_del(&d->list);
                        return d;
                }
        }

        /*
         * At this point, the desc that needs to be aborted is held by the
         * completion handler, which has taken it off the pending list but
         * has not yet added it to the work list. It will be cleaned up by
         * the interrupt handler when it sees IDXD_COMP_DESC_ABORT as the
         * completion status.
         */
        return NULL;
}

static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
                             struct idxd_desc *desc)
{
        struct idxd_desc *d, *t, *found = NULL;
        struct llist_node *head;
        unsigned long flags;

        desc->completion->status = IDXD_COMP_DESC_ABORT;
        /*
         * Grab the list lock so it will block the irq thread handler. This
         * allows the abort code to locate the descriptor that needs to be
         * aborted.
         */
        spin_lock_irqsave(&ie->list_lock, flags);
        head = llist_del_all(&ie->pending_llist);
        if (head) {
                llist_for_each_entry_safe(d, t, head, llnode) {
                        if (d == desc) {
                                found = desc;
                                continue;
                        }
                        list_add_tail(&d->list, &ie->work_list);
                }
        }

        if (!found)
                found = list_abort_desc(wq, ie, desc);
        spin_unlock_irqrestore(&ie->list_lock, flags);

        if (found)
                complete_desc(found, IDXD_COMPLETE_ABORT);
}

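/*
 * Issue a prepared descriptor to the device through the wq portal.
 * A dedicated wq is written with MOVDIR64B (iosubmit_cmds512), which
 * cannot be rejected; a shared wq uses ENQCMDS, which the device may
 * refuse.
 */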
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_irq_entry *ie = NULL;
        void __iomem *portal;
        int rc;

        if (idxd->state != IDXD_DEV_ENABLED)
                return -EIO;

        if (!percpu_ref_tryget_live(&wq->wq_active))
                return -ENXIO;

        portal = wq->portal;

        /*
         * The wmb() flushes writes to coherent DMA data before
         * possibly triggering a DMA read. The wmb() is necessary
         * even on UP because the recipient is a device.
         */
        wmb();

        /*
         * Pend the descriptor on the lockless list of the irq_entry
         * that we designated for this descriptor.
         */
        if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
                ie = &idxd->irq_entries[desc->vector];
                llist_add(&desc->llnode, &ie->pending_llist);
        }

        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, desc->hw, 1);
        } else {
                /*
                 * It's unlikely that we would receive a queue-full rejection,
                 * since descriptor allocation gates at the wq size. If we do
                 * receive -EAGAIN, something went wrong, such as the device
                 * not accepting descriptors at all.
                 */
                rc = enqcmds(portal, desc->hw);
                if (rc < 0) {
                        /* Drop the wq_active reference taken above. */
                        percpu_ref_put(&wq->wq_active);
                        if (ie)
                                llist_abort_desc(wq, ie, desc);
                        return rc;
                }
        }

        percpu_ref_put(&wq->wq_active);
        return 0;
}
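
/*
 * Illustrative sketch only, not part of the driver: roughly how a
 * kernel-mode client (e.g. the idxd dmaengine glue) might drive the
 * alloc/submit/free API above to issue a memory-move operation. The
 * function name and its parameters are hypothetical.
 */
static int __maybe_unused example_submit_memmove(struct idxd_wq *wq,
                                                 dma_addr_t dst,
                                                 dma_addr_t src, size_t len)
{
        struct idxd_desc *desc;
        int rc;

        /* Block until a descriptor slot is free. */
        desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        desc->hw->opcode = DSA_OPCODE_MEMMOVE;
        desc->hw->src_addr = src;
        desc->hw->dst_addr = dst;
        desc->hw->xfer_size = len;
        desc->hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
        /* Request a completion record and a completion interrupt. */
        desc->hw->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR |
                          IDXD_OP_FLAG_RCI;
        desc->hw->completion_addr = desc->compl_dma;

        rc = idxd_submit_desc(wq, desc);
        if (rc < 0)
                idxd_free_desc(wq, desc);
        return rc;
}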