drivers/dma/idxd/device.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/io-64-nonatomic-lo-hi.h>
8 #include <linux/dmaengine.h>
9 #include <linux/irq.h>
10 #include <linux/msi.h>
11 #include <uapi/linux/idxd.h>
12 #include "../dmaengine.h"
13 #include "idxd.h"
14 #include "registers.h"
15
16 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
17                           u32 *status);
18
19 /* Interrupt control bits */
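/*
 * These are thin wrappers around the PCI MSI core: each looks up the
 * irq_data behind the MSI-X entry for the given vector and masks or
 * unmasks that interrupt.
 */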
20 void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
21 {
22         struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
23
24         pci_msi_mask_irq(data);
25 }
26
27 void idxd_mask_msix_vectors(struct idxd_device *idxd)
28 {
29         struct pci_dev *pdev = idxd->pdev;
30         int msixcnt = pci_msix_vec_count(pdev);
31         int i;
32
33         for (i = 0; i < msixcnt; i++)
34                 idxd_mask_msix_vector(idxd, i);
35 }
36
37 void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
38 {
39         struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
40
41         pci_msi_unmask_irq(data);
42 }
43
44 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
45 {
46         union genctrl_reg genctrl;
47
48         genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
49         genctrl.softerr_int_en = 1;
50         iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
51 }
52
53 void idxd_mask_error_interrupts(struct idxd_device *idxd)
54 {
55         union genctrl_reg genctrl;
56
57         genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
58         genctrl.softerr_int_en = 0;
59         iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
60 }
61
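/*
 * Descriptor bookkeeping: both the hardware descriptors handed to the
 * device and the driver's software descriptors are kept as arrays of
 * pointers, with each element allocated node-local to the device
 * (dev_to_node()) for NUMA locality.
 */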
62 static void free_hw_descs(struct idxd_wq *wq)
63 {
64         int i;
65
66         for (i = 0; i < wq->num_descs; i++)
67                 kfree(wq->hw_descs[i]);
68
69         kfree(wq->hw_descs);
70 }
71
72 static int alloc_hw_descs(struct idxd_wq *wq, int num)
73 {
74         struct device *dev = &wq->idxd->pdev->dev;
75         int i;
76         int node = dev_to_node(dev);
77
78         wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
79                                     GFP_KERNEL, node);
80         if (!wq->hw_descs)
81                 return -ENOMEM;
82
83         for (i = 0; i < num; i++) {
84                 wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
85                                                GFP_KERNEL, node);
86                 if (!wq->hw_descs[i]) {
87                         free_hw_descs(wq);
88                         return -ENOMEM;
89                 }
90         }
91
92         return 0;
93 }
94
95 static void free_descs(struct idxd_wq *wq)
96 {
97         int i;
98
99         for (i = 0; i < wq->num_descs; i++)
100                 kfree(wq->descs[i]);
101
102         kfree(wq->descs);
103 }
104
105 static int alloc_descs(struct idxd_wq *wq, int num)
106 {
107         struct device *dev = &wq->idxd->pdev->dev;
108         int i;
109         int node = dev_to_node(dev);
110
111         wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
112                                  GFP_KERNEL, node);
113         if (!wq->descs)
114                 return -ENOMEM;
115
116         for (i = 0; i < num; i++) {
117                 wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
118                                             GFP_KERNEL, node);
119                 if (!wq->descs[i]) {
120                         free_descs(wq);
121                         return -ENOMEM;
122                 }
123         }
124
125         return 0;
126 }
127
128 /* WQ control bits */
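/*
 * idxd_wq_alloc_resources() sets up everything a kernel-owned WQ needs for
 * dmaengine use: the hardware descriptor array, an aligned DMA-coherent
 * completion record area, the software descriptor array, and an sbitmap
 * queue that hands out free descriptor slots.
 */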
129 int idxd_wq_alloc_resources(struct idxd_wq *wq)
130 {
131         struct idxd_device *idxd = wq->idxd;
132         struct device *dev = &idxd->pdev->dev;
133         int rc, num_descs, i;
134         int align;
135         u64 tmp;
136
137         if (wq->type != IDXD_WQT_KERNEL)
138                 return 0;
139
140         wq->num_descs = wq->size;
141         num_descs = wq->size;
142
143         rc = alloc_hw_descs(wq, num_descs);
144         if (rc < 0)
145                 return rc;
146
147         if (idxd->type == IDXD_TYPE_DSA)
148                 align = 32;
149         else if (idxd->type == IDXD_TYPE_IAX)
150                 align = 64;
151         else
152                 return -ENODEV;
153
154         wq->compls_size = num_descs * idxd->compl_size + align;
155         wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
156                                             &wq->compls_addr_raw, GFP_KERNEL);
157         if (!wq->compls_raw) {
158                 rc = -ENOMEM;
159                 goto fail_alloc_compls;
160         }
161
162         /* Adjust alignment */
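        /*
         * Round both the DMA handle and the CPU pointer up to the next
         * 'align' boundary: with align = 32, e.g. 0x1004 rounds up to 0x1020.
         * The extra 'align' bytes allocated above guarantee the aligned
         * region still covers num_descs completion records.
         */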
163         wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
164         tmp = (u64)wq->compls_raw;
165         tmp = (tmp + (align - 1)) & ~(align - 1);
166         wq->compls = (struct dsa_completion_record *)tmp;
167
168         rc = alloc_descs(wq, num_descs);
169         if (rc < 0)
170                 goto fail_alloc_descs;
171
172         rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
173                                      dev_to_node(dev));
174         if (rc < 0)
175                 goto fail_sbitmap_init;
176
177         for (i = 0; i < num_descs; i++) {
178                 struct idxd_desc *desc = wq->descs[i];
179
180                 desc->hw = wq->hw_descs[i];
181                 if (idxd->type == IDXD_TYPE_DSA)
182                         desc->completion = &wq->compls[i];
183                 else if (idxd->type == IDXD_TYPE_IAX)
184                         desc->iax_completion = &wq->iax_compls[i];
185                 desc->compl_dma = wq->compls_addr + idxd->compl_size * i;
186                 desc->id = i;
187                 desc->wq = wq;
188                 desc->cpu = -1;
189                 dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
190                 desc->txd.tx_submit = idxd_dma_tx_submit;
191         }
192
193         return 0;
194
195  fail_sbitmap_init:
196         free_descs(wq);
197  fail_alloc_descs:
198         dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
199                           wq->compls_addr_raw);
200  fail_alloc_compls:
201         free_hw_descs(wq);
202         return rc;
203 }
204
205 void idxd_wq_free_resources(struct idxd_wq *wq)
206 {
207         struct device *dev = &wq->idxd->pdev->dev;
208
209         if (wq->type != IDXD_WQT_KERNEL)
210                 return;
211
212         free_hw_descs(wq);
213         free_descs(wq);
214         dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
215                           wq->compls_addr_raw);
216         sbitmap_queue_free(&wq->sbq);
217 }
218
219 int idxd_wq_enable(struct idxd_wq *wq)
220 {
221         struct idxd_device *idxd = wq->idxd;
222         struct device *dev = &idxd->pdev->dev;
223         u32 status;
224
225         if (wq->state == IDXD_WQ_ENABLED) {
226                 dev_dbg(dev, "WQ %d already enabled\n", wq->id);
227                 return -ENXIO;
228         }
229
230         idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
231
232         if (status != IDXD_CMDSTS_SUCCESS &&
233             status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
234                 dev_dbg(dev, "WQ enable failed: %#x\n", status);
235                 return -ENXIO;
236         }
237
238         wq->state = IDXD_WQ_ENABLED;
239         dev_dbg(dev, "WQ %d enabled\n", wq->id);
240         return 0;
241 }
242
243 int idxd_wq_disable(struct idxd_wq *wq)
244 {
245         struct idxd_device *idxd = wq->idxd;
246         struct device *dev = &idxd->pdev->dev;
247         u32 status, operand;
248
249         dev_dbg(dev, "Disabling WQ %d\n", wq->id);
250
251         if (wq->state != IDXD_WQ_ENABLED) {
252                 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
253                 return 0;
254         }
255
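        /*
         * The operand addresses the WQ as a bit within a block of 16 WQs:
         * the low 16 bits carry the per-block bit mask and the upper bits
         * carry the block index (wq->id / 16).
         */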
256         operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
257         idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
258
259         if (status != IDXD_CMDSTS_SUCCESS) {
260                 dev_dbg(dev, "WQ disable failed: %#x\n", status);
261                 return -ENXIO;
262         }
263
264         wq->state = IDXD_WQ_DISABLED;
265         dev_dbg(dev, "WQ %d disabled\n", wq->id);
266         return 0;
267 }
268
269 void idxd_wq_drain(struct idxd_wq *wq)
270 {
271         struct idxd_device *idxd = wq->idxd;
272         struct device *dev = &idxd->pdev->dev;
273         u32 operand;
274
275         if (wq->state != IDXD_WQ_ENABLED) {
276                 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
277                 return;
278         }
279
280         dev_dbg(dev, "Draining WQ %d\n", wq->id);
281         operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
282         idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
283 }
284
285 void idxd_wq_reset(struct idxd_wq *wq)
286 {
287         struct idxd_device *idxd = wq->idxd;
288         struct device *dev = &idxd->pdev->dev;
289         u32 operand;
290
291         if (wq->state != IDXD_WQ_ENABLED) {
292                 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
293                 return;
294         }
295
296         operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
297         idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
298         wq->state = IDXD_WQ_DISABLED;
299 }
300
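/*
 * Map the WQ's submission portal: it lives in the WQ BAR at an offset
 * derived from the WQ id, and descriptors are later submitted by writing
 * to this mapping. Only the limited portal is mapped here.
 */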
301 int idxd_wq_map_portal(struct idxd_wq *wq)
302 {
303         struct idxd_device *idxd = wq->idxd;
304         struct pci_dev *pdev = idxd->pdev;
305         struct device *dev = &pdev->dev;
306         resource_size_t start;
307
308         start = pci_resource_start(pdev, IDXD_WQ_BAR);
309         start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
310
311         wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
312         if (!wq->portal)
313                 return -ENOMEM;
314
315         return 0;
316 }
317
318 void idxd_wq_unmap_portal(struct idxd_wq *wq)
319 {
320         struct device *dev = &wq->idxd->pdev->dev;
321
322         devm_iounmap(dev, wq->portal);
323 }
324
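/*
 * Changing the PASID field is a read-modify-write of a single 32-bit word
 * of the WQCFG register block; the WQ is disabled around the update and
 * re-enabled afterwards.
 */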
325 int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
326 {
327         struct idxd_device *idxd = wq->idxd;
328         int rc;
329         union wqcfg wqcfg;
330         unsigned int offset;
331         unsigned long flags;
332
333         rc = idxd_wq_disable(wq);
334         if (rc < 0)
335                 return rc;
336
337         offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
338         spin_lock_irqsave(&idxd->dev_lock, flags);
339         wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
340         wqcfg.pasid_en = 1;
341         wqcfg.pasid = pasid;
342         iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
343         spin_unlock_irqrestore(&idxd->dev_lock, flags);
344
345         rc = idxd_wq_enable(wq);
346         if (rc < 0)
347                 return rc;
348
349         return 0;
350 }
351
352 int idxd_wq_disable_pasid(struct idxd_wq *wq)
353 {
354         struct idxd_device *idxd = wq->idxd;
355         int rc;
356         union wqcfg wqcfg;
357         unsigned int offset;
358         unsigned long flags;
359
360         rc = idxd_wq_disable(wq);
361         if (rc < 0)
362                 return rc;
363
364         offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
365         spin_lock_irqsave(&idxd->dev_lock, flags);
366         wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
367         wqcfg.pasid_en = 0;
368         wqcfg.pasid = 0;
369         iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
370         spin_unlock_irqrestore(&idxd->dev_lock, flags);
371
372         rc = idxd_wq_enable(wq);
373         if (rc < 0)
374                 return rc;
375
376         return 0;
377 }
378
379 void idxd_wq_disable_cleanup(struct idxd_wq *wq)
380 {
381         struct idxd_device *idxd = wq->idxd;
382
383         lockdep_assert_held(&idxd->dev_lock);
384         memset(wq->wqcfg, 0, idxd->wqcfg_size);
385         wq->type = IDXD_WQT_NONE;
386         wq->size = 0;
387         wq->group = NULL;
388         wq->threshold = 0;
389         wq->priority = 0;
390         wq->ats_dis = 0;
391         clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
392         memset(wq->name, 0, WQ_NAME_SIZE);
393 }
394
395 /* Device control bits */
396 static inline bool idxd_is_enabled(struct idxd_device *idxd)
397 {
398         union gensts_reg gensts;
399
400         gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
401
402         if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
403                 return true;
404         return false;
405 }
406
407 static inline bool idxd_device_is_halted(struct idxd_device *idxd)
408 {
409         union gensts_reg gensts;
410
411         gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
412
413         return (gensts.state == IDXD_DEVICE_STATE_HALT);
414 }
415
416 /*
417  * This function is only used for reset during probe and will
418  * poll for completion. Once the device is set up with interrupts,
419  * all commands will be done via interrupt completion.
420  */
421 int idxd_device_init_reset(struct idxd_device *idxd)
422 {
423         struct device *dev = &idxd->pdev->dev;
424         union idxd_command_reg cmd;
425         unsigned long flags;
426
427         if (idxd_device_is_halted(idxd)) {
428                 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
429                 return -ENXIO;
430         }
431
432         memset(&cmd, 0, sizeof(cmd));
433         cmd.cmd = IDXD_CMD_RESET_DEVICE;
434         dev_dbg(dev, "%s: sending reset for init.\n", __func__);
435         spin_lock_irqsave(&idxd->dev_lock, flags);
436         iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
437
438         while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
439                IDXD_CMDSTS_ACTIVE)
440                 cpu_relax();
441         spin_unlock_irqrestore(&idxd->dev_lock, flags);
442         return 0;
443 }
444
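/*
 * Submit a device command and sleep until the interrupt handler signals
 * completion. IDXD_FLAG_CMD_RUNNING serializes callers so only one command
 * is in flight at a time; dev_lock protects the flag and the write to the
 * command register.
 */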
445 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
446                           u32 *status)
447 {
448         union idxd_command_reg cmd;
449         DECLARE_COMPLETION_ONSTACK(done);
450         unsigned long flags;
451
452         if (idxd_device_is_halted(idxd)) {
453                 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
454                 *status = IDXD_CMDSTS_HW_ERR;
455                 return;
456         }
457
458         memset(&cmd, 0, sizeof(cmd));
459         cmd.cmd = cmd_code;
460         cmd.operand = operand;
461         cmd.int_req = 1;
462
463         spin_lock_irqsave(&idxd->dev_lock, flags);
464         wait_event_lock_irq(idxd->cmd_waitq,
465                             !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
466                             idxd->dev_lock);
467
468         dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
469                 __func__, cmd_code, operand);
470
471         idxd->cmd_status = 0;
472         __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
473         idxd->cmd_done = &done;
474         iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
475
476         /*
477          * After the command is submitted, release the lock and sleep
478          * until the command completes via interrupt.
479          */
480         spin_unlock_irqrestore(&idxd->dev_lock, flags);
481         wait_for_completion(&done);
482         spin_lock_irqsave(&idxd->dev_lock, flags);
483         if (status) {
484                 *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
485                 idxd->cmd_status = *status & GENMASK(7, 0);
486         }
487
488         __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
489         /* Wake up other pending commands */
490         wake_up(&idxd->cmd_waitq);
491         spin_unlock_irqrestore(&idxd->dev_lock, flags);
492 }
493
494 int idxd_device_enable(struct idxd_device *idxd)
495 {
496         struct device *dev = &idxd->pdev->dev;
497         u32 status;
498
499         if (idxd_is_enabled(idxd)) {
500                 dev_dbg(dev, "Device already enabled\n");
501                 return -ENXIO;
502         }
503
504         idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
505
506         /* If the command is successful or if the device was enabled */
507         if (status != IDXD_CMDSTS_SUCCESS &&
508             status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
509                 dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
510                 return -ENXIO;
511         }
512
513         idxd->state = IDXD_DEV_ENABLED;
514         return 0;
515 }
516
517 void idxd_device_wqs_clear_state(struct idxd_device *idxd)
518 {
519         int i;
520
521         lockdep_assert_held(&idxd->dev_lock);
522
523         for (i = 0; i < idxd->max_wqs; i++) {
524                 struct idxd_wq *wq = &idxd->wqs[i];
525
526                 if (wq->state == IDXD_WQ_ENABLED) {
527                         idxd_wq_disable_cleanup(wq);
528                         wq->state = IDXD_WQ_DISABLED;
529                 }
530         }
531 }
532
533 int idxd_device_disable(struct idxd_device *idxd)
534 {
535         struct device *dev = &idxd->pdev->dev;
536         u32 status;
537         unsigned long flags;
538
539         if (!idxd_is_enabled(idxd)) {
540                 dev_dbg(dev, "Device is not enabled\n");
541                 return 0;
542         }
543
544         idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);
545
546         /* If the command is successful or if the device was disabled */
547         if (status != IDXD_CMDSTS_SUCCESS &&
548             !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
549                 dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
550                 return -ENXIO;
551         }
552
553         spin_lock_irqsave(&idxd->dev_lock, flags);
554         idxd_device_wqs_clear_state(idxd);
555         idxd->state = IDXD_DEV_CONF_READY;
556         spin_unlock_irqrestore(&idxd->dev_lock, flags);
557         return 0;
558 }
559
560 void idxd_device_reset(struct idxd_device *idxd)
561 {
562         unsigned long flags;
563
564         idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
565         spin_lock_irqsave(&idxd->dev_lock, flags);
566         idxd_device_wqs_clear_state(idxd);
567         idxd->state = IDXD_DEV_CONF_READY;
568         spin_unlock_irqrestore(&idxd->dev_lock, flags);
569 }
570
571 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
572 {
573         struct device *dev = &idxd->pdev->dev;
574         u32 operand;
575
576         operand = pasid;
577         dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
578         idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
579         dev_dbg(dev, "pasid %d drained\n", pasid);
580 }
581
582 /* Device configuration bits */
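/*
 * Program the MSIX permission table: one 8-byte entry per vector at
 * msix_perm_offset + i * 8. Entry 0 is skipped, as that vector carries the
 * device's misc interrupt rather than work queue completions.
 */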
583 void idxd_msix_perm_setup(struct idxd_device *idxd)
584 {
585         union msix_perm mperm;
586         int i, msixcnt;
587
588         msixcnt = pci_msix_vec_count(idxd->pdev);
589         if (msixcnt < 0)
590                 return;
591
592         mperm.bits = 0;
593         mperm.pasid = idxd->pasid;
594         mperm.pasid_en = device_pasid_enabled(idxd);
595         for (i = 1; i < msixcnt; i++)
596                 iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
597 }
598
599 void idxd_msix_perm_clear(struct idxd_device *idxd)
600 {
601         union msix_perm mperm;
602         int i, msixcnt;
603
604         msixcnt = pci_msix_vec_count(idxd->pdev);
605         if (msixcnt < 0)
606                 return;
607
608         mperm.bits = 0;
609         for (i = 1; i < msixcnt; i++)
610                 iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
611 }
612
613 static void idxd_group_config_write(struct idxd_group *group)
614 {
615         struct idxd_device *idxd = group->idxd;
616         struct device *dev = &idxd->pdev->dev;
617         int i;
618         u32 grpcfg_offset;
619
620         dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
621
622         /* setup GRPWQCFG */
623         for (i = 0; i < GRPWQCFG_STRIDES; i++) {
624                 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
625                 iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
626                 dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
627                         group->id, i, grpcfg_offset,
628                         ioread64(idxd->reg_base + grpcfg_offset));
629         }
630
631         /* setup GRPENGCFG */
632         grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
633         iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
634         dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
635                 grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
636
637         /* setup GRPFLAGS */
638         grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
639         iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
640         dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
641                 group->id, grpcfg_offset,
642                 ioread32(idxd->reg_base + grpcfg_offset));
643 }
644
645 static int idxd_groups_config_write(struct idxd_device *idxd)
647 {
648         union gencfg_reg reg;
649         int i;
650         struct device *dev = &idxd->pdev->dev;
651
652         /* Setup bandwidth token limit */
653         if (idxd->token_limit) {
654                 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
655                 reg.token_limit = idxd->token_limit;
656                 iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
657         }
658
659         dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
660                 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
661
662         for (i = 0; i < idxd->max_groups; i++) {
663                 struct idxd_group *group = &idxd->groups[i];
664
665                 idxd_group_config_write(group);
666         }
667
668         return 0;
669 }
670
671 static int idxd_wq_config_write(struct idxd_wq *wq)
672 {
673         struct idxd_device *idxd = wq->idxd;
674         struct device *dev = &idxd->pdev->dev;
675         u32 wq_offset;
676         int i;
677
678         if (!wq->group)
679                 return 0;
680
681         /*
682          * Instead of memsetting the entire WQCFG shadow copy, read it back from the
683          * hardware after WQ reset. This preserves the sticky values present on some devices.
684          */
685         for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
686                 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
687                 wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
688         }
689
690         /* byte 0-3 */
691         wq->wqcfg->wq_size = wq->size;
692
693         if (wq->size == 0) {
694                 dev_warn(dev, "Incorrect work queue size: 0\n");
695                 return -EINVAL;
696         }
697
698         /* bytes 4-7 */
699         wq->wqcfg->wq_thresh = wq->threshold;
700
701         /* byte 8-11 */
702         wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
703         if (wq_dedicated(wq))
704                 wq->wqcfg->mode = 1;
705
706         if (device_pasid_enabled(idxd)) {
707                 wq->wqcfg->pasid_en = 1;
708                 if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
709                         wq->wqcfg->pasid = idxd->pasid;
710         }
711
712         wq->wqcfg->priority = wq->priority;
713
714         if (idxd->hw.gen_cap.block_on_fault &&
715             test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
716                 wq->wqcfg->bof = 1;
717
718         if (idxd->hw.wq_cap.wq_ats_support)
719                 wq->wqcfg->wq_ats_disable = wq->ats_dis;
720
721         /* bytes 12-15 */
722         wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
723         wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
724
725         dev_dbg(dev, "WQ %d CFGs\n", wq->id);
726         for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
727                 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
728                 iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
729                 dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
730                         wq->id, i, wq_offset,
731                         ioread32(idxd->reg_base + wq_offset));
732         }
733
734         return 0;
735 }
736
737 static int idxd_wqs_config_write(struct idxd_device *idxd)
738 {
739         int i, rc;
740
741         for (i = 0; i < idxd->max_wqs; i++) {
742                 struct idxd_wq *wq = &idxd->wqs[i];
743
744                 rc = idxd_wq_config_write(wq);
745                 if (rc < 0)
746                         return rc;
747         }
748
749         return 0;
750 }
751
752 static void idxd_group_flags_setup(struct idxd_device *idxd)
753 {
754         int i;
755
756         /* Default traffic classes: TC-A = 0 and TC-B = 1 when left unconfigured (-1) */
757         for (i = 0; i < idxd->max_groups; i++) {
758                 struct idxd_group *group = &idxd->groups[i];
759
760                 if (group->tc_a == -1)
761                         group->tc_a = group->grpcfg.flags.tc_a = 0;
762                 else
763                         group->grpcfg.flags.tc_a = group->tc_a;
764                 if (group->tc_b == -1)
765                         group->tc_b = group->grpcfg.flags.tc_b = 1;
766                 else
767                         group->grpcfg.flags.tc_b = group->tc_b;
768                 group->grpcfg.flags.use_token_limit = group->use_token_limit;
769                 group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
770                 if (group->tokens_allowed)
771                         group->grpcfg.flags.tokens_allowed =
772                                 group->tokens_allowed;
773                 else
774                         group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
775         }
776 }
777
778 static int idxd_engines_setup(struct idxd_device *idxd)
779 {
780         int i, engines = 0;
781         struct idxd_engine *eng;
782         struct idxd_group *group;
783
784         for (i = 0; i < idxd->max_groups; i++) {
785                 group = &idxd->groups[i];
786                 group->grpcfg.engines = 0;
787         }
788
789         for (i = 0; i < idxd->max_engines; i++) {
790                 eng = &idxd->engines[i];
791                 group = eng->group;
792
793                 if (!group)
794                         continue;
795
796                 group->grpcfg.engines |= BIT(eng->id);
797                 engines++;
798         }
799
800         if (!engines)
801                 return -EINVAL;
802
803         return 0;
804 }
805
806 static int idxd_wqs_setup(struct idxd_device *idxd)
807 {
808         struct idxd_wq *wq;
809         struct idxd_group *group;
810         int i, j, configured = 0;
811         struct device *dev = &idxd->pdev->dev;
812
813         for (i = 0; i < idxd->max_groups; i++) {
814                 group = &idxd->groups[i];
815                 for (j = 0; j < 4; j++)
816                         group->grpcfg.wqs[j] = 0;
817         }
818
819         for (i = 0; i < idxd->max_wqs; i++) {
820                 wq = &idxd->wqs[i];
821                 group = wq->group;
822
823                 if (!wq->group)
824                         continue;
825                 if (!wq->size)
826                         continue;
827
828                 if (wq_shared(wq) && !device_swq_supported(idxd)) {
829                         dev_warn(dev, "Shared WQ configured, but device has no shared WQ support.\n");
830                         return -EINVAL;
831                 }
832
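                /*
                 * Group WQ membership is a bitmap spread across four 64-bit
                 * words (up to 256 WQs): word wq->id / 64, bit wq->id % 64.
                 */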
833                 group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
834                 configured++;
835         }
836
837         if (configured == 0)
838                 return -EINVAL;
839
840         return 0;
841 }
842
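/*
 * Build and write the full device configuration: group WQ membership,
 * engine-to-group assignment and group flags are computed first, then the
 * per-WQ and per-group config registers are written. Caller must hold
 * dev_lock.
 */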
843 int idxd_device_config(struct idxd_device *idxd)
844 {
845         int rc;
846
847         lockdep_assert_held(&idxd->dev_lock);
848         rc = idxd_wqs_setup(idxd);
849         if (rc < 0)
850                 return rc;
851
852         rc = idxd_engines_setup(idxd);
853         if (rc < 0)
854                 return rc;
855
856         idxd_group_flags_setup(idxd);
857
858         rc = idxd_wqs_config_write(idxd);
859         if (rc < 0)
860                 return rc;
861
862         rc = idxd_groups_config_write(idxd);
863         if (rc < 0)
864                 return rc;
865
866         return 0;
867 }