// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"
static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
/* Interrupt control bits */
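/*
 * Each MSI-X vector has a per-vector MSIX_PERM register at
 * msix_perm_offset + vec_id * 8; setting its "ignore" bit masks the
 * vector, clearing it unmasks the vector again.
 */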
int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
        struct pci_dev *pdev = idxd->pdev;
        int msixcnt = pci_msix_vec_count(pdev);
        union msix_perm perm;
        u32 offset;

        if (vec_id < 0 || vec_id >= msixcnt)
                return -EINVAL;

        offset = idxd->msix_perm_offset + vec_id * 8;
        perm.bits = ioread32(idxd->reg_base + offset);
        perm.ignore = 1;
        iowrite32(perm.bits, idxd->reg_base + offset);

        return 0;
}
void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        int msixcnt = pci_msix_vec_count(pdev);
        int i, rc;

        for (i = 0; i < msixcnt; i++) {
                rc = idxd_mask_msix_vector(idxd, i);
                if (rc < 0)
                        dev_warn(&pdev->dev,
                                 "Failed disabling msix vec %d\n", i);
        }
}
int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
        struct pci_dev *pdev = idxd->pdev;
        int msixcnt = pci_msix_vec_count(pdev);
        union msix_perm perm;
        u32 offset;

        if (vec_id < 0 || vec_id >= msixcnt)
                return -EINVAL;

        offset = idxd->msix_perm_offset + vec_id * 8;
        perm.bits = ioread32(idxd->reg_base + offset);
        perm.ignore = 0;
        iowrite32(perm.bits, idxd->reg_base + offset);

        /*
         * A readback from the device ensures that any previously generated
         * completion record writes are visible to software based on PCI
         * ordering rules.
         */
        perm.bits = ioread32(idxd->reg_base + offset);

        return 0;
}
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 1;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 0;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
static void free_hw_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->hw_descs[i]);
        kfree(wq->hw_descs);
}
static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
                                    GFP_KERNEL, node);
        if (!wq->hw_descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
                                               GFP_KERNEL, node);
                if (!wq->hw_descs[i]) {
                        free_hw_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}
static void free_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->descs[i]);
        kfree(wq->descs);
}
static int alloc_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
                                 GFP_KERNEL, node);
        if (!wq->descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
                                            GFP_KERNEL, node);
                if (!wq->descs[i]) {
                        free_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}
/* WQ control bits */
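/*
 * Allocate the software resources backing a kernel-owned WQ: the hardware
 * descriptor array, the DMA-coherent completion records, the driver
 * descriptor structs, and the sbitmap that tracks free descriptors. The
 * pool is sized so that every engine in the group can be kept busy on top
 * of the WQ depth itself.
 */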
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_group *group = wq->group;
        struct device *dev = &idxd->pdev->dev;
        int rc, num_descs, i;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        num_descs = wq->size +
                idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
        wq->num_descs = num_descs;

        rc = alloc_hw_descs(wq, num_descs);
        if (rc < 0)
                return rc;

        wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
        wq->compls = dma_alloc_coherent(dev, wq->compls_size,
                                        &wq->compls_addr, GFP_KERNEL);
        if (!wq->compls) {
                rc = -ENOMEM;
                goto fail_alloc_compls;
        }

        rc = alloc_descs(wq, num_descs);
        if (rc < 0)
                goto fail_alloc_descs;

        rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
                               dev_to_node(dev));
        if (rc < 0)
                goto fail_sbitmap_init;

        for (i = 0; i < num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                desc->hw = wq->hw_descs[i];
                desc->completion = &wq->compls[i];
                desc->compl_dma = wq->compls_addr +
                        sizeof(struct dsa_completion_record) * i;
                desc->id = i;
                desc->wq = wq;

                dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
                desc->txd.tx_submit = idxd_dma_tx_submit;
        }

        return 0;

 fail_sbitmap_init:
        free_descs(wq);
 fail_alloc_descs:
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
        free_hw_descs(wq);
        return rc;
}
void idxd_wq_free_resources(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_hw_descs(wq);
        free_descs(wq);
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
        sbitmap_free(&wq->sbmap);
}
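/*
 * WQ enable/disable are device commands: they go through the command
 * register and must be issued with idxd->dev_lock held.
 */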
int idxd_wq_enable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status;
        int rc;

        lockdep_assert_held(&idxd->dev_lock);

        if (wq->state == IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d already enabled\n", wq->id);
                return -ENXIO;
        }

        rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
                dev_dbg(dev, "WQ enable failed: %#x\n", status);
                return -ENXIO;
        }

        wq->state = IDXD_WQ_ENABLED;
        dev_dbg(dev, "WQ %d enabled\n", wq->id);
        return 0;
}
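/*
 * The WQ disable operand encodes the WQ index as a bit position in the
 * low word plus a register index in the high word:
 * BIT(wq->id % 16) | ((wq->id / 16) << 16).
 */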
int idxd_wq_disable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status, operand;
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        dev_dbg(dev, "Disabling WQ %d\n", wq->id);

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return 0;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        if (status != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "WQ disable failed: %#x\n", status);
                return -ENXIO;
        }

        wq->state = IDXD_WQ_DISABLED;
        dev_dbg(dev, "WQ %d disabled\n", wq->id);
        return 0;
}
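/*
 * Map this WQ's submission portal: an IDXD_PORTAL_SIZE window in the
 * portal BAR (IDXD_WQ_BAR) at offset wq->id * IDXD_PORTAL_SIZE.
 */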
int idxd_wq_map_portal(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        resource_size_t start;

        start = pci_resource_start(pdev, IDXD_WQ_BAR);
        start = start + wq->id * IDXD_PORTAL_SIZE;

        wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
        if (!wq->dportal)
                return -ENOMEM;
        dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

        return 0;
}
void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        devm_iounmap(dev, wq->dportal);
}
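/*
 * Reset a WQ's software configuration to defaults and clear its eight
 * 32-bit WQCFG registers in the device.
 */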
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i, wq_offset;

        lockdep_assert_held(&idxd->dev_lock);
        memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
        wq->type = IDXD_WQT_NONE;
        wq->size = 0;
        wq->group = NULL;
        wq->threshold = 0;
        wq->priority = 0;
        clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
        memset(wq->name, 0, WQ_NAME_SIZE);

        for (i = 0; i < 8; i++) {
                wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
                iowrite32(0, idxd->reg_base + wq_offset);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
                        wq->id, i, wq_offset,
                        ioread32(idxd->reg_base + wq_offset));
        }
}
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
                return true;
        return false;
}
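/*
 * Poll CMDSTS until the ACTIVE bit clears or the timeout expires, then
 * hand the final status back to the caller. Must be called with
 * idxd->dev_lock held.
 */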
static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
{
        u32 sts, to = timeout;

        lockdep_assert_held(&idxd->dev_lock);
        sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        while (sts & IDXD_CMDSTS_ACTIVE && --to) {
                cpu_relax();
                sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        }

        if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
                dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
                *status = 0;
                return -EBUSY;
        }

        *status = sts;
        return 0;
}
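/*
 * Issue a device command: wait for any in-flight command to drain, then
 * write the command code and operand to the command register. The caller
 * checks completion separately via idxd_cmd_wait().
 */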
static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
{
        union idxd_command_reg cmd;
        u32 status;
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = cmd_code;
        cmd.operand = operand;
        dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
                __func__, cmd_code, operand);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        return 0;
}
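/*
 * Device enable/disable/reset follow the same pattern as the WQ commands:
 * send the command, wait for CMDSTS to go idle, then check the returned
 * status code.
 */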
int idxd_device_enable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        if (idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device already enabled\n");
                return -ENXIO;
        }

        rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        /* Accept success or "device already enabled" as OK */
        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd->state = IDXD_DEV_ENABLED;
        return 0;
}
int idxd_device_disable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        if (!idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device is not enabled\n");
                return 0;
        }

        rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        /* Accept success or "device already disabled" as OK */
        if (status != IDXD_CMDSTS_SUCCESS &&
            !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd->state = IDXD_DEV_CONF_READY;
        return 0;
}
int __idxd_device_reset(struct idxd_device *idxd)
{
        u32 status;
        int rc;

        rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        return 0;
}
int idxd_device_reset(struct idxd_device *idxd)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&idxd->dev_lock, flags);
        rc = __idxd_device_reset(idxd);
        spin_unlock_irqrestore(&idxd->dev_lock, flags);
        return rc;
}
/* Device configuration bits */
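/*
 * Each group owns a 64-byte window in the GRPCFG space: four 64-bit
 * GRPWQCFG words (the WQ membership bitmap), the 64-bit GRPENGCFG engine
 * bitmap at offset 32, and the 32-bit GRPFLAGS register at offset 40.
 */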
static void idxd_group_config_write(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 grpcfg_offset;
        int i;

        dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

        /* setup GRPWQCFG */
        for (i = 0; i < 4; i++) {
                grpcfg_offset = idxd->grpcfg_offset +
                        group->id * 64 + i * sizeof(u64);
                iowrite64(group->grpcfg.wqs[i],
                          idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset,
                        ioread64(idxd->reg_base + grpcfg_offset));
        }

        /* setup GRPENGCFG */
        grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
        iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

        /* setup GRPFLAGS */
        grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
        iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
                group->id, grpcfg_offset,
                ioread32(idxd->reg_base + grpcfg_offset));
}
static int idxd_groups_config_write(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i;
        struct device *dev = &idxd->pdev->dev;

        /* Setup bandwidth token limit */
        if (idxd->token_limit) {
                reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
                reg.token_limit = idxd->token_limit;
                iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
        }

        dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
                ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = &idxd->groups[i];

                idxd_group_config_write(group);
        }

        return 0;
}
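/*
 * Build the in-memory WQCFG image for one WQ (size, threshold, privilege,
 * priority, transfer/batch limits) and write it out as eight 32-bit
 * registers at wqcfg_offset + wq->id * 32.
 */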
static int idxd_wq_config_write(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 wq_offset;
        int i;

        if (!wq->group)
                return 0;

        memset(&wq->wqcfg, 0, sizeof(union wqcfg));

        /* bytes 0-3 */
        wq->wqcfg.wq_size = wq->size;

        if (wq->size == 0) {
                dev_warn(dev, "Incorrect work queue size: 0\n");
                return -EINVAL;
        }

        /* bytes 4-7 */
        wq->wqcfg.wq_thresh = wq->threshold;

        /* bytes 8-11 */
        wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
        wq->wqcfg.mode = 1;

        wq->wqcfg.priority = wq->priority;

        /* bytes 12-15 */
        wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
        wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;

        dev_dbg(dev, "WQ %d CFGs\n", wq->id);
        for (i = 0; i < 8; i++) {
                wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
                iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
                        wq->id, i, wq_offset,
                        ioread32(idxd->reg_base + wq_offset));
        }

        return 0;
}
static int idxd_wqs_config_write(struct idxd_device *idxd)
{
        int i, rc;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = &idxd->wqs[i];

                rc = idxd_wq_config_write(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}
static void idxd_group_flags_setup(struct idxd_device *idxd)
{
        int i;

        /* TC-A 0 and TC-B 1 should be defaults */
        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = &idxd->groups[i];

                if (group->tc_a == -1)
                        group->tc_a = group->grpcfg.flags.tc_a = 0;
                else
                        group->grpcfg.flags.tc_a = group->tc_a;
                if (group->tc_b == -1)
                        group->tc_b = group->grpcfg.flags.tc_b = 1;
                else
                        group->grpcfg.flags.tc_b = group->tc_b;
                group->grpcfg.flags.use_token_limit = group->use_token_limit;
                group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
                if (group->tokens_allowed)
                        group->grpcfg.flags.tokens_allowed =
                                group->tokens_allowed;
                else
                        group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
        }
}
static int idxd_engines_setup(struct idxd_device *idxd)
{
        int i, engines = 0;
        struct idxd_engine *eng;
        struct idxd_group *group;

        for (i = 0; i < idxd->max_groups; i++) {
                group = &idxd->groups[i];
                group->grpcfg.engines = 0;
        }

        for (i = 0; i < idxd->max_engines; i++) {
                eng = &idxd->engines[i];
                group = eng->group;

                if (!group)
                        continue;

                group->grpcfg.engines |= BIT(eng->id);
                engines++;
        }

        if (!engines)
                return -EINVAL;

        return 0;
}
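/*
 * Populate each group's WQ membership bitmap. Only dedicated WQs are
 * supported here, and at least one WQ must end up configured.
 */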
static int idxd_wqs_setup(struct idxd_device *idxd)
{
        struct idxd_wq *wq;
        struct idxd_group *group;
        int i, j, configured = 0;
        struct device *dev = &idxd->pdev->dev;

        for (i = 0; i < idxd->max_groups; i++) {
                group = &idxd->groups[i];
                for (j = 0; j < 4; j++)
                        group->grpcfg.wqs[j] = 0;
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = &idxd->wqs[i];
                group = wq->group;

                if (!wq->group)
                        continue;
                if (!wq->size)
                        continue;

                if (!wq_dedicated(wq)) {
                        dev_warn(dev, "No shared workqueue support.\n");
                        return -EINVAL;
                }

                group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
                configured++;
        }

        if (configured == 0)
                return -EINVAL;

        return 0;
}
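/*
 * Program the full device configuration while holding dev_lock: group WQ
 * membership, engine membership, and group flags first, then the WQ and
 * group configuration registers.
 */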
int idxd_device_config(struct idxd_device *idxd)
{
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        rc = idxd_wqs_setup(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_engines_setup(idxd);
        if (rc < 0)
                return rc;

        idxd_group_flags_setup(idxd);

        rc = idxd_wqs_config_write(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_groups_config_write(idxd);
        if (rc < 0)
                return rc;

        return 0;
}