/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/ioasid.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;

enum idxd_dev_type {
	IDXD_DEV_NONE = -1,
	IDXD_DEV_DSA = 0,
	IDXD_DEV_IAX,
	IDXD_DEV_WQ,
	IDXD_DEV_GROUP,
	IDXD_DEV_ENGINE,
	IDXD_DEV_CDEV,
	IDXD_DEV_MAX_TYPE,
};

struct idxd_dev {
	struct device conf_dev;
	enum idxd_dev_type type;
};

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128
#define IDXD_PMU_EVENT_MAX	64

#define IDXD_ENQCMDS_RETRIES		32
#define IDXD_ENQCMDS_MAX_RETRIES	64

struct idxd_device_driver {
	const char *name;
	enum idxd_dev_type *type;
	int (*probe)(struct idxd_dev *idxd_dev);
	void (*remove)(struct idxd_dev *idxd_dev);
	struct device_driver drv;
};

extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;

#define INVALID_INT_HANDLE	-1

struct idxd_irq_entry {
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between the irq thread processing
	 * descriptors and the irq thread processing an error descriptor.
	 */
	spinlock_t list_lock;
	int int_handle;
	ioasid_t pasid;
};

struct idxd_group {
	struct idxd_dev idxd_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_rdbuf_limit;
	u8 rdbufs_allowed;
	u8 rdbufs_reserved;
	int tc_a;
	int tc_b;
};

struct idxd_pmu {
	struct idxd_device *idxd;

	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	int n_events;

	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);

	struct pmu pmu;
	char name[IDXD_NAME_SIZE];
	int cpu;

	int n_counters;
	int counter_width;
	int n_event_categories;

	bool per_counter_caps_supported;
	unsigned long supported_event_categories;

	unsigned long supported_filters;
	int n_filters;

	struct hlist_node cpuhp_node;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_dev idxd_dev;
	struct idxd_wq *wq;
	struct cdev cdev;
	int minor;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE	1024
#define WQ_TYPE_SIZE	10

#define WQ_DEFAULT_QUEUE_DEPTH		16
#define WQ_DEFAULT_MAX_XFER		SZ_2M
#define WQ_DEFAULT_MAX_BATCH		32

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

struct idxd_wq {
	void __iomem *portal;
	u32 portal_offset;
	unsigned int enqcmds_retries;
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct completion wq_resurrect;
	struct idxd_dev idxd_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	struct idxd_irq_entry ie;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	dma_addr_t compls_addr;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
};

struct idxd_engine {
	struct idxd_dev idxd_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
	u32 cmd_cap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	struct device_type *dev_type;
	int compl_size;
	int align;
};

struct idxd_device {
	struct idxd_dev idxd_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u32 cmd_status;
	struct idxd_irq_entry ie;	/* misc irq, msix 0 */

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;
	int irq_cnt;
	bool request_int_handles;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_rdbufs;
	int max_wqs;
	int max_wq_size;
	int nr_rdbufs;		/* non-reserved read buffers */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;

	struct idxd_pmu *idxd_pmu;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};

/*
 * This is a software-defined error for the completion status. We overload
 * an error code that will never appear in the completion status and is only
 * used in the SWERR register.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};

#define idxd_confdev(idxd) (&(idxd)->idxd_dev.conf_dev)
#define wq_confdev(wq) (&(wq)->idxd_dev.conf_dev)
#define engine_confdev(engine) (&(engine)->idxd_dev.conf_dev)
#define group_confdev(group) (&(group)->idxd_dev.conf_dev)
#define cdev_dev(cdev) (&(cdev)->idxd_dev.conf_dev)

#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)

static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_idxd(idxd_dev);
}

static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_wq(idxd_dev);
}

static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_engine, idxd_dev);
}

static inline struct idxd_group *confdev_to_group(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_group, idxd_dev);
}

static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
}

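/*
 * Illustrative sketch (not part of the driver, hence compiled out): how the
 * conversion helpers above are typically used from a sysfs attribute
 * callback, where the core hands back only the embedded struct device.
 * The attribute and its name are hypothetical.
 */
#if 0
static ssize_t example_wq_state_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->state);
}
#endif
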
static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{
	if (type >= IDXD_DEV_MAX_TYPE) {
		idev->type = IDXD_DEV_NONE;
		return;
	}

	idev->type = type;
}

static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
{
	return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
}

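/*
 * Note on idxd_get_ie() above: MSI-X vector 0 is the device's misc/error
 * interrupt (idxd->ie), while vector N (N >= 1) services WQ N - 1, hence
 * the wqs[idx - 1] mapping.
 */
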
static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_wq, ie);
}

static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_device, ie);
}

extern struct bus_type dsa_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_DSA;
}

static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_IAX;
}

static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
	return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}

static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_WQ;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL;
}

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

enum idxd_interrupt_type {
	IDXD_IRQ_MSIX = 0,
	IDXD_IRQ_IMS,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}

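/*
 * Worked example (assuming 4K pages): each WQ owns four portal pages, so
 * WQ 2's unlimited portal starts at (2 * 4) << 12 = 0x8000 from the portal
 * BAR base, and its limited portal at 0x8000 + 0x1000 = 0x9000.
 */
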
#define IDXD_PORTAL_MASK	(PAGE_SIZE - 1)

/*
 * Even though this function can be accessed by multiple threads, it is safe
 * to use. At worst the address gets used more than once before it gets
 * incremented. We don't hit a threshold until iops reach many millions per
 * second, so the occasional reuse of the same address is tolerable compared
 * to using an atomic variable. This is safe on a system that has atomic
 * load/store for 32-bit integers. Given that this is an Intel iEP device,
 * that should not be a problem.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
	int ofs = wq->portal_offset;

	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
	return wq->portal + ofs;
}

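/*
 * With 4K pages and a 64-byte struct dsa_raw_desc, successive calls return
 * portal offsets 0, 64, 128, ..., 4032 and then wrap to 0, spreading
 * MOVDIR64B/ENQCMDS writes across the portal page's cachelines.
 */
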
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

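/*
 * Illustrative usage (hypothetical call sites): a cdev open path would call
 * idxd_wq_get(wq) and the release path idxd_wq_put(wq). client_count is a
 * plain int, so callers are expected to serialize via wq->wq_lock.
 */
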
int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
					struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
	__idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);

#define module_idxd_driver(__idxd_driver) \
	module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)

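/*
 * Illustrative sketch (hypothetical sub-driver, compiled out): how a driver
 * binds to WQ devices on the dsa bus with module_idxd_driver(). The probe/
 * remove bodies and the "example" name are assumptions for illustration;
 * idxd_dmaengine_drv and idxd_user_drv are the real in-tree examples.
 */
#if 0
static enum idxd_dev_type example_dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

static int example_drv_probe(struct idxd_dev *idxd_dev)
{
	/* Convert the generic idxd_dev handed in by the bus to a WQ. */
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	return drv_enable_wq(wq);
}

static void example_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	drv_disable_wq(wq);
}

static struct idxd_device_driver example_idxd_drv = {
	.probe = example_drv_probe,
	.remove = example_drv_remove,
	.name = "example",
	.type = example_dev_types,
};
module_idxd_driver(example_idxd_drv);
#endif
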
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);

/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);

/* device control */
int idxd_register_idxd_drv(void);
void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);

/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
void idxd_wq_free_irq(struct idxd_wq *wq);
int idxd_wq_request_irq(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);

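/*
 * Illustrative sketch (compiled out): the allocate/fill/submit pattern a
 * kernel client of the submission API follows. The NOOP opcode and the
 * request-completion flags come from uapi/linux/idxd.h; the function itself
 * is hypothetical.
 */
#if 0
static int example_submit_noop(struct idxd_wq *wq)
{
	struct idxd_desc *desc;
	int rc;

	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Request a completion record so the irq thread can find us. */
	desc->hw->opcode = DSA_OPCODE_NOOP;
	desc->hw->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	desc->hw->completion_addr = desc->compl_dma;

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0)
		idxd_free_desc(wq, desc);
	return rc;
}
#endif
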
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type, bool free_desc);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
void perfmon_init(void);
void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif

#endif /* _IDXD_H_ */