/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

struct idxd_device;
struct idxd_wq;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128
#define IDXD_PMU_EVENT_MAX	64

struct idxd_device_driver {
	struct device_driver drv;
};

struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between the irq thread processing
	 * descriptors and the irq thread processing an error descriptor.
	 */
	spinlock_t list_lock;
};

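/*
 * Flow of the two lists above (a sketch, inferred from the submission and
 * irq paths that use them): submitters push interrupt-requesting
 * descriptors onto the lockless pending_llist; the irq thread drains it,
 * completes finished descriptors, and moves still-pending ones onto
 * work_list, under list_lock, for later passes.
 */
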
struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

struct idxd_pmu {
	struct idxd_device *idxd;

	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	int n_events;

	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);

	struct pmu pmu;
	char name[IDXD_NAME_SIZE];
	int cpu;

	int n_counters;
	int counter_width;
	int n_event_categories;

	bool per_counter_caps_supported;
	unsigned long supported_event_categories;

	unsigned long supported_filters;
	int n_filters;

	struct hlist_node cpuhp_node;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct device dev;
	int minor;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE	1024
#define WQ_TYPE_SIZE	10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

struct idxd_wq {
	void __iomem *portal;
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct device conf_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	void *compls_raw;
	dma_addr_t compls_addr;
	dma_addr_t compls_addr_raw;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	bool ats_dis;
};

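/*
 * The descriptor resources above (hw_descs, compls/iax_compls, descs) are
 * parallel arrays set up by idxd_wq_alloc_resources() and torn down by
 * idxd_wq_free_resources(), declared later in this header; sbq hands out
 * free indices into them at descriptor allocation time.
 */
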
struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
	u32 cmd_cap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	struct device_type *dev_type;
	int compl_size;
	int align;
};

struct idxd_device {
	struct device conf_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;

	int *int_handles;

	struct idxd_pmu *idxd_pmu;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	unsigned int vector;
	struct idxd_wq *wq;
};

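/*
 * Descriptive note: each software descriptor pairs a DMA-mapped hardware
 * descriptor (hw/iax_hw at desc_dma) with its completion record
 * (completion/iax_completion at compl_dma); llnode and list let it sit on
 * an irq entry's pending_llist or work_list while in flight.
 */
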
/*
 * This is a software-defined error for the completion status. We overload
 * an error code value that will never appear in the completion status and
 * is used only in the SWERR register.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};

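/*
 * Illustrative use (a sketch of the consuming side, not a definition in
 * this header): completion paths can recognize aborted descriptors with
 *
 *	if (desc->completion->status == IDXD_COMP_DESC_ABORT)
 *		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
 */
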
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

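/*
 * Typical use (a sketch mirroring the driver's sysfs callbacks): recover
 * the owning object from the embedded conf_dev handed to a callback:
 *
 *	struct idxd_wq *wq = confdev_to_wq(dev);
 */
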
extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct device *dev)
{
	return dev->type == &dsa_device_type;
}

static inline bool is_iax_dev(struct device *dev)
{
	return dev->type == &iax_device_type;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev) || is_iax_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev->type == &idxd_wq_device_type;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}

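/*
 * Rationale for the check above: shared WQ submission goes through
 * ENQCMD/ENQCMDS, which tag each descriptor with a PASID, so a shared WQ
 * is usable only when the CPU supports ENQCMD and the device has PASID
 * enabled.
 */
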
enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

enum idxd_interrupt_type {
	IDXD_IRQ_MSIX = 0,
	IDXD_IRQ_IMS,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}

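/*
 * Worked example, assuming 4K pages: each WQ owns four portal pages, so
 * for wq_id == 1 the unlimited portal lands at (1 * 4) << 12 = 0x4000 and
 * the limited portal one page later at 0x4000 + 0x1000 = 0x5000.
 */
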
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

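/*
 * Note that client_count is a plain int rather than an atomic or kref;
 * callers are expected to serialize these helpers externally (the driver
 * takes wq->wq_lock around the open/release paths that use them).
 */
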
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);

/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);

/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);

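/*
 * Typical submission flow (a sketch; error handling abbreviated):
 *
 *	struct idxd_desc *desc;
 *	int rc;
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	... program desc->hw ...
 *	rc = idxd_submit_desc(wq, desc);
 *	if (rc < 0)
 *		idxd_free_desc(wq, desc);
 */
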
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
void perfmon_init(void);
void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif

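/*
 * Convenience helper: complete the dmaengine transaction for a descriptor
 * and return the descriptor to its wq's free pool.
 */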
static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
	idxd_dma_complete_txd(desc, reason);
	idxd_free_desc(desc->wq, desc);
}

#endif