// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"

#define SPA_PASID_BITS 15
#define SPA_PASID_MAX ((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG 22 /* Each SPA is 4MB */
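
/*
 * Sizing note: each process element is 128 bytes (see the BUILD_BUG_ON
 * in ocxl_link_add_pe()) and the SPA covers 2^SPA_PASID_BITS = 32768
 * PASIDs, so 128 * 32768 = 2^22 bytes = 4MB, hence SPA_SPA_SIZE_LOG.
 *
 * The (63-n) shifts in the defines below follow the Power MSB-0 bit
 * numbering convention: bit 0 is the most significant bit of the
 * 64-bit register.
 */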

#define SPA_CFG_SF (1ull << (63-0))
#define SPA_CFG_TA (1ull << (63-1))
#define SPA_CFG_HV (1ull << (63-3))
#define SPA_CFG_UV (1ull << (63-4))
#define SPA_CFG_XLAT_hpt (0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh (2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror (3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR (1ull << (63-49))
#define SPA_CFG_TC (1ull << (63-54))
#define SPA_CFG_DR (1ull << (63-59))

#define SPA_XSL_TF (1ull << (63-3))  /* Translation fault */
#define SPA_XSL_S  (1ull << (63-38)) /* Store operation */

#define SPA_PE_VALID 0x80000000

struct pe_data {
	struct mm_struct *mm;
	/* callback to trigger when a translation fault occurs */
	void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
	/* opaque pointer to be passed to the above callback */
	void *xsl_err_data;
	struct rcu_head rcu;
};

struct spa {
	struct ocxl_process_element *spa_mem;
	int spa_order;
	struct mutex spa_lock;
	struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
	char *irq_name;
	int virq;
	void __iomem *reg_dsisr;
	void __iomem *reg_dar;
	void __iomem *reg_tfc;
	void __iomem *reg_pe_handle;
	/*
	 * The following fields are used by the memory fault interrupt
	 * handler. We can only have one interrupt at a time: the NPU
	 * won't raise another one until the previous fault has been
	 * acked by writing to the TFC register.
	 */
	struct xsl_fault {
		struct work_struct fault_work;
		u64 pe;
		u64 dsisr;
		u64 dar;
		struct pe_data pe_data;
	} xsl_fault;
};

/*
 * An OpenCAPI link can be used by several PCI functions. We have
 * one link per device slot.
 *
 * A linked list of OpenCAPI links should suffice, as there's a
 * limited number of OpenCAPI slots on a system and lookup is only
 * done when the device is probed.
 */
struct ocxl_link {
	struct list_head list;
	struct kref ref;
	int domain;
	int bus;
	int dev;
	atomic_t irq_available;
	struct spa *spa;
	void *platform_data;
};
static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);
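
/*
 * links_list is protected by links_list_lock. Each link is
 * reference-counted: ocxl_link_setup() takes a reference (or allocates
 * the link on first use) and ocxl_link_release() drops it, with
 * release_xsl() freeing the link when the count reaches zero.
 */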

static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
	u64 reg;

	*dsisr = in_be64(spa->reg_dsisr);
	*dar = in_be64(spa->reg_dar);
	reg = in_be64(spa->reg_pe_handle);
	*pe = reg & SPA_PE_MASK;
}

static void ack_irq(struct spa *spa, enum xsl_response r)
{
	u64 reg = 0;

	/* continue is not supported */
	if (r == RESTART)
		reg = PPC_BIT(31);
	else if (r == ADDRESS_ERROR)
		reg = PPC_BIT(30);
	else
		WARN(1, "Invalid irq response %d\n", r);

	if (reg) {
		trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
				spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
		out_be64(spa->reg_tfc, reg);
	}
}

static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
	vm_fault_t flt = 0;
	unsigned long access, flags, inv_flags = 0;
	enum xsl_response r;
	struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
					fault_work);
	struct spa *spa = container_of(fault, struct spa, xsl_fault);
	int rc;

	/*
	 * We must release a reference on mm_users whenever exiting this
	 * function (taken in the memory fault interrupt handler)
	 */
	rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
				&flt);
	if (rc) {
		pr_debug("copro_handle_mm_fault failed: %d\n", rc);
		if (fault->pe_data.xsl_err_cb) {
			fault->pe_data.xsl_err_cb(
				fault->pe_data.xsl_err_data,
				fault->dar, fault->dsisr);
		}
		r = ADDRESS_ERROR;
		goto ack;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash
		 * since current->trap is not a 0x400 or 0x300, so
		 * just call hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (fault->dsisr & SPA_XSL_S)
			access |= _PAGE_WRITE;

		if (get_region_id(fault->dar) != USER_REGION_ID)
			access |= _PAGE_PRIVILEGED;

		local_irq_save(flags);
		hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
			inv_flags);
		local_irq_restore(flags);
	}
	r = RESTART;
ack:
	mmput(fault->pe_data.mm);
	ack_irq(spa, r);
}

static irqreturn_t xsl_fault_handler(int irq, void *data)
{
	struct ocxl_link *link = (struct ocxl_link *) data;
	struct spa *spa = link->spa;
	u64 dsisr, dar, pe_handle;
	struct pe_data *pe_data;
	struct ocxl_process_element *pe;
	int pid;
	bool schedule = false;

	read_irq(spa, &dsisr, &dar, &pe_handle);
	trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);

	WARN_ON(pe_handle > SPA_PE_MASK);
	pe = spa->spa_mem + pe_handle;
	pid = be32_to_cpu(pe->pid);
	/* We could be reading all null values here if the PE is being
	 * removed while an interrupt kicks in. It's not supposed to
	 * happen if the driver notified the AFU to terminate the
	 * PASID, and the AFU waited for pending operations before
	 * acknowledging. But even if it happens, we won't find a
	 * memory context below and fail silently, so it should be ok.
	 */
	if (!(dsisr & SPA_XSL_TF)) {
		WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}

	rcu_read_lock();
	pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		/*
		 * Could only happen if the driver didn't notify the
		 * AFU about PASID termination before removing the PE,
		 * or the AFU didn't wait for all memory access to
		 * have completed.
		 *
		 * Either way, we fail early, but we shouldn't log an
		 * error message, as it is a valid (if unexpected)
		 * scenario
		 */
		rcu_read_unlock();
		pr_debug("Unknown mm context for xsl interrupt\n");
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}

	if (!pe_data->mm) {
		/*
		 * translation fault from a kernel context - an OpenCAPI
		 * device tried to access a bad kernel address
		 */
		rcu_read_unlock();
		pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n");
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}
	WARN_ON(pe_data->mm->context.id != pid);

	if (mmget_not_zero(pe_data->mm)) {
		spa->xsl_fault.pe = pe_handle;
		spa->xsl_fault.dar = dar;
		spa->xsl_fault.dsisr = dsisr;
		spa->xsl_fault.pe_data = *pe_data;
		schedule = true;
		/* mm_users count released by bottom half */
	}
	rcu_read_unlock();
	if (schedule)
		schedule_work(&spa->xsl_fault.fault_work);
	else
		ack_irq(spa, ADDRESS_ERROR);
	return IRQ_HANDLED;
}

static void unmap_irq_registers(struct spa *spa)
{
	pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
				spa->reg_pe_handle);
}

static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
	return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
				&spa->reg_tfc, &spa->reg_pe_handle);
}

static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
{
	struct spa *spa = link->spa;
	int rc;
	int hwirq;

	rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
	if (rc)
		return rc;

	rc = map_irq_registers(dev, spa);
	if (rc)
		return rc;

	spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
				link->domain, link->bus, link->dev);
	if (!spa->irq_name) {
		dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
		rc = -ENOMEM;
		goto err_xsl;
	}
	/*
	 * At some point, we'll need to look into allowing a higher
	 * number of interrupts. Could we have an IRQ domain per link?
	 */
	spa->virq = irq_create_mapping(NULL, hwirq);
	if (!spa->virq) {
		dev_err(&dev->dev,
			"irq_create_mapping failed for translation interrupt\n");
		rc = -EINVAL;
		goto err_name;
	}

	dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

	rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
			link);
	if (rc) {
		dev_err(&dev->dev,
			"request_irq failed for translation interrupt: %d\n",
			rc);
		rc = -EINVAL;
		goto err_mapping;
	}
	return 0;

err_mapping:
	irq_dispose_mapping(spa->virq);
err_name:
	kfree(spa->irq_name);
err_xsl:
	unmap_irq_registers(spa);
	return rc;
}

static void release_xsl_irq(struct ocxl_link *link)
{
	struct spa *spa = link->spa;

	if (spa->virq) {
		free_irq(spa->virq, link);
		irq_dispose_mapping(spa->virq);
	}
	kfree(spa->irq_name);
	unmap_irq_registers(spa);
}

static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link)
{
	struct spa *spa;

	spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
	if (!spa)
		return -ENOMEM;

	mutex_init(&spa->spa_lock);
	INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
	INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

	spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
	spa->spa_mem = (struct ocxl_process_element *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
	if (!spa->spa_mem) {
		dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
		kfree(spa);
		return -ENOMEM;
	}
	pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
		link->dev, spa->spa_mem);

	link->spa = spa;
	return 0;
}

static void free_spa(struct ocxl_link *link)
{
	struct spa *spa = link->spa;

	pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
		link->dev);

	if (spa && spa->spa_mem) {
		free_pages((unsigned long) spa->spa_mem, spa->spa_order);
		kfree(spa);
		link->spa = NULL;
	}
}

static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link)
{
	struct ocxl_link *link;
	int rc;

	link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	kref_init(&link->ref);
	link->domain = pci_domain_nr(dev->bus);
	link->bus = dev->bus->number;
	link->dev = PCI_SLOT(dev->devfn);
	atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);

	rc = alloc_spa(dev, link);
	if (rc)
		goto err_free;

	rc = setup_xsl_irq(dev, link);
	if (rc)
		goto err_spa;

	/* platform specific hook */
	rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
				&link->platform_data);
	if (rc)
		goto err_xsl_irq;

	*out_link = link;
	return 0;

err_xsl_irq:
	release_xsl_irq(link);
err_spa:
	free_spa(link);
err_free:
	kfree(link);
	return rc;
}

static void free_link(struct ocxl_link *link)
{
	release_xsl_irq(link);
	free_spa(link);
	kfree(link);
}

int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
	int rc = 0;
	struct ocxl_link *link;

	mutex_lock(&links_list_lock);
	list_for_each_entry(link, &links_list, list) {
		/* The functions of a device all share the same link */
		if (link->domain == pci_domain_nr(dev->bus) &&
			link->bus == dev->bus->number &&
			link->dev == PCI_SLOT(dev->devfn)) {
			kref_get(&link->ref);
			*link_handle = link;
			goto unlock;
		}
	}
	rc = alloc_link(dev, PE_mask, &link);
	if (rc)
		goto unlock;

	list_add(&link->list, &links_list);
	*link_handle = link;
unlock:
	mutex_unlock(&links_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);

static void release_xsl(struct kref *ref)
{
	struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);

	list_del(&link->list);
	/* call platform code before releasing data */
	pnv_ocxl_spa_release(link->platform_data);
	free_link(link);
}

void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;

	mutex_lock(&links_list_lock);
	kref_put(&link->ref, release_xsl);
	mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);

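/*
 * Illustrative usage sketch (not part of this driver): a hypothetical
 * AFU driver would pair ocxl_link_setup() and ocxl_link_release() in
 * its probe/remove paths; all functions of a slot end up sharing the
 * same link. Names and the PE mask value below are made up.
 *
 *	static void *link_handle;
 *
 *	static int my_afu_probe(struct pci_dev *dev)
 *	{
 *		return ocxl_link_setup(dev, 0, &link_handle);
 *	}
 *
 *	static void my_afu_remove(struct pci_dev *dev)
 *	{
 *		ocxl_link_release(dev, link_handle);
 *	}
 */
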
static u64 calculate_cfg_state(bool kernel)
{
	u64 state;

	state = SPA_CFG_DR;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		state |= SPA_CFG_TC;
	if (radix_enabled())
		state |= SPA_CFG_XLAT_ror;
	else
		state |= SPA_CFG_XLAT_hpt;
	state |= SPA_CFG_HV;
	if (kernel) {
		if (mfmsr() & MSR_SF)
			state |= SPA_CFG_SF;
	} else {
		state |= SPA_CFG_PR;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			state |= SPA_CFG_SF;
	}
	return state;
}

int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
		u64 amr, struct mm_struct *mm,
		void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
		void *xsl_err_data)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc = 0;
	struct pe_data *pe_data;

	BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	mutex_lock(&spa->spa_lock);
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	if (pe->software_state) {
		rc = -EBUSY;
		goto unlock;
	}

	pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
	if (!pe_data) {
		rc = -ENOMEM;
		goto unlock;
	}

	pe_data->mm = mm;
	pe_data->xsl_err_cb = xsl_err_cb;
	pe_data->xsl_err_data = xsl_err_data;

	memset(pe, 0, sizeof(struct ocxl_process_element));
	pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
	pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	pe->pid = cpu_to_be32(pidr);
	pe->tid = cpu_to_be32(tidr);
	pe->amr = cpu_to_be64(amr);
	pe->software_state = cpu_to_be32(SPA_PE_VALID);

	/*
	 * For user contexts, register a copro so that TLBIs are seen
	 * by the nest MMU. If we have a kernel context, TLBIs are
	 * already global.
	 */
	if (mm)
		mm_context_add_copro(mm);
	/*
	 * Barrier is to make sure PE is visible in the SPA before it
	 * is used by the device. It also helps with the global TLBI
	 * invalidation
	 */
	mb();
	radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);

	/*
	 * The mm must stay valid for as long as the device uses it. We
	 * lower the count when the context is removed from the SPA.
	 *
	 * We grab mm_count (and not mm_users), as we don't want to
	 * end up in a circular dependency if a process mmaps its
	 * mmio, therefore incrementing the file ref count when
	 * calling mmap(), and forgets to unmap before exiting. In
	 * that scenario, when the kernel handles the death of the
	 * process, the file is not cleaned because unmap was not
	 * called, and the mm wouldn't be freed because we would still
	 * have a reference on mm_users. Incrementing mm_count solves
	 * the problem.
	 */
	if (mm)
		mmgrab(mm);
	trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);

int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	pe->tid = cpu_to_be32(tid);

	/*
	 * The barrier makes sure the PE is updated
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	mutex_unlock(&spa->spa_lock);
	return rc;
}

int ocxl_link_remove_pe(void *link_handle, int pasid)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	struct pe_data *pe_data;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	/*
	 * About synchronization with our memory fault handler:
	 *
	 * Before removing the PE, the driver is supposed to have
	 * notified the AFU, which should have cleaned up and made
	 * sure the PASID is no longer in use, including pending
	 * interrupts. However, there's no way to be sure...
	 *
	 * We clear the PE and remove the context from our radix
	 * tree. From that point on, any new interrupt for that
	 * context will fail silently, which is ok. As mentioned
	 * above, that's not expected, but it could happen if the
	 * driver or AFU didn't do the right thing.
	 *
	 * There could still be a bottom half running, but we don't
	 * need to wait/flush, as it is managing a reference count on
	 * the mm it reads from the radix tree.
	 */
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
		rc = -EINVAL;
		goto unlock;
	}

	trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
				be32_to_cpu(pe->pid), be32_to_cpu(pe->tid));

	memset(pe, 0, sizeof(struct ocxl_process_element));
	/*
	 * The barrier makes sure the PE is removed from the SPA
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		WARN(1, "Couldn't find pe data when removing PE\n");
	} else {
		if (pe_data->mm) {
			mm_context_remove_copro(pe_data->mm);
			mmdrop(pe_data->mm);
		}
		kfree_rcu(pe_data, rcu);
	}
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);

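/*
 * Illustrative usage sketch (not part of this driver): a hypothetical
 * caller attaching the current process to a PASID and detaching it
 * later. Names are made up and error handling is elided.
 *
 *	static void my_xsl_error(void *data, u64 addr, u64 dsisr)
 *	{
 *		pr_err("unhandled fault at %#llx (dsisr %#llx)\n", addr, dsisr);
 *	}
 *
 *	int my_attach(void *link_handle, int pasid, u64 amr)
 *	{
 *		// For a user context, pidr is the mm context id, as
 *		// checked against pe->pid by the fault handler above.
 *		u32 pidr = current->mm->context.id;
 *
 *		return ocxl_link_add_pe(link_handle, pasid, pidr, 0, amr,
 *					current->mm, my_xsl_error, NULL);
 *	}
 *
 *	void my_detach(void *link_handle, int pasid)
 *	{
 *		ocxl_link_remove_pe(link_handle, pasid);
 *	}
 */
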
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;
	int irq;

	if (atomic_dec_if_positive(&link->irq_available) < 0)
		return -ENOSPC;

	irq = xive_native_alloc_irq();
	if (!irq) {
		atomic_inc(&link->irq_available);
		return -ENXIO;
	}

	*hw_irq = irq;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);

void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;

	xive_native_free_irq(hw_irq);
	atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);
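
/*
 * Illustrative usage sketch (not part of this driver): interrupt
 * allocations are bounded per link by MAX_IRQ_PER_LINK, so callers
 * must handle ocxl_link_irq_alloc() failing and return the interrupt
 * with ocxl_link_free_irq() when done.
 *
 *	int hw_irq, rc;
 *
 *	rc = ocxl_link_irq_alloc(link_handle, &hw_irq);
 *	if (rc)
 *		return rc;
 *	// ... hand hw_irq to the AFU ...
 *	ocxl_link_free_irq(link_handle, hw_irq);
 */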