// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"

#define CXL_ERROR_DETECTED_EVENT	1
#define CXL_SLOT_RESET_EVENT		2
#define CXL_RESUME_EVENT		3
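
/*
 * These three events mirror the standard PCI error recovery callback
 * sequence (error_detected -> slot_reset -> resume). The helper below
 * replays that sequence for every device hanging off the AFU's virtual
 * PHB, since the guest has no native EEH/AER path to drive it.
 */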
static void pci_error_handlers(struct cxl_afu *afu,
				int bus_error_event,
				pci_channel_state_t state)
{
	struct pci_dev *afu_dev;

	if (afu->phb == NULL)
		return;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		switch (bus_error_event) {
		case CXL_ERROR_DETECTED_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->error_detected)
				afu_dev->driver->err_handler->error_detected(afu_dev, state);
			break;
		case CXL_SLOT_RESET_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_dev->driver->err_handler->slot_reset(afu_dev);
			break;
		case CXL_RESUME_EVENT:
			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
			break;
		}
	}
}

static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
						u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}
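
/*
 * VPD is fetched through a scatter/gather list handed to the hypervisor
 * by physical address. Each sg_list entry describes one zeroed page of
 * SG_BUFFER_SIZE bytes; the hcall returns the total size of the
 * available VPD in 'out' and fills as many of the buffers as it can.
 * Illustrative sketch of one entry (big-endian, as the hypervisor
 * expects):
 *
 *	le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
 *	le[i].len       = cpu_to_be64(SG_BUFFER_SIZE);
 */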
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}

static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq_psl8(irq, ctx, &irq_info);
	return rc;
}

static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
	u64 state;
	int rc = 0;

	if (!afu)
		return -EIO;

	rc = cxl_h_read_error_state(afu->guest->handle, &state);
	if (!rc) {
		WARN_ON(state != H_STATE_NORMAL &&
			state != H_STATE_DISABLE &&
			state != H_STATE_TEMP_UNAVAILABLE &&
			state != H_STATE_PERM_UNAVAILABLE);
		*state_out = state & 0xffffffff;
	}
	return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr, afu_error, dsisr;

	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			rc);

	return IRQ_HANDLED;
}
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				*irq, *irq + len - 1);
			return 0;
		}
	}
	return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
			(irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}

static int guest_reset(struct cxl *adapter)
{
	struct cxl_afu *afu = NULL;
	int i, rc;

	pr_devel("Adapter reset request\n");
	spin_lock(&adapter->afu_list_lock);
	for (i = 0; i < adapter->slices; i++) {
		if ((afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
					pci_channel_io_frozen);
			cxl_context_detach_all(afu);
		}
	}

	rc = cxl_h_reset_adapter(adapter->guest->handle);
	for (i = 0; i < adapter->slices; i++) {
		if (!rc && (afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
	}
	spin_unlock(&adapter->afu_list_lock);
	return rc;
}

static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}
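
/*
 * Multi-IRQ allocation strategy: when 'num' contiguous interrupts
 * cannot be found in one piece, the request is halved until it fits
 * and the remainder is retried in the next of the CXL_IRQ_RANGES
 * slots (e.g. a request for 7 may end up as ranges of 4, 2 and 1).
 * Anything still left over means overall failure, and everything
 * grabbed so far is released.
 */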
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				 guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}
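
/*
 * Fault acknowledgment goes through the cxl_h_control_faults() hcall
 * wrapper rather than a direct write of CXL_PSL_TFC_An as on bare
 * metal: only the upper word of the TFC value is passed, along with a
 * flag telling the hypervisor whether to reset the PSL
 * (psl_reset_mask != 0).
 */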
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				tfc >> 32, (psl_reset_mask != 0));
}

static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}
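
/*
 * AFU configuration records are read through a bounce page: the
 * hypervisor copies 'sz' bytes of the record into a zeroed page owned
 * by the guest, and the value is then picked up with the in_le*()
 * accessors, since config space is little-endian. There is no write
 * path from the guest (see the guest_afu_cr_write*() stubs below).
 */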
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
			u64 offset, u64 *val)
{
	unsigned long cr;
	char c;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		c = *((char *) cr);
		*val = c;
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}
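
/*
 * Attaching a context in AFU directed mode means describing the process
 * to the hypervisor instead of programming the PSL directly:
 *
 *	1. build a struct cxl_process_element_hcall in memory (flags,
 *	   pid, segment table pointers, interrupt bitmap, AMR, WED);
 *	2. disable the AFU interrupts while the element is not yet live;
 *	3. cxl_h_attach_process() registers the element and returns the
 *	   process token plus the per-process MMIO address and size;
 *	4. set up ctx->psn_phys/psn_size and re-enable the interrupts.
 */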
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	u64 flags = 0;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags = cpu_to_be64(flags);
	elem->common.tid = cpu_to_be32(0); /* Unused */
	elem->common.pid = cpu_to_be32(pid);
	elem->common.csrp = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have at least one interrupt allocated to take faults for
	 * kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		rc = afu_register_irqs(ctx, 0);
		if (rc)
			goto out_free;
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				&ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
				ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

out_free:
	free_page((u64)elem);
	return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;

	return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);
	kfree(afu->guest);
	kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}
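
/*
 * The AFU error buffer cannot be mapped from the guest, so it is read
 * through a bounce page filled by the cxl_h_get_afu_err() hcall; at
 * most ERR_BUFF_MAX_COPY_SIZE (one page) is copied back per call.
 */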
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);
	return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}

static bool guest_support_attributes(const char *attr_name,
				enum cxl_attrs type)
{
	switch (type) {
	case CXL_ADAPTER_ATTRS:
		if ((strcmp(attr_name, "base_image") == 0) ||
			(strcmp(attr_name, "load_image_on_perst") == 0) ||
			(strcmp(attr_name, "perst_reloads_same_image") == 0) ||
			(strcmp(attr_name, "image_loaded") == 0))
			return false;
		break;
	case CXL_AFU_MASTER_ATTRS:
		if ((strcmp(attr_name, "pp_mmio_off") == 0))
			return false;
		break;
	case CXL_AFU_ATTRS:
		break;
	default:
		break;
	}

	return true;
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		goto err;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err1;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err2;

	return 0;
err2:
	cxl_sysfs_afu_m_remove(afu);
err1:
	cxl_chardev_afu_remove(afu);
err:
	return rc;
}

static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}

static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}

static int afu_update_state(struct cxl_afu *afu)
{
	int rc, cur_state;

	rc = afu_read_error_state(afu, &cur_state);
	if (rc)
		return rc;

	if (afu->guest->previous_state == cur_state)
		return 0;

	pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

	switch (cur_state) {
	case H_STATE_NORMAL:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_DISABLE:
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_frozen);

		cxl_context_detach_all(afu);
		if ((rc = cxl_ops->afu_reset(afu)))
			pr_devel("reset hcall failed %d\n", rc);

		rc = afu_read_error_state(afu, &cur_state);
		if (!rc && cur_state == H_STATE_NORMAL) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
		afu->guest->previous_state = 0;
		break;

	case H_STATE_TEMP_UNAVAILABLE:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_PERM_UNAVAILABLE:
		dev_err(&afu->dev, "AFU is in permanent error state\n");
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_perm_failure);
		afu->guest->previous_state = cur_state;
		break;

	default:
		pr_err("Unexpected AFU(%d) error state: %#x\n",
		       afu->slice, cur_state);
		return -EINVAL;
	}

	return rc;
}
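
/*
 * There is no interrupt to tell the guest that the AFU's error state
 * changed, so a delayed work item polls it instead: it is scheduled
 * one second after init, then re-arms itself every three seconds until
 * the AFU becomes permanently unavailable or cxl_guest_remove_afu()
 * clears handle_err and flushes the work.
 */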
static void afu_handle_errstate(struct work_struct *work)
{
	struct cxl_afu_guest *afu_guest =
		container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);

	if (!afu_update_state(afu_guest->parent) &&
	    afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
		return;

	if (afu_guest->handle_err)
		schedule_delayed_work(&afu_guest->work_err,
				msecs_to_jiffies(3000));
}

static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
	int state;

	if (afu && (!afu_read_error_state(afu, &state))) {
		if (state == H_STATE_NORMAL)
			return true;
	}

	return false;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	return 0;
}
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
					  adapter->adapter_num,
					  slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	/*
	 * wake up the cpu periodically to check the state
	 * of the AFU using "afu" stored in the guest structure.
	 */
	afu->guest->parent = afu;
	afu->guest->handle_err = true;
	INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
	schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	if (!afu)
		return;

	/* flush and stop pending job */
	afu->guest->handle_err = false;
	flush_delayed_work(&afu->guest->work_err);

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);

	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}

static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest) {
		if (adapter->guest->irq_avail) {
			for (i = 0; i < adapter->guest->irq_nranges; i++) {
				cur = &adapter->guest->irq_avail[i];
				kfree(cur->bitmap);
			}
			kfree(adapter->guest->irq_avail);
		}
		kfree(adapter->guest->status);
		kfree(adapter->guest);
	}
	cxl_remove_adapter_nr(adapter);
	kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
	/* The absence of this property means that the operational
	 * status is unknown or okay
	 */
	if (strlen(adapter->guest->status) &&
	    strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING: Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}

ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	cxl_guest_remove_chardev(adapter);
	device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}

struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	/*
	 * Hypervisor controls PSL timebase initialization (p1 register).
	 * On FW840, PSL is initialized.
	 */
	adapter->psl_timebase_synced = true;

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	if ((rc = cxl_guest_add_chardev(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	/* release the context lock as the adapter is configured */
	cxl_adapter_context_unlock(adapter);

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_guest_remove_chardev(adapter);
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}

void cxl_guest_reload_module(struct cxl *adapter)
{
	struct platform_device *pdev;

	pdev = adapter->guest->pdev;
	cxl_guest_remove_adapter(adapter);

	cxl_of_probe(pdev);
}

const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.update_ivtes = NULL,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
	.afu_cr_write8 = guest_afu_cr_write8,
	.afu_cr_write16 = guest_afu_cr_write16,
	.afu_cr_write32 = guest_afu_cr_write32,
	.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};