// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/lockdep.h>

#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

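/* Look up a zpci_dev by its function ID under zpci_list_lock */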
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

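/* Release functions that are Standby locally but already Reserved at the platform */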
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota, u8 *status)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	/* Work around off by one in ISM virt device */
	if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
		fib.pal = limit + (1 << 12);
	else
		fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_iommu_ctrs *ctrs;
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	ctrs = zpci_get_iommu_ctrs(zdev);
	if (ctrs) {
		atomic64_set(&ctrs->mapped_pages, 0);
		atomic64_set(&ctrs->unmapped_pages, 0);
		atomic64_set(&ctrs->global_rpcits, 0);
		atomic64_set(&ctrs->sync_map_rpcits, 0);
		atomic64_set(&ctrs->sync_rpcits, 0);
	}

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

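/* PCI config space accessors using the PCI load/store instructions on the config address space */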
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count * 8);
}

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	/*
	 * When PCI MIO instructions are unavailable the "physical" address
	 * encodes a hint for accessing the PCI memory space it represents.
	 * Just pass it unchanged such that ioread/iowrite can decode it.
	 */
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *)phys_addr;

	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

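/* Drop a reference on the iomap entry and clear it once the last mapping is gone */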
static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

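/* Point each BAR resource at its MIO address or, without MIO, at an iomap cookie */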
static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

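/* Allocate a free slot in the global zpci_iomap table; returns the index or -ENOSPC */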
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

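/* Propagate a changed function handle into all in-use iomap entries of this device */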
static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

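/* Allocate and request a struct resource covering [start, start + size - 1] */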
static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

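/* Hook called by the common PCI code for each new pci_dev: map BARs and claim their resources */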
int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

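/* Enable the PCI function and update the function handle on success */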
int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the function's internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently, after reset the PCI function requires re-initialization via the
 * common PCI code, including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * locking - asserted by lockdep.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state the
		 * FH may indicate an enabled device but disable says the
		 * device is already disabled; don't treat it as an error here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_enable_device(zdev);
	if (rc)
		return rc;

	if (zdev->dma_table)
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
					virt_to_phys(zdev->dma_table), &status);
	if (rc) {
		zpci_disable_device(zdev);
		return rc;
	}

	return 0;
}

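/*
 * Usage sketch: drivers normally reach zpci_hot_reset_device() through the
 * common PCI reset paths rather than calling it directly, e.g.:
 *
 *	if (pci_reset_function(pdev))
 *		dev_warn(&pdev->dev, "reset failed\n");
 */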
/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->state_lock);
	mutex_init(&zdev->fmb_lock);
	mutex_init(&zdev->kzdev_lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
		state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure occurs,
 * the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	zpci_update_fh(zdev, fh);
	return zpci_bus_scan_device(zdev);
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
		return 0;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can not be found via
 * get_zdev_by_fid() anymore but may still be accessible via existing
 * references though it will not be functional anymore.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
	int ret;

	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev_enabled(zdev))
		zpci_disable_device(zdev);

	switch (zdev->state) {
	case ZPCI_FN_STATE_CONFIGURED:
		ret = sclp_pci_deconfigure(zdev->fid);
		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		spin_lock(&zpci_list_lock);
		list_del(&zdev->entry);
		spin_unlock(&zpci_list_lock);
		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
		fallthrough;
	case ZPCI_FN_STATE_RESERVED:
		if (zdev->has_resources)
			zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree_rcu(zdev, rcu);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

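/* Allocate the FMB kmem cache and the global iomap table and bitmap */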
static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

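/* Parse "pci=" early parameters: off, nomio, force_floating, norid */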
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;
	zpci_bus_scan_busses();

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);