1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

19 #define KMSG_COMPONENT "zpci"
20 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22 #include <linux/kernel.h>
23 #include <linux/slab.h>
24 #include <linux/err.h>
25 #include <linux/export.h>
26 #include <linux/delay.h>
27 #include <linux/seq_file.h>
28 #include <linux/jump_label.h>
29 #include <linux/pci.h>
30 #include <linux/printk.h>
#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

42 /* list of all detected zpci devices */
43 static LIST_HEAD(zpci_list);
44 static DEFINE_SPINLOCK(zpci_list_lock);
46 static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
47 static DEFINE_SPINLOCK(zpci_domain_lock);
49 #define ZPCI_IOMAP_ENTRIES \
50 min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \
51 ZPCI_IOMAP_MAX_ENTRIES)
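
/*
 * One iomap entry is consumed per mapped BAR. The table is sized to half
 * the theoretical maximum of ZPCI_NR_DEVICES * PCI_STD_NUM_BARS entries,
 * capped at ZPCI_IOMAP_MAX_ENTRIES, on the assumption that functions
 * rarely have all of their BARs mapped at once.
 */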
53 unsigned int s390_pci_no_rid;
55 static DEFINE_SPINLOCK(zpci_iomap_lock);
56 static unsigned long *zpci_iomap_bitmap;
57 struct zpci_iomap_entry *zpci_iomap_start;
58 EXPORT_SYMBOL_GPL(zpci_iomap_start);
60 DEFINE_STATIC_KEY_FALSE(have_mio);
62 static struct kmem_cache *zdev_fmb_cache;
64 /* AEN structures that must be preserved over KVM module re-insertion */
65 union zpci_sic_iib *zpci_aipb;
66 EXPORT_SYMBOL_GPL(zpci_aipb);
67 struct airq_iv *zpci_aif_sbv;
68 EXPORT_SYMBOL_GPL(zpci_aif_sbv);
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
        struct zpci_dev *tmp, *zdev = NULL;

        spin_lock(&zpci_list_lock);
        list_for_each_entry(tmp, &zpci_list, entry) {
                if (tmp->fid == fid) {
                        zdev = tmp;
                        break;
                }
        }
        spin_unlock(&zpci_list_lock);
        return zdev;
}

void zpci_remove_reserved_devices(void)
{
        struct zpci_dev *tmp, *zdev;
        enum zpci_state state;
        LIST_HEAD(remove);

        spin_lock(&zpci_list_lock);
        list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
                if (zdev->state == ZPCI_FN_STATE_STANDBY &&
                    !clp_get_state(zdev->fid, &state) &&
                    state == ZPCI_FN_STATE_RESERVED)
                        list_move_tail(&zdev->entry, &remove);
        }
        spin_unlock(&zpci_list_lock);

        list_for_each_entry_safe(zdev, tmp, &remove, entry)
                zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
        return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
        return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

117 /* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
                       u64 base, u64 limit, u64 iota, u8 *status)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
        struct zpci_fib fib = {0};
        u8 cc;

        WARN_ON_ONCE(iota & 0x3fff);
        fib.pba = base;
        /* Work around off by one in ISM virt device */
        if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
                fib.pal = limit + (1 << 12);
        else
                fib.pal = limit;
        fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
        fib.gd = zdev->gisa;
        cc = zpci_mod_fc(req, &fib, status);
        if (cc)
                zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
        return cc;
}
139 EXPORT_SYMBOL_GPL(zpci_register_ioat);
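
/*
 * Usage sketch: after (re-)enabling a function, the DMA translation table
 * is registered for the whole usable DMA range, as done in
 * zpci_hot_reset_device() below:
 *
 *	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 *				virt_to_phys(zdev->dma_table), &status);
 */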
141 /* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
        struct zpci_fib fib = {0};
        u8 cc, status;

        fib.gd = zdev->gisa;
        cc = zpci_mod_fc(req, &fib, &status);
        if (cc)
                zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
        return cc;
}

156 /* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
        struct zpci_iommu_ctrs *ctrs;
        struct zpci_fib fib = {0};
        u8 cc, status;

        if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
                return -EINVAL;

        zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
        if (!zdev->fmb)
                return -ENOMEM;
        WARN_ON((u64) zdev->fmb & 0xf);

        /* reset software counters */
        ctrs = zpci_get_iommu_ctrs(zdev);
        if (ctrs) {
                atomic64_set(&ctrs->mapped_pages, 0);
                atomic64_set(&ctrs->unmapped_pages, 0);
                atomic64_set(&ctrs->global_rpcits, 0);
                atomic64_set(&ctrs->sync_map_rpcits, 0);
                atomic64_set(&ctrs->sync_rpcits, 0);
        }

        fib.fmb_addr = virt_to_phys(zdev->fmb);
        fib.gd = zdev->gisa;
        cc = zpci_mod_fc(req, &fib, &status);
        if (cc) {
                kmem_cache_free(zdev_fmb_cache, zdev->fmb);
                zdev->fmb = NULL;
        }
        return cc ? -EIO : 0;
}
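
/*
 * Note: the FMB handed to the machine must be 16-byte aligned (hence the
 * WARN_ON in zpci_fmb_enable_device()) and at least the fmb_length
 * reported by CLP; the slab cache created in zpci_mem_init() provides
 * that alignment.
 */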
193 /* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
        struct zpci_fib fib = {0};
        u8 cc, status;

        if (!zdev->fmb)
                return -EINVAL;

        fib.gd = zdev->gisa;

        /* Function measurement is disabled if fmb address is zero */
        cc = zpci_mod_fc(req, &fib, &status);
        if (cc == 3) /* Function already gone. */
                cc = 0;

        if (!cc) {
                kmem_cache_free(zdev_fmb_cache, zdev->fmb);
                zdev->fmb = NULL;
        }
        return cc ? -EIO : 0;
}

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data;
        int rc;

        rc = __zpci_load(&data, req, offset);
        if (!rc) {
                data = le64_to_cpu((__force __le64) data);
                data >>= (8 - len) * 8;
                *val = (u32) data;
        } else
                *val = 0xffffffff;
        return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data = val;
        int rc;

        data <<= (8 - len) * 8;
        data = (__force u64) cpu_to_le64(data);
        rc = __zpci_store(data, req, offset);
        return rc;
}
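
/*
 * Shift arithmetic example: a 2-byte config read arrives left-aligned in
 * the 64-bit doubleword, so after the endianness conversion
 * zpci_cfg_load() shifts right by (8 - 2) * 8 = 48 bits to right-align
 * the value before truncating it to *val; zpci_cfg_store() mirrors this
 * with a left shift before the store.
 */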
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                       resource_size_t size,
                                       resource_size_t align)
{
        return 0;
}

252 /* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
        zpci_memcpy_toio(to, from, count * 8);
}

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
                           unsigned long prot)
{
        /*
         * When PCI MIO instructions are unavailable the "physical" address
         * encodes a hint for accessing the PCI memory space it represents.
         * Just pass it unchanged such that ioread/iowrite can decode it.
         */
        if (!static_branch_unlikely(&have_mio))
                return (void __iomem *)phys_addr;

        return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
271 EXPORT_SYMBOL(ioremap_prot);
void iounmap(volatile void __iomem *addr)
{
        if (static_branch_likely(&have_mio))
                generic_iounmap(addr);
}
278 EXPORT_SYMBOL(iounmap);
280 /* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
                                        unsigned long offset, unsigned long max)
{
        struct zpci_dev *zdev = to_zpci(pdev);
        int idx;

        idx = zdev->bars[bar].map_idx;
        spin_lock(&zpci_iomap_lock);
        /* Detect overrun */
        WARN_ON(!++zpci_iomap_start[idx].count);
        zpci_iomap_start[idx].fh = zdev->fh;
        zpci_iomap_start[idx].bar = bar;
        spin_unlock(&zpci_iomap_lock);

        return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
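
/*
 * The cookie returned above is not a dereferenceable address: it encodes
 * the iomap table index via ZPCI_ADDR(idx). The read/write accessors and
 * pci_iounmap_fh() recover the index with ZPCI_IDX() and look up the
 * function handle and BAR in zpci_iomap_start.
 */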
static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
                                         unsigned long offset,
                                         unsigned long max)
{
        unsigned long barsize = pci_resource_len(pdev, bar);
        struct zpci_dev *zdev = to_zpci(pdev);
        void __iomem *iova;

        iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
        return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
                              unsigned long offset, unsigned long max)
{
        if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
                return NULL;

        if (static_branch_likely(&have_mio))
                return pci_iomap_range_mio(pdev, bar, offset, max);
        else
                return pci_iomap_range_fh(pdev, bar, offset, max);
}
321 EXPORT_SYMBOL(pci_iomap_range);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        return pci_iomap_range(dev, bar, 0, maxlen);
}
327 EXPORT_SYMBOL(pci_iomap);
static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
                                            unsigned long offset, unsigned long max)
{
        unsigned long barsize = pci_resource_len(pdev, bar);
        struct zpci_dev *zdev = to_zpci(pdev);
        void __iomem *iova;

        iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
        return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
                                 unsigned long offset, unsigned long max)
{
        if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
                return NULL;

        if (static_branch_likely(&have_mio))
                return pci_iomap_wc_range_mio(pdev, bar, offset, max);
        else
                return pci_iomap_range_fh(pdev, bar, offset, max);
}
351 EXPORT_SYMBOL(pci_iomap_wc_range);
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
357 EXPORT_SYMBOL(pci_iomap_wc);
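
/*
 * The _wc variants map through the write-back MIO address
 * (bars[].mio_wb) while the plain variants use the write-through address
 * (bars[].mio_wt); without MIO both fall back to the same fh-based
 * mapping, which offers no write-combining.
 */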
static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
        unsigned int idx = ZPCI_IDX(addr);

        spin_lock(&zpci_iomap_lock);
        /* Detect underrun */
        WARN_ON(!zpci_iomap_start[idx].count);
        if (!--zpci_iomap_start[idx].count) {
                zpci_iomap_start[idx].fh = 0;
                zpci_iomap_start[idx].bar = 0;
        }
        spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
        iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
        if (static_branch_likely(&have_mio))
                pci_iounmap_mio(pdev, addr);
        else
                pci_iounmap_fh(pdev, addr);
}
385 EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
{
        struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

        return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 val)
{
        struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

        return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};
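
/*
 * pci_read()/pci_write() above back the config space accessors of the
 * common PCI core (e.g. pci_bus_read_config_word()), routing each access
 * to the zPCI function behind bus/devfn.
 */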
static void zpci_map_resources(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);
        resource_size_t len;
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;

                if (zpci_use_mio(zdev))
                        pdev->resource[i].start =
                                (resource_size_t __force) zdev->bars[i].mio_wt;
                else
                        pdev->resource[i].start = (resource_size_t __force)
                                pci_iomap_range_fh(pdev, i, 0, 0);
                pdev->resource[i].end = pdev->resource[i].start + len - 1;
        }

        zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);
        resource_size_t len;
        int i;

        if (zpci_use_mio(zdev))
                return;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
                pci_iounmap_fh(pdev, (void __iomem __force *)
                               pdev->resource[i].start);
        }
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
        unsigned long entry;

        spin_lock(&zpci_iomap_lock);
        entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
        if (entry == ZPCI_IOMAP_ENTRIES) {
                spin_unlock(&zpci_iomap_lock);
                return -ENOSPC;
        }
        set_bit(entry, zpci_iomap_bitmap);
        spin_unlock(&zpci_iomap_lock);
        return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
        spin_lock(&zpci_iomap_lock);
        memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
        clear_bit(entry, zpci_iomap_bitmap);
        spin_unlock(&zpci_iomap_lock);
}

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
        int bar, idx;

        spin_lock(&zpci_iomap_lock);
        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (!zdev->bars[bar].size)
                        continue;
                idx = zdev->bars[bar].map_idx;
                if (!zpci_iomap_start[idx].count)
                        continue;
                WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
        }
        spin_unlock(&zpci_iomap_lock);
}

void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
        if (!fh || zdev->fh == fh)
                return;

        zdev->fh = fh;
        if (zpci_use_mio(zdev))
                return;
        if (zdev->has_resources && zdev_enabled(zdev))
                zpci_do_update_iomap_fh(zdev, fh);
}
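
/*
 * Without MIO, ioread/iowrite resolve the function handle through
 * zpci_iomap_start, so a handle change after enable/disable must be
 * propagated to all live iomap entries of the function; with MIO the
 * mappings address PCI memory directly and only zdev->fh is updated.
 */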
static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
                                    unsigned long size, unsigned long flags)
{
        struct resource *r;

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (!r)
                return NULL;

        r->start = start;
        r->end = r->start + size - 1;
        r->flags = flags;
        r->name = zdev->res_name;

        if (request_resource(&iomem_resource, r)) {
                kfree(r);
                return NULL;
        }
        return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
        unsigned long addr, size, flags;
        struct resource *res;
        int i, entry;

        snprintf(zdev->res_name, sizeof(zdev->res_name),
                 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (!zdev->bars[i].size)
                        continue;
                entry = zpci_alloc_iomap(zdev);
                if (entry < 0)
                        return entry;
                zdev->bars[i].map_idx = entry;

                /* only MMIO is supported */
                flags = IORESOURCE_MEM;
                if (zdev->bars[i].val & 8)
                        flags |= IORESOURCE_PREFETCH;
                if (zdev->bars[i].val & 4)
                        flags |= IORESOURCE_MEM_64;

                if (zpci_use_mio(zdev))
                        addr = (unsigned long) zdev->bars[i].mio_wt;
                else
                        addr = ZPCI_ADDR(entry);
                size = 1UL << zdev->bars[i].size;

                res = __alloc_res(zdev, addr, size, flags);
                if (!res) {
                        zpci_free_iomap(zdev, entry);
                        return -ENOMEM;
                }
                zdev->bars[i].res = res;
        }
        zdev->has_resources = 1;

        return 0;
}
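
/*
 * The bits tested in bars[i].val above correspond to the low bits of a
 * standard PCI BAR: bit 3 marks a prefetchable BAR and a set bit 2
 * indicates the 64-bit memory BAR type.
 */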
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
        struct resource *res;
        int i;

        pci_lock_rescan_remove();
        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                res = zdev->bars[i].res;
                if (!res)
                        continue;

                release_resource(res);
                pci_bus_remove_resource(zdev->zbus->bus, res);
                zpci_free_iomap(zdev, zdev->bars[i].map_idx);
                zdev->bars[i].res = NULL;
                kfree(res);
        }
        zdev->has_resources = 0;
        pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);
        struct resource *res;
        int i;

        /* The pdev has a reference to the zdev via its bus */
        zpci_zdev_get(zdev);
        if (pdev->is_physfn)
                pdev->no_vf_scan = 1;

        pdev->dev.groups = zpci_attr_groups;
        zpci_map_resources(pdev);

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                res = &pdev->resource[i];
                if (res->parent || !res->flags)
                        continue;
                pci_claim_resource(pdev, i);
        }

        return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);

        zpci_unmap_resources(pdev);
        zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
        struct zpci_dev *zdev = to_zpci(pdev);

        zpci_debug_init_device(zdev, dev_name(&pdev->dev));
        zpci_fmb_enable_device(zdev);

        return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);

        zpci_fmb_disable_device(zdev);
        zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
        spin_lock(&zpci_domain_lock);
        if (test_bit(domain, zpci_domain)) {
                spin_unlock(&zpci_domain_lock);
                pr_err("Domain %04x is already assigned\n", domain);
                return -EEXIST;
        }
        set_bit(domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
        return domain;
}

static int __zpci_alloc_domain(void)
{
        int domain;

        spin_lock(&zpci_domain_lock);
        /*
         * We can always auto allocate domains below ZPCI_NR_DEVICES.
         * There is either a free domain or we have reached the maximum in
         * which case we would have bailed earlier.
         */
        domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
        set_bit(domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
        return domain;
}

int zpci_alloc_domain(int domain)
{
        if (zpci_unique_uid) {
                if (domain)
                        return __zpci_register_domain(domain);
                pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
                update_uid_checking(false);
        }
        return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
        spin_lock(&zpci_domain_lock);
        clear_bit(domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
        u32 fh = zdev->fh;
        int rc = 0;

        if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
                rc = -EIO;
        else
                zpci_update_fh(zdev, fh);
        return rc;
}
693 EXPORT_SYMBOL_GPL(zpci_enable_device);
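
/*
 * The enable/disable pair brackets all state changes of a function: see
 * zpci_hot_reset_device() below, which disables an enabled function,
 * re-enables it and then re-registers the DMA translation table.
 */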
int zpci_disable_device(struct zpci_dev *zdev)
{
        u32 fh = zdev->fh;
        int cc, rc = 0;

        cc = clp_disable_fh(zdev, &fh);
        if (!cc) {
                zpci_update_fh(zdev, fh);
        } else if (cc == CLP_RC_SETPCIFN_ALRDY) {
                pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
                        zdev->fid);
                /* Function is already disabled - update handle */
                rc = clp_refresh_fh(zdev->fid, &fh);
                if (!rc) {
                        zpci_update_fh(zdev, fh);
                        rc = -EINVAL;
                }
        } else {
                rc = -EIO;
        }
        return rc;
}
717 EXPORT_SYMBOL_GPL(zpci_disable_device);
/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the function's internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently, after reset the PCI function requires re-initialization via
 * the common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * device ready checks.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
        u8 status;
        int rc;

        zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
        if (zdev_enabled(zdev)) {
                /* Disables device access, DMAs and IRQs (reset state) */
                rc = zpci_disable_device(zdev);
                /*
                 * Due to a z/VM vs LPAR inconsistency in the error state the
                 * FH may indicate an enabled device but disable says the
                 * device is already disabled; don't treat it as an error here
                 * and try to enable below.
                 */
                if (rc == -EINVAL)
                        rc = 0;
                if (rc)
                        return rc;
        }

        rc = zpci_enable_device(zdev);
        if (rc)
                return rc;

        if (zdev->dma_table)
                rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                        virt_to_phys(zdev->dma_table), &status);
        if (rc) {
                zpci_disable_device(zdev);
                return rc;
        }

        return 0;
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
        struct zpci_dev *zdev;
        int rc;

        zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                return ERR_PTR(-ENOMEM);

        /* FID and Function Handle are the static/dynamic identifiers */
        zdev->fid = fid;
        zdev->fh = fh;

        /* Query function properties and update zdev */
        rc = clp_query_pci_fn(zdev);
        if (rc)
                goto error;
        zdev->state = state;

        kref_init(&zdev->kref);
        mutex_init(&zdev->lock);
        mutex_init(&zdev->kzdev_lock);

        rc = zpci_init_iommu(zdev);
        if (rc)
                goto error;

        rc = zpci_bus_device_register(zdev, &pci_root_ops);
        if (rc)
                goto error_destroy_iommu;

        spin_lock(&zpci_list_lock);
        list_add_tail(&zdev->entry, &zpci_list);
        spin_unlock(&zpci_list_lock);

        return zdev;

error_destroy_iommu:
        zpci_destroy_iommu(zdev);
error:
        zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
        kfree(zdev);
        return ERR_PTR(rc);
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
        enum zpci_state state = zdev->state;

        return state != ZPCI_FN_STATE_RESERVED &&
                state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure occurs,
 * the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
        zpci_update_fh(zdev, fh);
        return zpci_bus_scan_device(zdev);
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
        int rc;

        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);

        if (zdev_enabled(zdev)) {
                rc = zpci_disable_device(zdev);
                if (rc)
                        return rc;
        }

        rc = sclp_pci_deconfigure(zdev->fid);
        zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
        if (rc)
                return rc;
        zdev->state = ZPCI_FN_STATE_STANDBY;

        return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can not be found via
 * get_zdev_by_fid() anymore but may still be accessible via existing
 * references though it will not be functional anymore.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
        if (zdev->has_hp_slot)
                zpci_exit_slot(zdev);
        /*
         * Remove device from zpci_list as it is going away. This also
         * makes sure we ignore subsequent zPCI events for this device.
         */
        spin_lock(&zpci_list_lock);
        list_del(&zdev->entry);
        spin_unlock(&zpci_list_lock);
        zdev->state = ZPCI_FN_STATE_RESERVED;
        zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
        zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
        struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
        int ret;

        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);

        if (zdev_enabled(zdev))
                zpci_disable_device(zdev);

        switch (zdev->state) {
        case ZPCI_FN_STATE_CONFIGURED:
                ret = sclp_pci_deconfigure(zdev->fid);
                zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
                fallthrough;
        case ZPCI_FN_STATE_STANDBY:
                if (zdev->has_hp_slot)
                        zpci_exit_slot(zdev);
                spin_lock(&zpci_list_lock);
                list_del(&zdev->entry);
                spin_unlock(&zpci_list_lock);
                zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
                fallthrough;
        case ZPCI_FN_STATE_RESERVED:
                if (zdev->has_resources)
                        zpci_cleanup_bus_resources(zdev);
                zpci_bus_device_unregister(zdev);
                zpci_destroy_iommu(zdev);
                fallthrough;
        default:
                break;
        }
        zpci_dbg(3, "rem fid:%x\n", zdev->fid);
        kfree_rcu(zdev, rcu);
}

int zpci_report_error(struct pci_dev *pdev,
                      struct zpci_report_error_header *report)
{
        struct zpci_dev *zdev = to_zpci(pdev);

        return sclp_pci_report(report, zdev->fh, zdev->fid);
}
960 EXPORT_SYMBOL(zpci_report_error);
/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
        struct zpci_fib fib = {0};
        u8 status;
        int cc;

        cc = zpci_mod_fc(req, &fib, &status);
        if (cc) {
                zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
                return -EIO;
        }

        return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
        struct zpci_fib fib = {0};
        u8 status;
        int cc;

        cc = zpci_mod_fc(req, &fib, &status);
        if (cc) {
                zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
                return -EIO;
        }

        return 0;
}
static int zpci_mem_init(void)
{
        BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
                     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

        zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
                                           __alignof__(struct zpci_fmb), 0, NULL);
        if (!zdev_fmb_cache)
                goto error_fmb;

        zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
                                   sizeof(*zpci_iomap_start), GFP_KERNEL);
        if (!zpci_iomap_start)
                goto error_iomap;

        zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
                                    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
        if (!zpci_iomap_bitmap)
                goto error_iomap_bitmap;

        if (static_branch_likely(&have_mio))
                clp_setup_writeback_mio();

        return 0;
error_iomap_bitmap:
        kfree(zpci_iomap_start);
error_iomap:
        kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
        return -ENOMEM;
}

static void zpci_mem_exit(void)
{
        kfree(zpci_iomap_bitmap);
        kfree(zpci_iomap_start);
        kmem_cache_destroy(zdev_fmb_cache);
}

1054 static unsigned int s390_pci_probe __initdata = 1;
1055 unsigned int s390_pci_force_floating __initdata;
1056 static unsigned int s390_pci_initialized;
char * __init pcibios_setup(char *str)
{
        if (!strcmp(str, "off")) {
                s390_pci_probe = 0;
                return NULL;
        }
        if (!strcmp(str, "nomio")) {
                S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
                return NULL;
        }
        if (!strcmp(str, "force_floating")) {
                s390_pci_force_floating = 1;
                return NULL;
        }
        if (!strcmp(str, "norid")) {
                s390_pci_no_rid = 1;
                return NULL;
        }
        return str;
}
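
/*
 * These options arrive via the "pci=" kernel parameter, e.g. booting with
 * "pci=nomio" forces the non-MIO access path and "pci=off" disables PCI
 * probing altogether (see the s390_pci_probe check in pci_base_init()).
 */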
bool zpci_is_enabled(void)
{
        return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
        int rc;

        if (!s390_pci_probe)
                return 0;

        if (!test_facility(69) || !test_facility(71)) {
                pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
                return 0;
        }

        if (MACHINE_HAS_PCI_MIO) {
                static_branch_enable(&have_mio);
                system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
        }

        rc = zpci_debug_init();
        if (rc)
                goto out;

        rc = zpci_mem_init();
        if (rc)
                goto out_mem;

        rc = zpci_irq_init();
        if (rc)
                goto out_irq;

        rc = clp_scan_pci_devices();
        if (rc)
                goto out_find;
        zpci_bus_scan_busses();

        s390_pci_initialized = 1;
        return 0;

out_find:
        zpci_irq_exit();
out_irq:
        zpci_mem_exit();
out_mem:
        zpci_debug_exit();
out:
        return rc;
}
1130 subsys_initcall_sync(pci_base_init);