1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * IBM PowerPC Virtual I/O Infrastructure Support.
5 * Copyright (c) 2003,2008 IBM Corp.
6 * Dave Engebretsen engebret@us.ibm.com
7 * Santiago Leon santil@us.ibm.com
8 * Hollis Blanchard <hollisb@us.ibm.com>
10 * Robert Jennings <rcjenn@us.ibm.com>
11 */
13 #include <linux/cpu.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/stat.h>
17 #include <linux/device.h>
18 #include <linux/init.h>
19 #include <linux/slab.h>
20 #include <linux/console.h>
21 #include <linux/export.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/kobject.h>
26 #include <asm/iommu.h>
30 #include <asm/firmware.h>
33 #include <asm/hvcall.h>
35 static struct vio_dev vio_bus_device = { /* fake "parent" device */
36 .name = "vio",
37 .type = "",
38 .dev.init_name = "vio",
39 .dev.bus = &vio_bus_type,
40 };
42 #ifdef CONFIG_PPC_SMLPAR
44 * vio_cmo_pool - A pool of IO memory for CMO use
46 * @size: The size of the pool in bytes
47 * @free: The amount of free memory in the pool
48 */
49 struct vio_cmo_pool {
50 size_t size;
51 size_t free;
52 };
54 /* How many ms to delay queued balance work */
55 #define VIO_CMO_BALANCE_DELAY 100
57 /* Portion out IO memory to CMO devices by this chunk size */
58 #define VIO_CMO_BALANCE_CHUNK 131072
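/*
 * Worked example (numbers are hypothetical): if 1MB of entitlement is
 * available for distribution, a single balance pass hands each
 * under-entitled device at most VIO_CMO_BALANCE_CHUNK (128KB), so eight
 * such devices could each be topped up once before the pass repeats.
 */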
61 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
63 * @viodev: struct vio_dev pointer
64 * @list: pointer to other devices on bus that are being tracked
66 struct vio_cmo_dev_entry {
67 struct vio_dev *viodev;
68 struct list_head list;
69 };
72 * vio_cmo - VIO bus accounting structure for CMO entitlement
74 * @lock: spinlock for entire structure
75 * @balance_q: work queue for balancing system entitlement
76 * @device_list: list of CMO-enabled devices requiring entitlement
77 * @entitled: total system entitlement in bytes
78 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
79 * @excess: pool of excess entitlement not needed for device reserves or spare
80 * @spare: IO memory for device hotplug functionality
81 * @min: minimum necessary for system operation
82 * @desired: desired memory for system operation
83 * @curr: bytes currently allocated
84 * @high: high water mark for IO data usage
86 static struct vio_cmo {
87 spinlock_t lock;
88 struct delayed_work balance_q;
89 struct list_head device_list;
90 size_t entitled;
91 struct vio_cmo_pool reserve;
92 struct vio_cmo_pool excess;
93 size_t spare;
94 size_t min;
95 size_t desired;
96 size_t curr;
97 size_t high;
98 } vio_cmo;
101 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
103 static int vio_cmo_num_OF_devs(void)
105 struct device_node *node_vroot;
106 int count = 0;
109 * Count the number of vdevice entries with an
110 * ibm,my-dma-window OF property
112 node_vroot = of_find_node_by_name(NULL, "vdevice");
114 struct device_node *of_node;
115 struct property *prop;
117 for_each_child_of_node(node_vroot, of_node) {
118 prop = of_find_property(of_node, "ibm,my-dma-window",
119 NULL);
120 if (prop)
121 count++;
124 of_node_put(node_vroot);
125 }
126 return count;
129 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
131 * @viodev: VIO device requesting IO memory
132 * @size: size of allocation requested
134 * Allocations come from memory reserved for the devices and any excess
135 * IO memory available to all devices. The spare pool used to service
136 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
137 * used.
138 *
139 * Return codes:
140 * 0 for successful allocation and -ENOMEM for a failure
142 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
144 unsigned long flags;
145 size_t reserve_free = 0;
146 size_t excess_free = 0;
147 int ret = -ENOMEM;
149 spin_lock_irqsave(&vio_cmo.lock, flags);
151 /* Determine the amount of free entitlement available in reserve */
152 if (viodev->cmo.entitled > viodev->cmo.allocated)
153 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
155 /* If spare is not fulfilled, the excess pool cannot be used. */
156 if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
157 excess_free = vio_cmo.excess.free;
159 /* The request can be satisfied */
160 if ((reserve_free + excess_free) >= size) {
161 vio_cmo.curr += size;
162 if (vio_cmo.curr > vio_cmo.high)
163 vio_cmo.high = vio_cmo.curr;
164 viodev->cmo.allocated += size;
165 size -= min(reserve_free, size);
166 vio_cmo.excess.free -= size;
167 ret = 0;
168 }
170 spin_unlock_irqrestore(&vio_cmo.lock, flags);
171 return ret;
172 }
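/*
 * Illustrative sketch (not part of this file's API): the reserve
 * headroom consulted above, written out as a stand-alone helper. The
 * helper name is hypothetical and exists only to make the accounting
 * concrete.
 */
static inline size_t vio_cmo_reserve_headroom_sketch(struct vio_dev *viodev)
{
	/* entitlement the device can still use before touching the excess pool */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		return viodev->cmo.entitled - viodev->cmo.allocated;
	return 0;
}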
175 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
176 * @viodev: VIO device freeing IO memory
177 * @size: size of deallocation
179 * IO memory is freed by the device back to the correct memory pools.
180 * The spare pool is replenished first from either memory pool, then
181 * the reserve pool is used to reduce device entitlement, the excess
182 * pool is used to increase the reserve pool toward the desired entitlement
183 * target, and then the remaining memory is returned to the pools.
186 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
188 unsigned long flags;
189 size_t spare_needed = 0;
190 size_t excess_freed = 0;
191 size_t reserve_freed = size;
192 size_t tmp;
195 spin_lock_irqsave(&vio_cmo.lock, flags);
196 vio_cmo.curr -= size;
198 /* Amount of memory freed from the excess pool */
199 if (viodev->cmo.allocated > viodev->cmo.entitled) {
200 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
201 viodev->cmo.entitled));
202 reserve_freed -= excess_freed;
205 /* Remove allocation from device */
206 viodev->cmo.allocated -= (reserve_freed + excess_freed);
208 /* Spare is a subset of the reserve pool, replenish it first. */
209 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
212 * Replenish the spare in the reserve pool from the excess pool.
213 * This moves entitlement into the reserve pool.
215 if (spare_needed && excess_freed) {
216 tmp = min(excess_freed, spare_needed);
217 vio_cmo.excess.size -= tmp;
218 vio_cmo.reserve.size += tmp;
219 vio_cmo.spare += tmp;
226 * Replenish the spare in the reserve pool from the reserve pool.
227 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
228 * if needed, and gives it to the spare pool. The amount of used
229 * memory in this pool does not change.
231 if (spare_needed && reserve_freed) {
232 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
234 vio_cmo.spare += tmp;
235 viodev->cmo.entitled -= tmp;
236 reserve_freed -= tmp;
242 * Increase the reserve pool until the desired allocation is met.
243 * Move an allocation freed from the excess pool into the reserve
244 * pool and schedule a balance operation.
246 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
247 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
249 vio_cmo.excess.size -= tmp;
250 vio_cmo.reserve.size += tmp;
255 /* Return memory from the excess pool to that pool */
257 vio_cmo.excess.free += excess_freed;
260 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
261 spin_unlock_irqrestore(&vio_cmo.lock, flags);
265 * vio_cmo_entitlement_update - Manage system entitlement changes
267 * @new_entitlement: new system entitlement to attempt to accommodate
269 * Increases in entitlement will be used to fulfill the spare entitlement
270 * and the rest is given to the excess pool. Decreases, if they are
271 * possible, come from the excess pool and from unused device entitlement.
273 * Returns: 0 on success, -ENOMEM when the change cannot be made
275 int vio_cmo_entitlement_update(size_t new_entitlement)
277 struct vio_dev *viodev;
278 struct vio_cmo_dev_entry *dev_ent;
279 unsigned long flags;
280 size_t avail, delta, tmp;
282 spin_lock_irqsave(&vio_cmo.lock, flags);
284 /* Entitlement increases */
285 if (new_entitlement > vio_cmo.entitled) {
286 delta = new_entitlement - vio_cmo.entitled;
288 /* Fulfill spare allocation */
289 if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
290 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
291 vio_cmo.spare += tmp;
292 vio_cmo.reserve.size += tmp;
296 /* Remaining new allocation goes to the excess pool */
297 vio_cmo.entitled += delta;
298 vio_cmo.excess.size += delta;
299 vio_cmo.excess.free += delta;
304 /* Entitlement decreases */
305 delta = vio_cmo.entitled - new_entitlement;
306 avail = vio_cmo.excess.free;
309 * Need to check how much unused entitlement each device can
310 * sacrifice to fulfill entitlement change.
312 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
316 viodev = dev_ent->viodev;
317 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
318 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
319 avail += viodev->cmo.entitled -
320 max_t(size_t, viodev->cmo.allocated,
321 VIO_CMO_MIN_ENT);
324 if (delta <= avail) {
325 vio_cmo.entitled -= delta;
327 /* Take entitlement from the excess pool first */
328 tmp = min(vio_cmo.excess.free, delta);
329 vio_cmo.excess.size -= tmp;
330 vio_cmo.excess.free -= tmp;
334 * Remove all but VIO_CMO_MIN_ENT bytes from devices
335 * until entitlement change is served
337 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
341 viodev = dev_ent->viodev;
343 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
344 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
345 tmp = viodev->cmo.entitled -
346 max_t(size_t, viodev->cmo.allocated,
347 VIO_CMO_MIN_ENT);
348 viodev->cmo.entitled -= min(tmp, delta);
349 delta -= min(tmp, delta);
352 spin_unlock_irqrestore(&vio_cmo.lock, flags);
357 schedule_delayed_work(&vio_cmo.balance_q, 0);
358 spin_unlock_irqrestore(&vio_cmo.lock, flags);
359 return 0;
360 }
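/*
 * Caller sketch (hypothetical; the real callers live in pseries
 * platform code): apply a new system entitlement value and report when
 * it cannot be shrunk because devices hold too much reserved memory.
 */
static int vio_cmo_entitlement_sketch(size_t new_bytes)
{
	int rc = vio_cmo_entitlement_update(new_bytes);

	if (rc == -ENOMEM)
		pr_warn("vio: cannot reduce IO entitlement to %zu bytes\n",
			new_bytes);
	return rc;
}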
363 * vio_cmo_balance - Balance entitlement among devices
365 * @work: work queue structure for this operation
367 * Any system entitlement above the minimum needed for devices, or
368 * already allocated to devices, can be distributed to the devices.
369 * The list of devices is iterated through to recalculate the desired
370 * entitlement level and to determine how much entitlement above the
371 * minimum entitlement is allocated to devices.
373 * Small chunks of the available entitlement are given to devices until
374 * their requirements are fulfilled or there is no entitlement left to give.
375 * Upon completion sizes of the reserve and excess pools are calculated.
377 * The system minimum entitlement level is also recalculated here.
378 * Entitlement will be reserved for devices even after vio_bus_remove to
379 * accommodate reloading the driver. The OF tree is walked to count the
380 * number of devices present and this will remove entitlement for devices
381 * that have actually left the system after having vio_bus_remove called.
383 static void vio_cmo_balance(struct work_struct *work)
385 struct vio_cmo *cmo;
386 struct vio_dev *viodev;
387 struct vio_cmo_dev_entry *dev_ent;
388 unsigned long flags;
389 size_t avail = 0, level, chunk, need;
390 int devcount = 0, fulfilled;
392 cmo = container_of(work, struct vio_cmo, balance_q.work);
394 spin_lock_irqsave(&vio_cmo.lock, flags);
396 /* Calculate minimum entitlement and fulfill spare */
397 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
398 BUG_ON(cmo->min > cmo->entitled);
399 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
400 cmo->min += cmo->spare;
401 cmo->desired = cmo->min;
404 * Determine how much entitlement is available and reset device
407 avail = cmo->entitled - cmo->spare;
408 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
409 viodev = dev_ent->viodev;
410 devcount++;
411 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
412 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
413 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
417 * Having provided each device with the minimum entitlement, loop
418 * over the devices portioning out the remaining entitlement
419 * until there is nothing left.
421 level = VIO_CMO_MIN_ENT;
422 while (avail) {
423 fulfilled = 0;
424 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
425 viodev = dev_ent->viodev;
427 if (viodev->cmo.desired <= level) {
428 fulfilled++;
429 continue;
430 }
433 * Give the device up to VIO_CMO_BALANCE_CHUNK
434 * bytes of entitlement, but do not exceed the
435 * desired level of entitlement for the device.
437 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
438 chunk = min(chunk, (viodev->cmo.desired -
439 viodev->cmo.entitled));
440 viodev->cmo.entitled += chunk;
443 * If the memory for this entitlement increase was
444 * already allocated to the device it does not come
445 * from the available pool being portioned out.
447 need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
448 max(viodev->cmo.allocated, level);
449 avail -= need;
452 if (fulfilled == devcount)
453 break;
454 level += VIO_CMO_BALANCE_CHUNK;
457 /* Calculate new reserve and excess pool sizes */
458 cmo->reserve.size = cmo->min;
459 cmo->excess.free = 0;
460 cmo->excess.size = 0;
461 need = 0;
462 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
463 viodev = dev_ent->viodev;
464 /* Calculated reserve size above the minimum entitlement */
465 if (viodev->cmo.entitled)
466 cmo->reserve.size += (viodev->cmo.entitled -
467 VIO_CMO_MIN_ENT);
468 /* Calculated used excess entitlement */
469 if (viodev->cmo.allocated > viodev->cmo.entitled)
470 need += viodev->cmo.allocated - viodev->cmo.entitled;
472 cmo->excess.size = cmo->entitled - cmo->reserve.size;
473 cmo->excess.free = cmo->excess.size - need;
475 cancel_delayed_work(to_delayed_work(work));
476 spin_unlock_irqrestore(&vio_cmo.lock, flags);
479 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
480 dma_addr_t *dma_handle, gfp_t flag,
483 struct vio_dev *viodev = to_vio_dev(dev);
484 void *ret;
486 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
487 atomic_inc(&viodev->cmo.allocs_failed);
488 return NULL;
489 }
491 ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
492 dma_handle, dev->coherent_dma_mask, flag,
493 dev_to_node(dev));
494 if (unlikely(ret == NULL)) {
495 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
496 atomic_inc(&viodev->cmo.allocs_failed);
497 }
499 return ret;
500 }
502 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
503 void *vaddr, dma_addr_t dma_handle,
506 struct vio_dev *viodev = to_vio_dev(dev);
508 iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
509 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
512 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
513 unsigned long offset, size_t size,
514 enum dma_data_direction direction,
517 struct vio_dev *viodev = to_vio_dev(dev);
518 struct iommu_table *tbl = get_iommu_table_base(dev);
519 dma_addr_t ret = DMA_MAPPING_ERROR;
521 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
522 goto out_fail;
523 ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
524 direction, attrs);
525 if (unlikely(ret == DMA_MAPPING_ERROR))
526 goto out_deallocate;
527 return ret;
529 out_deallocate:
530 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
531 out_fail:
532 atomic_inc(&viodev->cmo.allocs_failed);
533 return DMA_MAPPING_ERROR;
536 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
538 enum dma_data_direction direction,
541 struct vio_dev *viodev = to_vio_dev(dev);
542 struct iommu_table *tbl = get_iommu_table_base(dev);
544 iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
545 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
548 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
549 int nelems, enum dma_data_direction direction,
552 struct vio_dev *viodev = to_vio_dev(dev);
553 struct iommu_table *tbl = get_iommu_table_base(dev);
554 struct scatterlist *sgl;
555 int ret, count;
556 size_t alloc_size = 0;
558 for_each_sg(sglist, sgl, nelems, count)
559 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
561 if (vio_cmo_alloc(viodev, alloc_size))
562 goto out_fail;
563 ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
564 direction, attrs);
565 if (unlikely(!ret))
566 goto out_deallocate;
568 for_each_sg(sglist, sgl, ret, count)
569 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
570 if (alloc_size)
571 vio_cmo_dealloc(viodev, alloc_size);
572 return ret;
574 out_deallocate:
575 vio_cmo_dealloc(viodev, alloc_size);
576 out_fail:
577 atomic_inc(&viodev->cmo.allocs_failed);
578 return 0;
579 }
581 static void vio_dma_iommu_unmap_sg(struct device *dev,
582 struct scatterlist *sglist, int nelems,
583 enum dma_data_direction direction,
586 struct vio_dev *viodev = to_vio_dev(dev);
587 struct iommu_table *tbl = get_iommu_table_base(dev);
588 struct scatterlist *sgl;
589 size_t alloc_size = 0;
590 int count;
592 for_each_sg(sglist, sgl, nelems, count)
593 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
595 ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
596 vio_cmo_dealloc(viodev, alloc_size);
599 static const struct dma_map_ops vio_dma_mapping_ops = {
600 .alloc = vio_dma_iommu_alloc_coherent,
601 .free = vio_dma_iommu_free_coherent,
602 .map_sg = vio_dma_iommu_map_sg,
603 .unmap_sg = vio_dma_iommu_unmap_sg,
604 .map_page = vio_dma_iommu_map_page,
605 .unmap_page = vio_dma_iommu_unmap_page,
606 .dma_supported = dma_iommu_dma_supported,
607 .get_required_mask = dma_iommu_get_required_mask,
608 .mmap = dma_common_mmap,
609 .get_sgtable = dma_common_get_sgtable,
610 };
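/*
 * Driver-side sketch (buffer and length are hypothetical): with the
 * ops above installed, ordinary DMA API calls on a vio device are
 * charged against its CMO entitlement and may fail once it runs out.
 */
static int vio_dma_usage_sketch(struct vio_dev *viodev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(&viodev->dev, buf, len,
					   DMA_TO_DEVICE);

	if (dma_mapping_error(&viodev->dev, handle))
		return -ENOMEM;	/* TCE space or entitlement exhausted */

	dma_unmap_single(&viodev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}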
613 * vio_cmo_set_dev_desired - Set desired entitlement for a device
615 * @viodev: struct vio_dev for device to alter
616 * @desired: new desired entitlement level in bytes
618 * For use by devices to request a change to their entitlement at runtime or
619 * through sysfs. The desired entitlement level is changed and a balancing
620 * of system resources is scheduled to run in the future.
622 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
625 struct vio_cmo_dev_entry *dev_ent;
626 unsigned long flags;
627 int found = 0;
628 if (!firmware_has_feature(FW_FEATURE_CMO))
631 spin_lock_irqsave(&vio_cmo.lock, flags);
632 if (desired < VIO_CMO_MIN_ENT)
633 desired = VIO_CMO_MIN_ENT;
636 * Changes will not be made for devices not in the device list.
637 * If it is not in the device list, then no driver is loaded
638 * for the device and it cannot receive entitlement.
640 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
641 if (viodev == dev_ent->viodev) {
646 spin_unlock_irqrestore(&vio_cmo.lock, flags);
650 /* Increase/decrease in desired device entitlement */
651 if (desired >= viodev->cmo.desired) {
652 /* Just bump the bus and device values prior to a balance */
653 vio_cmo.desired += desired - viodev->cmo.desired;
654 viodev->cmo.desired = desired;
656 /* Decrease bus and device values for desired entitlement */
657 vio_cmo.desired -= viodev->cmo.desired - desired;
658 viodev->cmo.desired = desired;
660 * If less entitlement is desired than current entitlement, move
661 * any reserve memory in the change region to the excess pool.
663 if (viodev->cmo.entitled > desired) {
664 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
665 vio_cmo.excess.size += viodev->cmo.entitled - desired;
667 * If entitlement moving from the reserve pool to the
668 * excess pool is currently unused, add to the excess
671 if (viodev->cmo.allocated < viodev->cmo.entitled)
672 vio_cmo.excess.free += viodev->cmo.entitled -
673 max(viodev->cmo.allocated, desired);
674 viodev->cmo.entitled = desired;
677 schedule_delayed_work(&vio_cmo.balance_q, 0);
678 spin_unlock_irqrestore(&vio_cmo.lock, flags);
679 }
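/*
 * Sketch of a driver raising its desired entitlement at runtime (the
 * helper and sizes are hypothetical); user space can achieve the same
 * effect through the cmo_desired sysfs attribute defined further down.
 */
static void vio_cmo_desired_sketch(struct vio_dev *viodev, size_t extra)
{
	vio_cmo_set_dev_desired(viodev, viodev->cmo.desired + extra);
}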
682 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
684 * @viodev: Pointer to struct vio_dev for the device
686 * Determine the device's IO memory entitlement needs, attempting
687 * to satisfy the system minimum entitlement at first and scheduling
688 * a balance operation to take care of the rest at a later time.
690 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
691 * -ENOMEM when entitlement is not available for device or
692 * device entry.
695 static int vio_cmo_bus_probe(struct vio_dev *viodev)
697 struct vio_cmo_dev_entry *dev_ent;
698 struct device *dev = &viodev->dev;
699 struct iommu_table *tbl;
700 struct vio_driver *viodrv = to_vio_driver(dev->driver);
701 unsigned long flags;
702 size_t size;
703 bool dma_capable = false;
705 tbl = get_iommu_table_base(dev);
707 /* A device requires entitlement if it has a DMA window property */
708 switch (viodev->family) {
710 if (of_get_property(viodev->dev.of_node,
711 "ibm,my-dma-window", NULL))
718 dev_warn(dev, "unknown device family: %d\n", viodev->family);
723 /* Configure entitlement for the device. */
724 if (dma_capable) {
725 /* Check that the driver is CMO enabled and get desired DMA */
726 if (!viodrv->get_desired_dma) {
727 dev_err(dev, "%s: device driver does not support CMO\n",
728 __func__);
729 return -EINVAL;
730 }
732 viodev->cmo.desired =
733 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
734 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
735 viodev->cmo.desired = VIO_CMO_MIN_ENT;
736 size = VIO_CMO_MIN_ENT;
738 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
739 GFP_KERNEL);
740 if (!dev_ent)
741 return -ENOMEM;
743 dev_ent->viodev = viodev;
744 spin_lock_irqsave(&vio_cmo.lock, flags);
745 list_add(&dev_ent->list, &vio_cmo.device_list);
746 } else {
747 viodev->cmo.desired = 0;
749 spin_lock_irqsave(&vio_cmo.lock, flags);
753 * If the needs for vio_cmo.min have not changed since they
754 * were last set, the number of devices in the OF tree has
755 * been constant and the IO memory for this is already in
756 * the reserve pool.
757 */
758 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
759 VIO_CMO_MIN_ENT)) {
760 /* Update desired entitlement if the device requires it */
762 vio_cmo.desired += (viodev->cmo.desired -
763 VIO_CMO_MIN_ENT);
767 tmp = vio_cmo.spare + vio_cmo.excess.free;
769 dev_err(dev, "%s: insufficient free "
770 "entitlement to add device. "
771 "Need %lu, have %lu\n", __func__,
772 size, tmp);
773 spin_unlock_irqrestore(&vio_cmo.lock, flags);
777 /* Use excess pool first to fulfill request */
778 tmp = min(size, vio_cmo.excess.free);
779 vio_cmo.excess.free -= tmp;
780 vio_cmo.excess.size -= tmp;
781 vio_cmo.reserve.size += tmp;
783 /* Use spare if excess pool was insufficient */
784 vio_cmo.spare -= size - tmp;
786 /* Update bus accounting */
788 vio_cmo.desired += viodev->cmo.desired;
790 spin_unlock_irqrestore(&vio_cmo.lock, flags);
791 return 0;
792 }
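/*
 * Driver half of the contract checked above, as a sketch: a CMO-aware
 * vio_driver supplies get_desired_dma() so the bus can size the
 * device's entitlement. The arithmetic is purely illustrative.
 */
static unsigned long vio_get_desired_dma_sketch(struct vio_dev *vdev)
{
	/* e.g. sixteen in-flight 4K buffers plus one page of descriptors */
	return 16 * PAGE_SIZE + PAGE_SIZE;
}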
795 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
797 * @viodev: Pointer to struct vio_dev for the device
799 * Remove the device from the cmo device list. The minimum entitlement
800 * will be reserved for the device as long as it is in the system. The
801 * rest of the entitlement the device had been allocated will be returned
802 * to the system.
803 */
804 static void vio_cmo_bus_remove(struct vio_dev *viodev)
806 struct vio_cmo_dev_entry *dev_ent;
807 unsigned long flags;
808 size_t tmp;
810 spin_lock_irqsave(&vio_cmo.lock, flags);
811 if (viodev->cmo.allocated) {
812 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
813 "allocated after remove operation.\n",
814 __func__, viodev->cmo.allocated);
819 * Remove the device from the device list being maintained for
820 * CMO enabled devices.
822 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
823 if (viodev == dev_ent->viodev) {
824 list_del(&dev_ent->list);
830 * Devices may not require any entitlement and they do not need
831 * to be processed. Otherwise, return the device's entitlement
834 if (viodev->cmo.entitled) {
836 * This device has not yet left the OF tree, its
837 * minimum entitlement remains in vio_cmo.min and
840 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
843 * Save min allocation for device in reserve as long
844 * as it exists in OF tree as determined by later
847 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
849 /* Replenish spare from freed reserve pool */
850 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
851 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
852 vio_cmo.spare));
853 vio_cmo.spare += tmp;
854 viodev->cmo.entitled -= tmp;
857 /* Remaining reserve goes to excess pool */
858 vio_cmo.excess.size += viodev->cmo.entitled;
859 vio_cmo.excess.free += viodev->cmo.entitled;
860 vio_cmo.reserve.size -= viodev->cmo.entitled;
863 * Until the device is removed it will keep a
864 * minimum entitlement; this will guarantee that
865 * a module unload/load cycle will succeed.
867 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
868 viodev->cmo.desired = VIO_CMO_MIN_ENT;
869 atomic_set(&viodev->cmo.allocs_failed, 0);
872 spin_unlock_irqrestore(&vio_cmo.lock, flags);
875 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
877 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
881 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
883 * Set up the reserve and excess entitlement pools based on available
884 * system entitlement and the number of devices in the OF tree that
885 * require entitlement in the reserve pool.
887 static void vio_cmo_bus_init(void)
889 struct hvcall_mpp_data mpp_data;
890 int err;
892 memset(&vio_cmo, 0, sizeof(struct vio_cmo));
893 spin_lock_init(&vio_cmo.lock);
894 INIT_LIST_HEAD(&vio_cmo.device_list);
895 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
897 /* Get current system entitlement */
898 err = h_get_mpp(&mpp_data);
901 * On failure, continue with entitlement set to 0, will panic()
902 * later when spare is reserved.
904 if (err != H_SUCCESS) {
905 printk(KERN_ERR "%s: unable to determine system IO "
906 "entitlement. (%d)\n", __func__, err);
907 vio_cmo.entitled = 0;
909 vio_cmo.entitled = mpp_data.entitled_mem;
912 /* Set reservation and check against entitlement */
913 vio_cmo.spare = VIO_CMO_MIN_ENT;
914 vio_cmo.reserve.size = vio_cmo.spare;
915 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
916 VIO_CMO_MIN_ENT);
917 if (vio_cmo.reserve.size > vio_cmo.entitled) {
918 printk(KERN_ERR "%s: insufficient system entitlement\n",
920 panic("%s: Insufficient system entitlement", __func__);
923 /* Set the remaining accounting variables */
924 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
925 vio_cmo.excess.free = vio_cmo.excess.size;
926 vio_cmo.min = vio_cmo.reserve.size;
927 vio_cmo.desired = vio_cmo.reserve.size;
930 /* sysfs device functions and data structures for CMO */
932 #define viodev_cmo_rd_attr(name) \
933 static ssize_t cmo_##name##_show(struct device *dev, \
934 struct device_attribute *attr, \
935 char *buf) \
936 { \
937 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
938 }
940 static ssize_t cmo_allocs_failed_show(struct device *dev,
941 struct device_attribute *attr, char *buf)
943 struct vio_dev *viodev = to_vio_dev(dev);
944 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
947 static ssize_t cmo_allocs_failed_store(struct device *dev,
948 struct device_attribute *attr, const char *buf, size_t count)
950 struct vio_dev *viodev = to_vio_dev(dev);
951 atomic_set(&viodev->cmo.allocs_failed, 0);
952 return count;
955 static ssize_t cmo_desired_store(struct device *dev,
956 struct device_attribute *attr, const char *buf, size_t count)
958 struct vio_dev *viodev = to_vio_dev(dev);
959 size_t new_desired;
960 int ret;
962 ret = kstrtoul(buf, 10, &new_desired);
963 if (ret)
964 return ret;
966 vio_cmo_set_dev_desired(viodev, new_desired);
967 return count;
970 viodev_cmo_rd_attr(desired);
971 viodev_cmo_rd_attr(entitled);
972 viodev_cmo_rd_attr(allocated);
974 static ssize_t name_show(struct device *, struct device_attribute *, char *);
975 static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
976 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
979 static struct device_attribute dev_attr_name;
980 static struct device_attribute dev_attr_devspec;
981 static struct device_attribute dev_attr_modalias;
983 static DEVICE_ATTR_RO(cmo_entitled);
984 static DEVICE_ATTR_RO(cmo_allocated);
985 static DEVICE_ATTR_RW(cmo_desired);
986 static DEVICE_ATTR_RW(cmo_allocs_failed);
988 static struct attribute *vio_cmo_dev_attrs[] = {
990 &dev_attr_devspec.attr,
991 &dev_attr_modalias.attr,
992 &dev_attr_cmo_entitled.attr,
993 &dev_attr_cmo_allocated.attr,
994 &dev_attr_cmo_desired.attr,
995 &dev_attr_cmo_allocs_failed.attr,
998 ATTRIBUTE_GROUPS(vio_cmo_dev);
1000 /* sysfs bus functions and data structures for CMO */
1002 #define viobus_cmo_rd_attr(name) \
1003 static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf) \
1004 { \
1005 return sprintf(buf, "%lu\n", vio_cmo.name); \
1006 } \
1007 static struct bus_attribute bus_attr_cmo_bus_##name = \
1008 __ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)
1010 #define viobus_cmo_pool_rd_attr(name, var) \
1011 static ssize_t \
1012 cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \
1013 { \
1014 return sprintf(buf, "%lu\n", vio_cmo.name.var); \
1015 } \
1016 static BUS_ATTR_RO(cmo_##name##_##var)
1018 viobus_cmo_rd_attr(entitled);
1019 viobus_cmo_rd_attr(spare);
1020 viobus_cmo_rd_attr(min);
1021 viobus_cmo_rd_attr(desired);
1022 viobus_cmo_rd_attr(curr);
1023 viobus_cmo_pool_rd_attr(reserve, size);
1024 viobus_cmo_pool_rd_attr(excess, size);
1025 viobus_cmo_pool_rd_attr(excess, free);
1027 static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
1029 return sprintf(buf, "%lu\n", vio_cmo.high);
1032 static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
1033 size_t count)
1035 unsigned long flags;
1037 spin_lock_irqsave(&vio_cmo.lock, flags);
1038 vio_cmo.high = vio_cmo.curr;
1039 spin_unlock_irqrestore(&vio_cmo.lock, flags);
1041 return count;
1043 static BUS_ATTR_RW(cmo_high);
1045 static struct attribute *vio_bus_attrs[] = {
1046 &bus_attr_cmo_bus_entitled.attr,
1047 &bus_attr_cmo_bus_spare.attr,
1048 &bus_attr_cmo_bus_min.attr,
1049 &bus_attr_cmo_bus_desired.attr,
1050 &bus_attr_cmo_bus_curr.attr,
1051 &bus_attr_cmo_high.attr,
1052 &bus_attr_cmo_reserve_size.attr,
1053 &bus_attr_cmo_excess_size.attr,
1054 &bus_attr_cmo_excess_free.attr,
1057 ATTRIBUTE_GROUPS(vio_bus);
1059 static void vio_cmo_sysfs_init(void)
1061 vio_bus_type.dev_groups = vio_cmo_dev_groups;
1062 vio_bus_type.bus_groups = vio_bus_groups;
1064 #else /* CONFIG_PPC_SMLPAR */
1065 int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
1066 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1067 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1068 static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1069 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1070 static void vio_cmo_bus_init(void) {}
1071 static void vio_cmo_sysfs_init(void) { }
1072 #endif /* CONFIG_PPC_SMLPAR */
1073 EXPORT_SYMBOL(vio_cmo_entitlement_update);
1074 EXPORT_SYMBOL(vio_cmo_set_dev_desired);
1078 * Platform Facilities Option (PFO) support
1082 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
1084 * @vdev: Pointer to a struct vio_dev for device
1085 * @op: Pointer to a struct vio_pfo_op for the operation parameters
1087 * Calls the hypervisor to synchronously perform the PFO operation
1088 * described in @op. In the case of a busy response from the hypervisor,
1089 * the operation will be re-submitted indefinitely unless a non-zero timeout
1090 * is specified or an error occurs. The timeout places a limit on when to
1091 * stop re-submitting an operation; the total time can be exceeded if an
1092 * operation is in progress.
1094 * On return, op->hcall_err is set to the return from the last h_cop_op
1095 * call, or to 0 if an error not involving the h_call was encountered.
1100 * -EINVAL if the h_call fails due to an invalid parameter,
1101 * -E2BIG if the h_call can not be performed synchronously,
1102 * -EBUSY if a timeout is specified and has elapsed,
1103 * -EACCES if the memory area for data/status has been rescinded, or
1104 * -EPERM if a hardware fault has been indicated
1106 int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
1108 struct device *dev = &vdev->dev;
1109 unsigned long deadline = 0;
1110 long hret = 0;
1111 int ret = 0;
1113 if (op->timeout)
1114 deadline = jiffies + msecs_to_jiffies(op->timeout);
1116 while (true) {
1117 hret = plpar_hcall_norets(H_COP, op->flags,
1118 vdev->resource_id,
1119 op->in, op->inlen, op->out,
1120 op->outlen, op->csbcpb);
1122 if (hret == H_SUCCESS ||
1123 (hret != H_NOT_ENOUGH_RESOURCES &&
1124 hret != H_BUSY && hret != H_RESOURCE) ||
1125 (op->timeout && time_after(jiffies, deadline)))
1128 dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
1145 case H_NOT_ENOUGH_RESOURCES:
1156 dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
1157 __func__, ret, hret);
1159 op->hcall_err = hret;
1160 return ret;
1161 }
1162 EXPORT_SYMBOL(vio_h_cop_sync);
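/*
 * Caller sketch (buffers are hypothetical; the real users are PFO
 * drivers such as the nx crypto driver): describe one co-processor
 * request and submit it synchronously, capping H_BUSY retries at two
 * seconds. Field names follow struct vio_pfo_op in asm/vio.h.
 */
static int vio_pfo_request_sketch(struct vio_dev *vdev, u64 in, u64 inlen,
				  u64 out, u64 outlen, u64 csbcpb)
{
	struct vio_pfo_op op = {
		.flags   = 0,
		.in      = in,
		.inlen   = inlen,
		.out     = out,
		.outlen  = outlen,
		.csbcpb  = csbcpb,
		.timeout = 2000,	/* milliseconds */
	};

	return vio_h_cop_sync(vdev, &op);
}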
1164 static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1166 const __be32 *dma_window;
1167 struct iommu_table *tbl;
1168 unsigned long offset, size;
1170 dma_window = of_get_property(dev->dev.of_node,
1171 "ibm,my-dma-window", NULL);
1175 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
1176 if (tbl == NULL)
1177 return NULL;
1179 kref_init(&tbl->it_kref);
1181 of_parse_dma_window(dev->dev.of_node, dma_window,
1182 &tbl->it_index, &offset, &size);
1184 /* TCE table size - measured in tce entries */
1185 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
1186 tbl->it_size = size >> tbl->it_page_shift;
1187 /* offset for VIO should always be 0 */
1188 tbl->it_offset = offset >> tbl->it_page_shift;
1190 tbl->it_type = TCE_VB;
1191 tbl->it_blocksize = 16;
1193 if (firmware_has_feature(FW_FEATURE_LPAR))
1194 tbl->it_ops = &iommu_table_lpar_multi_ops;
1196 tbl->it_ops = &iommu_table_pseries_ops;
1198 return iommu_init_table(tbl, -1, 0, 0);
1202 * vio_match_device: - Tell if a VIO device has a matching
1203 * VIO device id structure.
1204 * @ids: array of VIO device id structures to search in
1205 * @dev: the VIO device structure to match against
1207 * Used by a driver to check whether a VIO device present in the
1208 * system is in its list of supported devices. Returns the matching
1209 * vio_device_id structure or NULL if there is no match.
1211 static const struct vio_device_id *vio_match_device(
1212 const struct vio_device_id *ids, const struct vio_dev *dev)
1214 while (ids->type[0] != '\0') {
1215 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
1216 of_device_is_compatible(dev->dev.of_node,
1225 * Convert from struct device to struct vio_dev and pass to driver.
1226 * dev->driver has already been set by generic code because vio_bus_match
1227 * succeeded.
1228 */
1229 static int vio_bus_probe(struct device *dev)
1231 struct vio_dev *viodev = to_vio_dev(dev);
1232 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1233 const struct vio_device_id *id;
1234 int error = -ENODEV;
1236 if (!viodrv->probe)
1237 return error;
1239 id = vio_match_device(viodrv->id_table, viodev);
1240 if (id) {
1241 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1242 if (firmware_has_feature(FW_FEATURE_CMO)) {
1243 error = vio_cmo_bus_probe(viodev);
1244 if (error)
1245 return error;
1246 }
1247 error = viodrv->probe(viodev, id);
1248 if (error && firmware_has_feature(FW_FEATURE_CMO))
1249 vio_cmo_bus_remove(viodev);
1250 }
1252 return error;
1253 }
1255 /* convert from struct device to struct vio_dev and pass to driver. */
1256 static int vio_bus_remove(struct device *dev)
1258 struct vio_dev *viodev = to_vio_dev(dev);
1259 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1260 struct device *devptr;
1261 int ret = 1;
1264 * Hold a reference to the device after the remove function is called
1265 * to allow for CMO accounting cleanup for the device.
1267 devptr = get_device(dev);
1269 if (viodrv->remove)
1270 ret = viodrv->remove(viodev);
1272 if (!ret && firmware_has_feature(FW_FEATURE_CMO))
1273 vio_cmo_bus_remove(viodev);
1275 put_device(devptr);
1276 return ret;
1277 }
1280 * vio_register_driver: - Register a new vio driver
1281 * @viodrv: The vio_driver structure to be registered.
1283 int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
1284 const char *mod_name)
1286 pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
1288 /* fill in 'struct driver' fields */
1289 viodrv->driver.name = viodrv->name;
1290 viodrv->driver.pm = viodrv->pm;
1291 viodrv->driver.bus = &vio_bus_type;
1292 viodrv->driver.owner = owner;
1293 viodrv->driver.mod_name = mod_name;
1295 return driver_register(&viodrv->driver);
1297 EXPORT_SYMBOL(__vio_register_driver);
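/*
 * Registration sketch for a hypothetical driver built on the helper
 * above; drivers normally spell this via the vio_register_driver()
 * wrapper macro from asm/vio.h. The id strings are examples only.
 */
static int sketch_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	return 0;	/* claim the device */
}

static int sketch_remove(struct vio_dev *vdev)
{
	return 0;
}

static const struct vio_device_id sketch_ids[] = {
	{ "network", "IBM,example" },
	{ "", "" },
};

static struct vio_driver sketch_driver = {
	.name	  = "sketch",
	.id_table = sketch_ids,
	.probe	  = sketch_probe,
	.remove	  = sketch_remove,
};

/* a module init function would then call vio_register_driver(&sketch_driver) */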
1300 * vio_unregister_driver - Remove registration of vio driver.
1301 * @viodrv: The vio_driver struct to be removed from registration
1303 void vio_unregister_driver(struct vio_driver *viodrv)
1305 driver_unregister(&viodrv->driver);
1307 EXPORT_SYMBOL(vio_unregister_driver);
1309 /* vio_dev refcount hit 0 */
1310 static void vio_dev_release(struct device *dev)
1312 struct iommu_table *tbl = get_iommu_table_base(dev);
1315 iommu_tce_table_put(tbl);
1316 of_node_put(dev->of_node);
1317 kfree(to_vio_dev(dev));
1321 * vio_register_device_node: - Register a new vio device.
1322 * @of_node: The OF node for this device.
1324 * Creates and initializes a vio_dev structure from the data in
1325 * of_node and adds it to the list of virtual devices.
1326 * Returns a pointer to the created vio_dev or NULL if node has
1327 * NULL device_type or compatible fields.
1329 struct vio_dev *vio_register_device_node(struct device_node *of_node)
1331 struct vio_dev *viodev;
1332 struct device_node *parent_node;
1333 const __be32 *prop;
1334 enum vio_dev_family family;
1337 * Determine if this node is under the /vdevice node or under the
1338 * /ibm,platform-facilities node. This decides the device's family.
1340 parent_node = of_get_parent(of_node);
1342 if (of_node_is_type(parent_node, "ibm,platform-facilities"))
1343 family = PFO;
1344 else if (of_node_is_type(parent_node, "vdevice"))
1345 family = VDEVICE;
1346 else {
1347 pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
1351 of_node_put(parent_node);
1354 of_node_put(parent_node);
1356 pr_warn("%s: could not determine the parent of node %pOFn.\n",
1361 if (family == PFO) {
1362 if (of_get_property(of_node, "interrupt-controller", NULL)) {
1363 pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
1369 /* allocate a vio_dev for this node */
1370 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
1371 if (viodev == NULL) {
1372 pr_warn("%s: allocation failure for VIO device.\n", __func__);
1376 /* we need the 'device_type' property, in order to match with drivers */
1377 viodev->family = family;
1378 if (viodev->family == VDEVICE) {
1379 unsigned int unit_address;
1381 viodev->type = of_node_get_device_type(of_node);
1382 if (!viodev->type) {
1383 pr_warn("%s: node %pOFn is missing the 'device_type' "
1384 "property.\n", __func__, of_node);
1388 prop = of_get_property(of_node, "reg", NULL);
1389 if (prop == NULL) {
1390 pr_warn("%s: node %pOFn missing 'reg'\n",
1394 unit_address = of_read_number(prop, 1);
1395 dev_set_name(&viodev->dev, "%x", unit_address);
1396 viodev->irq = irq_of_parse_and_map(of_node, 0);
1397 viodev->unit_address = unit_address;
1398 } else {
1399 /* PFO devices need their resource_id for submitting COP_OPs
1400 * This is an optional field for devices, but is required when
1401 * performing synchronous ops */
1402 prop = of_get_property(of_node, "ibm,resource-id", NULL);
1403 if (prop != NULL)
1404 viodev->resource_id = of_read_number(prop, 1);
1406 dev_set_name(&viodev->dev, "%pOFn", of_node);
1407 viodev->type = dev_name(&viodev->dev);
1408 viodev->irq = 0;
1409 }
1411 viodev->name = of_node->name;
1412 viodev->dev.of_node = of_node_get(of_node);
1414 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1416 /* init generic 'struct device' fields: */
1417 viodev->dev.parent = &vio_bus_device.dev;
1418 viodev->dev.bus = &vio_bus_type;
1419 viodev->dev.release = vio_dev_release;
1421 if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
1422 if (firmware_has_feature(FW_FEATURE_CMO))
1423 vio_cmo_set_dma_ops(viodev);
1424 else
1425 set_dma_ops(&viodev->dev, &dma_iommu_ops);
1427 set_iommu_table_base(&viodev->dev,
1428 vio_build_iommu_table(viodev));
1430 /* needed to ensure proper operation of coherent allocations
1431 * later, in case driver doesn't set it explicitly */
1432 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1433 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
1436 /* register with generic device framework */
1437 if (device_register(&viodev->dev)) {
1438 printk(KERN_ERR "%s: failed to register device %s\n",
1439 __func__, dev_name(&viodev->dev));
1440 put_device(&viodev->dev);
1441 return NULL;
1442 }
1444 return viodev;
1446 out: /* Use this exit point for any return prior to device_register */
1447 kfree(viodev);
1449 return NULL;
1450 }
1451 EXPORT_SYMBOL(vio_register_device_node);
1454 * vio_bus_scan_register_devices - Scan OF and register each child device
1455 * @root_name: OF node name for the root of the subtree to search.
1456 * This must be non-NULL
1457 *
1458 * Starting from the root node provided, register the device node for
1459 * each child beneath the root.
1461 static void vio_bus_scan_register_devices(char *root_name)
1463 struct device_node *node_root, *node_child;
1465 if (!root_name)
1466 return;
1468 node_root = of_find_node_by_name(NULL, root_name);
1472 * Create struct vio_devices for each virtual device in
1473 * the device tree. Drivers will associate with them later.
1475 node_child = of_get_next_child(node_root, NULL);
1476 while (node_child) {
1477 vio_register_device_node(node_child);
1478 node_child = of_get_next_child(node_root, node_child);
1480 of_node_put(node_root);
1485 * vio_bus_init: - Initialize the virtual IO bus
1487 static int __init vio_bus_init(void)
1491 if (firmware_has_feature(FW_FEATURE_CMO))
1492 vio_cmo_sysfs_init();
1494 err = bus_register(&vio_bus_type);
1496 printk(KERN_ERR "failed to register VIO bus\n");
1501 * The fake parent of all vio devices, just to give us
1502 * a nice directory
1503 */
1504 err = device_register(&vio_bus_device.dev);
1506 printk(KERN_WARNING "%s: device_register returned %i\n",
1511 if (firmware_has_feature(FW_FEATURE_CMO))
1512 vio_cmo_bus_init();
1514 return 0;
1515 }
1516 postcore_initcall(vio_bus_init);
1518 static int __init vio_device_init(void)
1520 vio_bus_scan_register_devices("vdevice");
1521 vio_bus_scan_register_devices("ibm,platform-facilities");
1522 return 0;
1523 }
1525 device_initcall(vio_device_init);
1527 static ssize_t name_show(struct device *dev,
1528 struct device_attribute *attr, char *buf)
1530 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
1532 static DEVICE_ATTR_RO(name);
1534 static ssize_t devspec_show(struct device *dev,
1535 struct device_attribute *attr, char *buf)
1537 struct device_node *of_node = dev->of_node;
1539 return sprintf(buf, "%pOF\n", of_node);
1541 static DEVICE_ATTR_RO(devspec);
1543 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1546 const struct vio_dev *vio_dev = to_vio_dev(dev);
1547 struct device_node *dn;
1548 const char *cp;
1550 dn = dev->of_node;
1555 cp = of_get_property(dn, "compatible", NULL);
1561 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1563 static DEVICE_ATTR_RO(modalias);
1565 static struct attribute *vio_dev_attrs[] = {
1566 &dev_attr_name.attr,
1567 &dev_attr_devspec.attr,
1568 &dev_attr_modalias.attr,
1571 ATTRIBUTE_GROUPS(vio_dev);
1573 void vio_unregister_device(struct vio_dev *viodev)
1575 device_unregister(&viodev->dev);
1576 if (viodev->family == VDEVICE)
1577 irq_dispose_mapping(viodev->irq);
1579 EXPORT_SYMBOL(vio_unregister_device);
1581 static int vio_bus_match(struct device *dev, struct device_driver *drv)
1583 const struct vio_dev *vio_dev = to_vio_dev(dev);
1584 struct vio_driver *vio_drv = to_vio_driver(drv);
1585 const struct vio_device_id *ids = vio_drv->id_table;
1587 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
1590 static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
1592 const struct vio_dev *vio_dev = to_vio_dev(dev);
1593 struct device_node *dn;
1594 const char *cp;
1596 dn = dev->of_node;
1599 cp = of_get_property(dn, "compatible", NULL);
1603 add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
1604 return 0;
1605 }
1607 struct bus_type vio_bus_type = {
1608 .name = "vio",
1609 .dev_groups = vio_dev_groups,
1610 .uevent = vio_hotplug,
1611 .match = vio_bus_match,
1612 .probe = vio_bus_probe,
1613 .remove = vio_bus_remove,
1617 * vio_get_attribute: - get attribute for virtual device
1618 * @vdev: The vio device to get property.
1619 * @which: The property/attribute to be extracted.
1620 * @length: Pointer to length of returned data size (unused if NULL).
1622 * Calls prom.c's of_get_property() to return the value of the
1623 * attribute specified by @which
1625 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
1627 return of_get_property(vdev->dev.of_node, which, length);
1629 EXPORT_SYMBOL(vio_get_attribute);
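/*
 * Typical driver-side use, sketched (the property name is an example):
 * fetch a device-tree attribute of the device, here a MAC address.
 */
static const u8 *vio_mac_sketch(struct vio_dev *vdev)
{
	int len;
	const u8 *mac = vio_get_attribute(vdev, "local-mac-address", &len);

	return (mac && len >= 6) ? mac : NULL;	/* 6 == ETH_ALEN */
}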
1631 #ifdef CONFIG_PPC_PSERIES
1632 /* vio_find_name() - internal because only vio.c knows how we formatted the
1633 * kobject name
1634 */
1635 static struct vio_dev *vio_find_name(const char *name)
1637 struct device *found;
1639 found = bus_find_device_by_name(&vio_bus_type, NULL, name);
1640 if (!found)
1641 return NULL;
1643 return to_vio_dev(found);
1644 }
1647 * vio_find_node - find an already-registered vio_dev
1648 * @vnode: device_node of the virtual device we're looking for
1650 * Takes a reference to the embedded struct device which needs to be dropped
1651 * after use.
1652 */
1653 struct vio_dev *vio_find_node(struct device_node *vnode)
1655 char kobj_name[20];
1656 struct device_node *vnode_parent;
1658 vnode_parent = of_get_parent(vnode);
1659 if (!vnode_parent)
1660 return NULL;
1662 /* construct the kobject name from the device node */
1663 if (of_node_is_type(vnode_parent, "vdevice")) {
1664 const __be32 *prop;
1666 prop = of_get_property(vnode, "reg", NULL);
1667 if (!prop)
1668 goto out;
1669 snprintf(kobj_name, sizeof(kobj_name), "%x",
1670 (uint32_t)of_read_number(prop, 1));
1671 } else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
1672 snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
1676 of_node_put(vnode_parent);
1677 return vio_find_name(kobj_name);
1678 out:
1679 of_node_put(vnode_parent);
1680 return NULL;
1681 }
1682 EXPORT_SYMBOL(vio_find_node);
1684 int vio_enable_interrupts(struct vio_dev *dev)
1686 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
1687 if (rc != H_SUCCESS)
1688 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
1689 return rc;
1690 }
1691 EXPORT_SYMBOL(vio_enable_interrupts);
1693 int vio_disable_interrupts(struct vio_dev *dev)
1695 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
1696 if (rc != H_SUCCESS)
1697 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
1698 return rc;
1699 }
1700 EXPORT_SYMBOL(vio_disable_interrupts);
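/*
 * Usage pattern sketch (handler is hypothetical and assumes
 * linux/interrupt.h): VIO drivers commonly disable the virtual
 * interrupt while draining their event queue and re-enable it on the
 * way out, ibmvscsi-style.
 */
static irqreturn_t vio_irq_sketch(int irq, void *dev_instance)
{
	struct vio_dev *vdev = dev_instance;

	vio_disable_interrupts(vdev);
	/* ... process the device's pending events here ... */
	vio_enable_interrupts(vdev);

	return IRQ_HANDLED;
}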
1701 #endif /* CONFIG_PPC_PSERIES */
1703 static int __init vio_init(void)
1705 dma_debug_add_bus(&vio_bus_type);
1706 return 0;
1707 }
1708 fs_initcall(vio_init);