// SPDX-License-Identifier: GPL-2.0-or-later
/* IBM POWER Barrier Synchronization Register Driver
 *
 * Copyright IBM Corporation 2008
 *
 * Author: Sonny Rao <sonnyrao@us.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/io.h>

/*
 This driver exposes a special register which can be used for fast
 synchronization across a large SMP machine.  The hardware is exposed
 as an array of bytes where each process will write to one of the bytes to
 indicate it has finished the current stage and this update is broadcast to
 all processors without having to bounce a cacheline between them.  In
 POWER5 and POWER6 there is one of these registers per SMP system, but it is
 presented in two forms; first, it is given as a whole and then as a number
 of smaller registers which alias to parts of the single whole register.
 This can potentially allow multiple groups of processes to each have their
 own private synchronization device.

 Note that this hardware *must* be written to using *only* single byte writes.
 It may be read using 1, 2, 4, or 8 byte loads which must be aligned since
 this region is treated as cache-inhibited.  Processes should also use a
 full sync before and after writing to the BSR to ensure all stores and
 the BSR update have made it to all chips in the system.
*/
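
/*
 * A minimal sketch of how a userspace barrier might use one of these
 * devices.  Illustrative only: the device name, byte index, and mapping
 * length below are assumptions, not something this driver guarantees;
 * real users should read the sysfs attributes exported further down to
 * learn the actual geometry before mapping.
 *
 *	int fd = open("/dev/bsr64_0", O_RDWR);
 *	volatile unsigned char *bsr = mmap(NULL, 4096,
 *					   PROT_READ | PROT_WRITE,
 *					   MAP_SHARED, fd, 0);
 *
 *	asm volatile("sync" ::: "memory");	// order my prior stores
 *	bsr[my_id] = 1;				// single-byte store only
 *	asm volatile("sync" ::: "memory");
 *	while (bsr[peer_id] != 1)		// updates are broadcast,
 *		;				// no cacheline ping-pong
 */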

/* This is an arbitrary number; up to POWER6 it's been 17 or fewer */
#define BSR_MAX_DEVS (32)

struct bsr_dev {
	u64      bsr_addr;     /* Real address */
	u64      bsr_len;      /* length of mem region we can map */
	unsigned bsr_bytes;    /* size of the BSR reg itself */
	unsigned bsr_stride;   /* interval at which BSR repeats in the page */
	unsigned bsr_type;     /* maps to enum below */
	unsigned bsr_num;      /* bsr id number for its type */
	int      bsr_minor;    /* minor number within the bsr major */

	struct list_head bsr_list;

	dev_t    bsr_dev;
	struct cdev bsr_cdev;
	struct device *bsr_device;
	char     bsr_name[32];
};

static unsigned total_bsr_devs;
static LIST_HEAD(bsr_devs);
static struct class *bsr_class;
static int bsr_major;

enum { BSR_8, BSR_16, BSR_64, BSR_128, BSR_4096, BSR_UNKNOWN, BSR_MAX };

static unsigned bsr_types[BSR_MAX];
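
/*
 * sysfs attributes: userspace reads bsr_size, bsr_stride and bsr_length to
 * learn the geometry of a BSR device before mmap()ing it.
 */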
static ssize_t
bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
}
static DEVICE_ATTR_RO(bsr_size);

static ssize_t
bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
}
static DEVICE_ATTR_RO(bsr_stride);

static ssize_t
bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
}
static DEVICE_ATTR_RO(bsr_length);

static struct attribute *bsr_dev_attrs[] = {
	&dev_attr_bsr_size.attr,
	&dev_attr_bsr_stride.attr,
	&dev_attr_bsr_length.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bsr_dev);
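
/*
 * mmap hands the BSR region straight to userspace.  The mapping is made
 * cache-inhibited so that single-byte stores reach the hardware and are
 * broadcast, rather than sitting in a local cacheline.
 */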
static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct bsr_dev *dev = filp->private_data;
	int ret;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* check for the case of a small BSR device and map one 4k page for it */
	if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
		ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
				   vma->vm_page_prot);
	else if (size <= dev->bsr_len)
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 dev->bsr_addr >> PAGE_SHIFT,
					 size, vma->vm_page_prot);
	else
		return -EINVAL;

	if (ret)
		return -EAGAIN;

	return 0;
}

static int bsr_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);

	filp->private_data = dev;
	return 0;
}

static const struct file_operations bsr_fops = {
	.owner	= THIS_MODULE,
	.mmap	= bsr_mmap,
	.open	= bsr_open,
	.llseek	= noop_llseek,
};

static void bsr_cleanup_devs(void)
{
	struct bsr_dev *cur, *n;

	list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
		if (cur->bsr_device) {
			cdev_del(&cur->bsr_cdev);
			device_del(cur->bsr_device);
		}
		list_del(&cur->bsr_list);
		kfree(cur);
	}
}
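
/*
 * Parse one "ibm,bsr" device-tree node.  Each entry of its reg property
 * becomes one char device; the ibm,lock-stride and ibm,#lock-bytes
 * properties describe the stride and width of the corresponding register.
 */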
static int bsr_add_node(struct device_node *bn)
{
	int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
	const u32 *bsr_stride;
	const u32 *bsr_bytes;
	unsigned i;
	int ret = -ENODEV;

	bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
	bsr_bytes  = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);

	if (!bsr_stride || !bsr_bytes ||
	    (bsr_stride_len != bsr_bytes_len)) {
		printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
		return ret;
	}

	num_bsr_devs = bsr_bytes_len / sizeof(u32);

	for (i = 0; i < num_bsr_devs; i++) {
		struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
					      GFP_KERNEL);
		struct resource res;
		int result;

		if (!cur) {
			printk(KERN_ERR "Unable to alloc bsr dev\n");
			ret = -ENOMEM;
			goto out_err;
		}

		result = of_address_to_resource(bn, i, &res);
		if (result < 0) {
			printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
			kfree(cur);
			continue;
		}

		cur->bsr_minor  = i + total_bsr_devs;
		cur->bsr_addr   = res.start;
		cur->bsr_len    = resource_size(&res);
		cur->bsr_bytes  = bsr_bytes[i];
		cur->bsr_stride = bsr_stride[i];
		cur->bsr_dev    = MKDEV(bsr_major, i + total_bsr_devs);

		/* if bsr_len is > 4k and less than PAGE_SIZE (64k pages) we */
		/* can only map 4k of it, so only advertise the 4k in sysfs  */
		if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
			cur->bsr_len = 4096;

		switch (cur->bsr_bytes) {
		case 8:    cur->bsr_type = BSR_8;    break;
		case 16:   cur->bsr_type = BSR_16;   break;
		case 64:   cur->bsr_type = BSR_64;   break;
		case 128:  cur->bsr_type = BSR_128;  break;
		case 4096: cur->bsr_type = BSR_4096; break;
		default:   cur->bsr_type = BSR_UNKNOWN;
		}

		cur->bsr_num = bsr_types[cur->bsr_type];
		snprintf(cur->bsr_name, 32, "bsr%d_%d",
			 cur->bsr_bytes, cur->bsr_num);

		cdev_init(&cur->bsr_cdev, &bsr_fops);
		result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
		if (result) {
			kfree(cur);
			goto out_err;
		}

		cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
						cur, "%s", cur->bsr_name);
		if (IS_ERR(cur->bsr_device)) {
			printk(KERN_ERR "device_create failed for %s\n",
			       cur->bsr_name);
			cdev_del(&cur->bsr_cdev);
			kfree(cur);
			goto out_err;
		}

		bsr_types[cur->bsr_type] = cur->bsr_num + 1;
		list_add_tail(&cur->bsr_list, &bsr_devs);
	}

	total_bsr_devs += num_bsr_devs;

	return 0;

 out_err:
	bsr_cleanup_devs();
	return ret;
}
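
/*
 * There may be more than one "ibm,bsr" node in the device tree; create
 * devices for each of them in turn.
 */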
static int bsr_create_devs(struct device_node *bn)
{
	int ret;

	while (bn) {
		ret = bsr_add_node(bn);
		if (ret) {
			of_node_put(bn);
			return ret;
		}
		bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
	}
	return 0;
}

static int __init bsr_init(void)
{
	struct device_node *np;
	dev_t bsr_dev;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
	if (!np)
		goto out_err;

	bsr_class = class_create(THIS_MODULE, "bsr");
	if (IS_ERR(bsr_class)) {
		printk(KERN_ERR "class_create() failed for bsr_class\n");
		ret = PTR_ERR(bsr_class);
		goto out_err_1;
	}
	bsr_class->dev_groups = bsr_dev_groups;

	ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
	bsr_major = MAJOR(bsr_dev);
	if (ret < 0) {
		printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
		goto out_err_2;
	}

	ret = bsr_create_devs(np);
	if (ret < 0) {
		np = NULL;
		goto out_err_3;
	}

	return 0;

 out_err_3:
	unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);
 out_err_2:
	class_destroy(bsr_class);
 out_err_1:
	of_node_put(np);
 out_err:
	return ret;
}

static void __exit bsr_exit(void)
{
	bsr_cleanup_devs();

	if (bsr_class)
		class_destroy(bsr_class);

	if (bsr_major)
		unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
}

module_init(bsr_init);
module_exit(bsr_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");