1 // SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Intel SCU IPC mechanism
 *
 * (C) Copyright 2008-2010,2015 Intel Corporation
 * Author: Sreedhara DS (sreedhara.ds@intel.com)
 *
 * SCU running in ARC processor communicates with other entity running in IA
 * core through IPC mechanism which in turn messaging between IA core and SCU.
 * SCU has two IPC mechanisms, IPC-1 and IPC-2. IPC-1 is used between IA32 and
 * SCU where IPC-2 is used between P-Unit and SCU. This driver deals with
 * IPC-1. Driver provides an API for power control unit registers (e.g. MSIC)
 * along with other APIs.
 */
16 #include <linux/delay.h>
17 #include <linux/device.h>
18 #include <linux/errno.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
22 #include <linux/iopoll.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
26 #include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
#define IPCMSG_PCNTRL		0xff	/* Power controller unit read/write */

/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W	0	/* Register write */
#define IPC_CMD_PCNTRL_R	1	/* Register read */
#define IPC_CMD_PCNTRL_M	2	/* Register read-modify-write */
/*
 * IPC register summary
 *
 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
 * To read or write information to the SCU, driver writes to IPC-1 memory
 * mapped registers. The following is the IPC mechanism
 *
 * 1. IA core cDMI interface claims this transaction and converts it to a
 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 *
 * 2. South Complex cDMI block receives this message and writes it to
 *    the IPC-1 register block, causing an interrupt to the SCU
 *
 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
 *    message handler is called within firmware.
 */
#define IPC_WWBUF_SIZE	20	/* IPC Write buffer Size */
#define IPC_RWBUF_SIZE	20	/* IPC Read buffer Size */
#define IPC_IOC		0x100	/* IPC command register IOC bit */
57 struct intel_scu_ipc_dev {
62 void __iomem *ipc_base;
63 struct completion cmd_complete;
/* IPC status register offset (from ipc_base) and its bits */
#define IPC_STATUS		0x04
#define IPC_STATUS_IRQ		BIT(2)	/* IOC interrupt raised */
#define IPC_STATUS_ERR		BIT(1)	/* Command completed with error */
#define IPC_STATUS_BUSY		BIT(0)	/* Command still in progress */
/*
 * IPC Write/Read Buffers:
 * 16 byte buffer for sending and receiving data to and from SCU.
 */
#define IPC_WRITE_BUFFER	0x80
#define IPC_READ_BUFFER		0x90

/* Timeout in jiffies */
#define IPC_TIMEOUT		(3 * HZ)
static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
84 static struct class intel_scu_ipc_class = {
85 .name = "intel_scu_ipc",
90 * intel_scu_ipc_dev_get() - Get SCU IPC instance
92 * The recommended new API takes SCU IPC instance as parameter and this
93 * function can be called by driver to get the instance. This also makes
94 * sure the driver providing the IPC functionality cannot be unloaded
95 * while the caller has the instance.
97 * Call intel_scu_ipc_dev_put() to release the instance.
99 * Returns %NULL if SCU IPC is not currently available.
101 struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
103 struct intel_scu_ipc_dev *scu = NULL;
105 mutex_lock(&ipclock);
107 get_device(&ipcdev->dev);
109 * Prevent the IPC provider from being unloaded while it
112 if (!try_module_get(ipcdev->owner))
113 put_device(&ipcdev->dev);
118 mutex_unlock(&ipclock);
121 EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
124 * intel_scu_ipc_dev_put() - Put SCU IPC instance
125 * @scu: SCU IPC instance
127 * This function releases the SCU IPC instance retrieved from
128 * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
131 void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
134 module_put(scu->owner);
135 put_device(&scu->dev);
138 EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
/* Devres payload tracking a managed reference to the SCU IPC instance */
struct intel_scu_ipc_devres {
	struct intel_scu_ipc_dev *scu;
};
144 static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
146 struct intel_scu_ipc_devres *dr = res;
147 struct intel_scu_ipc_dev *scu = dr->scu;
149 intel_scu_ipc_dev_put(scu);
153 * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
154 * @dev: Device requesting the SCU IPC device
156 * The recommended new API takes SCU IPC instance as parameter and this
157 * function can be called by driver to get the instance. This also makes
158 * sure the driver providing the IPC functionality cannot be unloaded
159 * while the caller has the instance.
161 * Returns %NULL if SCU IPC is not currently available.
163 struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
165 struct intel_scu_ipc_devres *dr;
166 struct intel_scu_ipc_dev *scu;
168 dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
172 scu = intel_scu_ipc_dev_get();
183 EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
187 * Command Register (Write Only):
188 * A write to this register results in an interrupt to the SCU core processor
190 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
192 static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
194 reinit_completion(&scu->cmd_complete);
195 writel(cmd | IPC_IOC, scu->ipc_base);
200 * IPC Write Buffer (Write Only):
201 * 16-byte buffer for sending data associated with IPC command to
202 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
204 static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
206 writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
210 * Status Register (Read Only):
211 * Driver will read this register to get the ready/busy status of the IPC
212 * block and error status of the IPC command that was just processed by SCU
214 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
216 static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
218 return __raw_readl(scu->ipc_base + IPC_STATUS);
221 /* Read ipc byte data */
222 static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
224 return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
227 /* Read ipc u32 data */
228 static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
230 return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
233 /* Wait till scu status is busy */
234 static inline int busy_loop(struct intel_scu_ipc_dev *scu)
239 err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
240 100, jiffies_to_usecs(IPC_TIMEOUT));
244 return (status & IPC_STATUS_ERR) ? -EIO : 0;
247 /* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
248 static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
252 wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
254 status = ipc_read_status(scu);
255 if (status & IPC_STATUS_BUSY)
258 if (status & IPC_STATUS_ERR)
264 static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
266 return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
269 static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
276 return ERR_PTR(-ENODEV);
278 status = ipc_read_status(scu);
279 if (status & IPC_STATUS_BUSY) {
280 dev_dbg(&scu->dev, "device is busy\n");
281 return ERR_PTR(-EBUSY);
287 /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
288 static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
289 u32 count, u32 op, u32 id)
294 u8 cbuf[IPC_WWBUF_SIZE];
295 u32 *wbuf = (u32 *)&cbuf;
297 memset(cbuf, 0, sizeof(cbuf));
299 mutex_lock(&ipclock);
300 scu = intel_scu_ipc_get(scu);
302 mutex_unlock(&ipclock);
306 for (nc = 0; nc < count; nc++, offset += 2) {
307 cbuf[offset] = addr[nc];
308 cbuf[offset + 1] = addr[nc] >> 8;
311 if (id == IPC_CMD_PCNTRL_R) {
312 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
313 ipc_data_writel(scu, wbuf[nc], offset);
314 ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
315 } else if (id == IPC_CMD_PCNTRL_W) {
316 for (nc = 0; nc < count; nc++, offset += 1)
317 cbuf[offset] = data[nc];
318 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
319 ipc_data_writel(scu, wbuf[nc], offset);
320 ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
321 } else if (id == IPC_CMD_PCNTRL_M) {
322 cbuf[offset] = data[0];
323 cbuf[offset + 1] = data[1];
324 ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
325 ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
328 err = intel_scu_ipc_check_status(scu);
329 if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
330 /* Workaround: values are read as 0 without memcpy_fromio */
331 memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
332 for (nc = 0; nc < count; nc++)
333 data[nc] = ipc_data_readb(scu, nc);
335 mutex_unlock(&ipclock);
340 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
341 * @scu: Optional SCU IPC instance
342 * @addr: Register on SCU
343 * @data: Return pointer for read byte
345 * Read a single register. Returns %0 on success or an error code. All
346 * locking between SCU accesses is handled for the caller.
348 * This function may sleep.
350 int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
352 return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
354 EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
357 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
358 * @scu: Optional SCU IPC instance
359 * @addr: Register on SCU
360 * @data: Byte to write
362 * Write a single register. Returns %0 on success or an error code. All
363 * locking between SCU accesses is handled for the caller.
365 * This function may sleep.
367 int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
369 return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
371 EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
374 * intel_scu_ipc_dev_readv() - Read a set of registers
375 * @scu: Optional SCU IPC instance
376 * @addr: Register list
377 * @data: Bytes to return
378 * @len: Length of array
380 * Read registers. Returns %0 on success or an error code. All locking
381 * between SCU accesses is handled for the caller.
383 * The largest array length permitted by the hardware is 5 items.
385 * This function may sleep.
387 int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
390 return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
392 EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
395 * intel_scu_ipc_dev_writev() - Write a set of registers
396 * @scu: Optional SCU IPC instance
397 * @addr: Register list
398 * @data: Bytes to write
399 * @len: Length of array
401 * Write registers. Returns %0 on success or an error code. All locking
402 * between SCU accesses is handled for the caller.
404 * The largest array length permitted by the hardware is 5 items.
406 * This function may sleep.
408 int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
411 return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
413 EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
416 * intel_scu_ipc_dev_update() - Update a register
417 * @scu: Optional SCU IPC instance
418 * @addr: Register address
419 * @data: Bits to update
420 * @mask: Mask of bits to update
422 * Read-modify-write power control unit register. The first data argument
423 * must be register value and second is mask value mask is a bitmap that
424 * indicates which bits to update. %0 = masked. Don't modify this bit, %1 =
425 * modify this bit. returns %0 on success or an error code.
427 * This function may sleep. Locking between SCU accesses is handled
430 int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
433 u8 tmp[2] = { data, mask };
434 return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
436 EXPORT_SYMBOL(intel_scu_ipc_dev_update);
439 * intel_scu_ipc_dev_simple_command() - Send a simple command
440 * @scu: Optional SCU IPC instance
444 * Issue a simple command to the SCU. Do not use this interface if you must
445 * then access data as any data values may be overwritten by another SCU
446 * access by the time this function returns.
448 * This function may sleep. Locking for SCU accesses is handled for the
451 int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
457 mutex_lock(&ipclock);
458 scu = intel_scu_ipc_get(scu);
460 mutex_unlock(&ipclock);
464 cmdval = sub << 12 | cmd;
465 ipc_command(scu, cmdval);
466 err = intel_scu_ipc_check_status(scu);
467 mutex_unlock(&ipclock);
469 dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
472 EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
475 * intel_scu_ipc_command_with_size() - Command with data
476 * @scu: Optional SCU IPC instance
480 * @inlen: Input length in bytes
481 * @size: Input size written to the IPC command register in whatever
482 * units (dword, byte) the particular firmware requires. Normally
483 * should be the same as @inlen.
485 * @outlen: Output length in bytes
487 * Issue a command to the SCU which involves data transfers. Do the
488 * data copies under the lock but leave it for the caller to interpret.
490 int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
491 int sub, const void *in, size_t inlen,
492 size_t size, void *out, size_t outlen)
494 size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
495 size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
496 u32 cmdval, inbuf[4] = {};
499 if (inbuflen > 4 || outbuflen > 4)
502 mutex_lock(&ipclock);
503 scu = intel_scu_ipc_get(scu);
505 mutex_unlock(&ipclock);
509 memcpy(inbuf, in, inlen);
510 for (i = 0; i < inbuflen; i++)
511 ipc_data_writel(scu, inbuf[i], 4 * i);
513 cmdval = (size << 16) | (sub << 12) | cmd;
514 ipc_command(scu, cmdval);
515 err = intel_scu_ipc_check_status(scu);
520 for (i = 0; i < outbuflen; i++)
521 outbuf[i] = ipc_data_readl(scu, 4 * i);
523 memcpy(out, outbuf, outlen);
526 mutex_unlock(&ipclock);
528 dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
531 EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
534 * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
535 * When ioc bit is set to 1, caller api must wait for interrupt handler called
536 * which in turn unlocks the caller api. Currently this is not used
538 * This is edge triggered so we need take no action to clear anything
540 static irqreturn_t ioc(int irq, void *dev_id)
542 struct intel_scu_ipc_dev *scu = dev_id;
543 int status = ipc_read_status(scu);
545 writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
546 complete(&scu->cmd_complete);
551 static void intel_scu_ipc_release(struct device *dev)
553 struct intel_scu_ipc_dev *scu;
555 scu = container_of(dev, struct intel_scu_ipc_dev, dev);
557 free_irq(scu->irq, scu);
558 iounmap(scu->ipc_base);
559 release_mem_region(scu->mem.start, resource_size(&scu->mem));
564 * __intel_scu_ipc_register() - Register SCU IPC device
565 * @parent: Parent device
566 * @scu_data: Data used to configure SCU IPC
567 * @owner: Module registering the SCU IPC device
569 * Call this function to register SCU IPC mechanism under @parent.
570 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
571 * failure. The caller may use the returned instance if it needs to do
572 * SCU IPC calls itself.
574 struct intel_scu_ipc_dev *
575 __intel_scu_ipc_register(struct device *parent,
576 const struct intel_scu_ipc_data *scu_data,
577 struct module *owner)
580 struct intel_scu_ipc_dev *scu;
581 void __iomem *ipc_base;
583 mutex_lock(&ipclock);
584 /* We support only one IPC */
590 scu = kzalloc(sizeof(*scu), GFP_KERNEL);
597 scu->dev.parent = parent;
598 scu->dev.class = &intel_scu_ipc_class;
599 scu->dev.release = intel_scu_ipc_release;
601 if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
607 ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
613 scu->ipc_base = ipc_base;
614 scu->mem = scu_data->mem;
615 scu->irq = scu_data->irq;
616 init_completion(&scu->cmd_complete);
619 err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
625 * After this point intel_scu_ipc_release() takes care of
626 * releasing the SCU IPC resources once refcount drops to zero.
628 dev_set_name(&scu->dev, "intel_scu_ipc");
629 err = device_register(&scu->dev);
631 put_device(&scu->dev);
635 /* Assign device at last */
637 mutex_unlock(&ipclock);
644 release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
648 mutex_unlock(&ipclock);
652 EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
655 * intel_scu_ipc_unregister() - Unregister SCU IPC
656 * @scu: SCU IPC handle
658 * This unregisters the SCU IPC device and releases the acquired
659 * resources once the refcount goes to zero.
661 void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
663 mutex_lock(&ipclock);
664 if (!WARN_ON(!ipcdev)) {
666 device_unregister(&scu->dev);
668 mutex_unlock(&ipclock);
670 EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
672 static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
674 struct intel_scu_ipc_devres *dr = res;
675 struct intel_scu_ipc_dev *scu = dr->scu;
677 intel_scu_ipc_unregister(scu);
681 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
682 * @parent: Parent device
683 * @scu_data: Data used to configure SCU IPC
684 * @owner: Module registering the SCU IPC device
686 * Call this function to register managed SCU IPC mechanism under
687 * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
688 * case of failure. The caller may use the returned instance if it needs
689 * to do SCU IPC calls itself.
691 struct intel_scu_ipc_dev *
692 __devm_intel_scu_ipc_register(struct device *parent,
693 const struct intel_scu_ipc_data *scu_data,
694 struct module *owner)
696 struct intel_scu_ipc_devres *dr;
697 struct intel_scu_ipc_dev *scu;
699 dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
703 scu = __intel_scu_ipc_register(parent, scu_data, owner);
710 devres_add(parent, dr);
714 EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
716 static int __init intel_scu_ipc_init(void)
718 return class_register(&intel_scu_ipc_class);
720 subsys_initcall(intel_scu_ipc_init);
722 static void __exit intel_scu_ipc_exit(void)
724 class_unregister(&intel_scu_ipc_class);
726 module_exit(intel_scu_ipc_exit);