/*
 * Intel Core SoC Power Management Controller Driver
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/pmc_core.h>

#include "intel_pmc_core.h"
35 static struct pmc_dev pmc;
37 static const struct pmc_bit_map spt_pll_map[] = {
38 {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
39 {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
40 {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
41 {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
45 static const struct pmc_bit_map spt_mphy_map[] = {
46 {"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
47 {"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
48 {"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
49 {"MPHY CORE LANE 3", SPT_PMC_BIT_MPHY_LANE3},
50 {"MPHY CORE LANE 4", SPT_PMC_BIT_MPHY_LANE4},
51 {"MPHY CORE LANE 5", SPT_PMC_BIT_MPHY_LANE5},
52 {"MPHY CORE LANE 6", SPT_PMC_BIT_MPHY_LANE6},
53 {"MPHY CORE LANE 7", SPT_PMC_BIT_MPHY_LANE7},
54 {"MPHY CORE LANE 8", SPT_PMC_BIT_MPHY_LANE8},
55 {"MPHY CORE LANE 9", SPT_PMC_BIT_MPHY_LANE9},
56 {"MPHY CORE LANE 10", SPT_PMC_BIT_MPHY_LANE10},
57 {"MPHY CORE LANE 11", SPT_PMC_BIT_MPHY_LANE11},
58 {"MPHY CORE LANE 12", SPT_PMC_BIT_MPHY_LANE12},
59 {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
60 {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
61 {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
65 static const struct pmc_bit_map spt_pfear_map[] = {
66 {"PMC", SPT_PMC_BIT_PMC},
67 {"OPI-DMI", SPT_PMC_BIT_OPI},
68 {"SPI / eSPI", SPT_PMC_BIT_SPI},
69 {"XHCI", SPT_PMC_BIT_XHCI},
70 {"SPA", SPT_PMC_BIT_SPA},
71 {"SPB", SPT_PMC_BIT_SPB},
72 {"SPC", SPT_PMC_BIT_SPC},
73 {"GBE", SPT_PMC_BIT_GBE},
74 {"SATA", SPT_PMC_BIT_SATA},
75 {"HDA-PGD0", SPT_PMC_BIT_HDA_PGD0},
76 {"HDA-PGD1", SPT_PMC_BIT_HDA_PGD1},
77 {"HDA-PGD2", SPT_PMC_BIT_HDA_PGD2},
78 {"HDA-PGD3", SPT_PMC_BIT_HDA_PGD3},
79 {"RSVD", SPT_PMC_BIT_RSVD_0B},
80 {"LPSS", SPT_PMC_BIT_LPSS},
81 {"LPC", SPT_PMC_BIT_LPC},
82 {"SMB", SPT_PMC_BIT_SMB},
83 {"ISH", SPT_PMC_BIT_ISH},
84 {"P2SB", SPT_PMC_BIT_P2SB},
85 {"DFX", SPT_PMC_BIT_DFX},
86 {"SCC", SPT_PMC_BIT_SCC},
87 {"RSVD", SPT_PMC_BIT_RSVD_0C},
88 {"FUSE", SPT_PMC_BIT_FUSE},
89 {"CAMERA", SPT_PMC_BIT_CAMREA},
90 {"RSVD", SPT_PMC_BIT_RSVD_0D},
91 {"USB3-OTG", SPT_PMC_BIT_USB3_OTG},
92 {"EXI", SPT_PMC_BIT_EXI},
93 {"CSE", SPT_PMC_BIT_CSE},
94 {"CSME_KVM", SPT_PMC_BIT_CSME_KVM},
95 {"CSME_PMT", SPT_PMC_BIT_CSME_PMT},
96 {"CSME_CLINK", SPT_PMC_BIT_CSME_CLINK},
97 {"CSME_PTIO", SPT_PMC_BIT_CSME_PTIO},
98 {"CSME_USBR", SPT_PMC_BIT_CSME_USBR},
99 {"CSME_SUSRAM", SPT_PMC_BIT_CSME_SUSRAM},
100 {"CSME_SMT", SPT_PMC_BIT_CSME_SMT},
101 {"RSVD", SPT_PMC_BIT_RSVD_1A},
102 {"CSME_SMS2", SPT_PMC_BIT_CSME_SMS2},
103 {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
104 {"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
105 {"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
109 static const struct pmc_reg_map spt_reg_map = {
110 .pfear_sts = spt_pfear_map,
111 .mphy_sts = spt_mphy_map,
112 .pll_sts = spt_pll_map,
113 .slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
114 .ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
115 .regmap_length = SPT_PMC_MMIO_REG_LEN,
116 .ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
117 .ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
118 .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
119 .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
122 static const struct pci_device_id pmc_pci_ids[] = {
123 { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID),
124 (kernel_ulong_t)&spt_reg_map },
128 static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
130 return readb(pmcdev->regbase + offset);
133 static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
135 return readl(pmcdev->regbase + reg_offset);
138 static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
141 writel(val, pmcdev->regbase + reg_offset);
144 static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
146 return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
/**
 * intel_pmc_slp_s0_counter_read() - Read SLP_S0 residency.
 * @data: Out param that contains current SLP_S0 count.
 *
 * This API currently supports Intel Skylake SoC and Sunrise
 * Point Platform Controller Hub. Future platform support
 * should be added for platforms that support low power modes
 * beyond Package C10 state.
 *
 * SLP_S0_RESIDENCY counter counts in 100 us granularity per
 * step hence function populates the multiplied value in out
 * parameter @data.
 *
 * Return: an error code or 0 on success.
 */
164 int intel_pmc_slp_s0_counter_read(u32 *data)
166 struct pmc_dev *pmcdev = &pmc;
167 const struct pmc_reg_map *map = pmcdev->map;
170 if (!pmcdev->has_slp_s0_res)
173 value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
174 *data = pmc_core_adjust_slp_s0_step(value);
178 EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read);
180 static int pmc_core_dev_state_get(void *data, u64 *val)
182 struct pmc_dev *pmcdev = data;
183 const struct pmc_reg_map *map = pmcdev->map;
186 value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
187 *val = pmc_core_adjust_slp_s0_step(value);
192 DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
194 static int pmc_core_check_read_lock_bit(void)
196 struct pmc_dev *pmcdev = &pmc;
199 value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
200 return value & BIT(pmcdev->map->pm_read_disable_bit);
203 #if IS_ENABLED(CONFIG_DEBUG_FS)
204 static void pmc_core_display_map(struct seq_file *s, int index,
205 u8 pf_reg, const struct pmc_bit_map *pf_map)
207 seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
208 index, pf_map[index].name,
209 pf_map[index].bit_mask & pf_reg ? "Off" : "On");
212 static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused)
214 struct pmc_dev *pmcdev = s->private;
215 const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
216 u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
219 iter = pmcdev->map->ppfear0_offset;
221 for (index = 0; index < pmcdev->map->ppfear_buckets &&
222 index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
223 pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
225 for (index = 0; map[index].name &&
226 index < pmcdev->map->ppfear_buckets * 8; index++)
227 pmc_core_display_map(s, index, pf_regs[index / 8], map);
232 static int pmc_core_ppfear_sts_open(struct inode *inode, struct file *file)
234 return single_open(file, pmc_core_ppfear_sts_show, inode->i_private);
237 static const struct file_operations pmc_core_ppfear_ops = {
238 .open = pmc_core_ppfear_sts_open,
241 .release = single_release,
244 /* This function should return link status, 0 means ready */
245 static int pmc_core_mtpmc_link_status(void)
247 struct pmc_dev *pmcdev = &pmc;
250 value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
251 return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
254 static int pmc_core_send_msg(u32 *addr_xram)
256 struct pmc_dev *pmcdev = &pmc;
260 for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
261 if (pmc_core_mtpmc_link_status() == 0)
266 if (timeout <= 0 && pmc_core_mtpmc_link_status())
269 dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
270 pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
274 static int pmc_core_mphy_pg_sts_show(struct seq_file *s, void *unused)
276 struct pmc_dev *pmcdev = s->private;
277 const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
278 u32 mphy_core_reg_low, mphy_core_reg_high;
279 u32 val_low, val_high;
282 if (pmcdev->pmc_xram_read_bit) {
283 seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
287 mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
288 mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
290 mutex_lock(&pmcdev->lock);
292 if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
298 val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
300 if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
306 val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
308 for (index = 0; map[index].name && index < 8; index++) {
309 seq_printf(s, "%-32s\tState: %s\n",
311 map[index].bit_mask & val_low ? "Not power gated" :
315 for (index = 8; map[index].name; index++) {
316 seq_printf(s, "%-32s\tState: %s\n",
318 map[index].bit_mask & val_high ? "Not power gated" :
323 mutex_unlock(&pmcdev->lock);
327 static int pmc_core_mphy_pg_sts_open(struct inode *inode, struct file *file)
329 return single_open(file, pmc_core_mphy_pg_sts_show, inode->i_private);
332 static const struct file_operations pmc_core_mphy_pg_ops = {
333 .open = pmc_core_mphy_pg_sts_open,
336 .release = single_release,
339 static int pmc_core_pll_show(struct seq_file *s, void *unused)
341 struct pmc_dev *pmcdev = s->private;
342 const struct pmc_bit_map *map = pmcdev->map->pll_sts;
343 u32 mphy_common_reg, val;
346 if (pmcdev->pmc_xram_read_bit) {
347 seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
351 mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
352 mutex_lock(&pmcdev->lock);
354 if (pmc_core_send_msg(&mphy_common_reg) != 0) {
359 /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
361 val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
363 for (index = 0; map[index].name ; index++) {
364 seq_printf(s, "%-32s\tState: %s\n",
366 map[index].bit_mask & val ? "Active" : "Idle");
370 mutex_unlock(&pmcdev->lock);
374 static int pmc_core_pll_open(struct inode *inode, struct file *file)
376 return single_open(file, pmc_core_pll_show, inode->i_private);
379 static const struct file_operations pmc_core_pll_ops = {
380 .open = pmc_core_pll_open,
383 .release = single_release,
386 static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
387 *userbuf, size_t count, loff_t *ppos)
389 struct pmc_dev *pmcdev = &pmc;
390 const struct pmc_reg_map *map = pmcdev->map;
391 u32 val, buf_size, fd;
394 buf_size = count < 64 ? count : 64;
395 mutex_lock(&pmcdev->lock);
397 if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
402 if (val > NUM_IP_IGN_ALLOWED) {
407 fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
409 pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);
412 mutex_unlock(&pmcdev->lock);
413 return err == 0 ? count : err;
/* The ltr_ignore file is write-only in effect: reading shows nothing. */
static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
        return 0;
}
421 static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
423 return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
426 static const struct file_operations pmc_core_ltr_ignore_ops = {
427 .open = pmc_core_ltr_ignore_open,
429 .write = pmc_core_ltr_ignore_write,
431 .release = single_release,
434 static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
436 debugfs_remove_recursive(pmcdev->dbgfs_dir);
439 static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
441 struct dentry *dir, *file;
443 dir = debugfs_create_dir("pmc_core", NULL);
447 pmcdev->dbgfs_dir = dir;
448 file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
449 dir, pmcdev, &pmc_core_dev_state);
453 file = debugfs_create_file("pch_ip_power_gating_status",
454 S_IFREG | S_IRUGO, dir, pmcdev,
455 &pmc_core_ppfear_ops);
459 file = debugfs_create_file("mphy_core_lanes_power_gating_status",
460 S_IFREG | S_IRUGO, dir, pmcdev,
461 &pmc_core_mphy_pg_ops);
465 file = debugfs_create_file("pll_status",
466 S_IFREG | S_IRUGO, dir, pmcdev,
471 file = debugfs_create_file("ltr_ignore",
472 S_IFREG | S_IRUGO, dir, pmcdev,
473 &pmc_core_ltr_ignore_ops);
480 pmc_core_dbgfs_unregister(pmcdev);
484 static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
489 static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
492 #endif /* CONFIG_DEBUG_FS */
494 static const struct x86_cpu_id intel_pmc_core_ids[] = {
495 { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT,
496 (kernel_ulong_t)NULL},
497 { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT,
498 (kernel_ulong_t)NULL},
499 { X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_MOBILE, X86_FEATURE_MWAIT,
500 (kernel_ulong_t)NULL},
501 { X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_DESKTOP, X86_FEATURE_MWAIT,
502 (kernel_ulong_t)NULL},
506 static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
508 struct device *ptr_dev = &dev->dev;
509 struct pmc_dev *pmcdev = &pmc;
510 const struct x86_cpu_id *cpu_id;
511 const struct pmc_reg_map *map = (struct pmc_reg_map *)id->driver_data;
514 cpu_id = x86_match_cpu(intel_pmc_core_ids);
516 dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n");
520 err = pcim_enable_device(dev);
522 dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n");
526 err = pci_read_config_dword(dev,
527 SPT_PMC_BASE_ADDR_OFFSET,
530 dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
533 pmcdev->base_addr &= PMC_BASE_ADDR_MASK;
534 dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
536 pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
538 SPT_PMC_MMIO_REG_LEN);
539 if (!pmcdev->regbase) {
540 dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n");
544 mutex_init(&pmcdev->lock);
546 pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
548 err = pmc_core_dbgfs_register(pmcdev);
550 dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n");
552 pmc.has_slp_s0_res = true;
556 static struct pci_driver intel_pmc_core_driver = {
557 .name = "intel_pmc_core",
558 .id_table = pmc_pci_ids,
559 .probe = pmc_core_probe,
562 builtin_pci_driver(intel_pmc_core_driver);