/*
 * EMIF driver
 *
 * Copyright (C) 2012 Texas Instruments, Inc.
 *
 * Aneesh V <aneesh@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_data/emif_plat.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <memory/jedec_ddr.h>
#include "emif.h"
#include "of_memory.h"
/**
 * struct emif_data - Per device static data for driver's use
 * @duplicate:			Whether the DDR devices attached to this EMIF
 *				instance are exactly same as that on EMIF1. In
 *				this case we can save some memory and processing
 * @temperature_level:		Maximum temperature of LPDDR2 devices attached
 *				to this EMIF - read from MR4 register. If there
 *				are two devices attached to this EMIF, this
 *				value is the maximum of the two temperature
 *				levels.
 * @lpmode:			Low power mode currently programmed in the
 *				EMIF power management control register
 * @node:			node in the device list
 * @irq_state:			saved IRQ flags, used with the shared emif_lock
 * @base:			base address of memory-mapped IO registers.
 * @dev:			device pointer.
 * @addressing:			table with addressing information from the spec
 * @regs_cache:			An array of 'struct emif_regs' that stores
 *				calculated register values for different
 *				frequencies, to avoid re-calculating them on
 *				each DVFS transition.
 * @curr_regs:			The set of register values used in the last
 *				frequency change (i.e. corresponding to the
 *				frequency in effect at the moment)
 * @plat_data:			Pointer to saved platform data.
 * @debugfs_root:		dentry to the root folder for EMIF in debugfs
 * @np_ddr:			Pointer to ddr device tree node
 */
struct emif_data {
	u8				duplicate;
	u8				temperature_level;
	u8				lpmode;
	struct list_head		node;
	unsigned long			irq_state;
	void __iomem			*base;
	struct device			*dev;
	const struct lpddr2_addressing	*addressing;
	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
	struct emif_regs		*curr_regs;
	struct emif_platform_data	*plat_data;
	struct dentry			*debugfs_root;
	struct device_node		*np_ddr;
};
static struct emif_data *emif1;
static spinlock_t	emif_lock;
static unsigned long	irq_state;
static u32		t_ck; /* DDR clock period in ps */
static LIST_HEAD(device_list);
#ifdef CONFIG_DEBUG_FS
static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
	struct emif_regs *regs)
{
	u32 type = emif->plat_data->device_info->type;
	u32 ip_rev = emif->plat_data->ip_rev;

	seq_printf(s, "EMIF register cache dump for %dMHz\n",
		regs->freq/1000000);

	seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
	seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
	seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
	seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);

	if (ip_rev == EMIF_4D) {
		seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_normal);
		seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_volt_ramp);
	} else if (ip_rev == EMIF_4D5) {
		seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_normal);
		seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_volt_ramp);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
			regs->ref_ctrl_shdw_derated);
		seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim1_shdw_derated);
		seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim3_shdw_derated);
	}
}
static int emif_regdump_show(struct seq_file *s, void *unused)
{
	struct emif_data	*emif	= s->private;
	struct emif_regs	**regs_cache;
	int			i;

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		do_emif_regdump_show(s, emif, regs_cache[i]);
		seq_printf(s, "\n");
	}

	return 0;
}
static int emif_regdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_regdump_show, inode->i_private);
}

static const struct file_operations emif_regdump_fops = {
	.open			= emif_regdump_open,
	.read			= seq_read,
	.release		= single_release,
};
static int emif_mr4_show(struct seq_file *s, void *unused)
{
	struct emif_data *emif = s->private;

	seq_printf(s, "MR4=%d\n", emif->temperature_level);
	return 0;
}

static int emif_mr4_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_mr4_show, inode->i_private);
}

static const struct file_operations emif_mr4_fops = {
	.open			= emif_mr4_open,
	.read			= seq_read,
	.release		= single_release,
};
static int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
	debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
			    &emif_regdump_fops);
	debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
			    &emif_mr4_fops);
	return 0;
}

static void __exit emif_debugfs_exit(struct emif_data *emif)
{
	debugfs_remove_recursive(emif->debugfs_root);
	emif->debugfs_root = NULL;
}
#else
static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	return 0;
}

static inline void __exit emif_debugfs_exit(struct emif_data *emif)
{
}
#endif
/*
 * Calculate the period of DDR clock from frequency value
 */
static void set_ddr_clk_period(u32 freq)
{
	/* Divide 10^12 by frequency to get period in ps */
	t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq);
}
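/*
 * Worked example (illustrative values, not from the original source):
 * for freq = 400000000 (400 MHz), t_ck = DIV_ROUND_UP_ULL(10^12,
 * 4 * 10^8) = 2500 ps, i.e. a 2.5 ns clock period. All the timing
 * fields computed below are expressed in multiples of this period.
 */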
/*
 * Get bus width used by EMIF. Note that this may be different from the
 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 * may be connected to a given CS of EMIF. In this case bus width as far
 * as EMIF is concerned is 32, whereas the DDR bus width is 16 bits.
 */
static u32 get_emif_bus_width(struct emif_data *emif)
{
	u32		width;
	void __iomem	*base = emif->base;

	width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
			>> NARROW_MODE_SHIFT;
	width = width == 0 ? 32 : 16;

	return width;
}
/*
 * Get the CL from SDRAM_CONFIG register
 */
static u32 get_cl(struct emif_data *emif)
{
	u32		cl;
	void __iomem	*base = emif->base;

	cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;

	return cl;
}
static void set_lpmode(struct emif_data *emif, u8 lpmode)
{
	u32 temp;
	void __iomem *base = emif->base;

	/*
	 * Workaround for errata i743 - LPDDR2 Power-Down State is Not
	 * Efficient
	 *
	 * i743 DESCRIPTION:
	 * The EMIF supports power-down state for low power. The EMIF
	 * automatically puts the SDRAM into power-down after the memory is
	 * not accessed for a defined number of cycles and the
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
	 * As the EMIF supports automatic output impedance calibration, a ZQ
	 * calibration long command is issued every time it exits active
	 * power-down and precharge power-down modes. The EMIF waits and
	 * blocks any other command during this calibration.
	 * The EMIF does not allow selective disabling of ZQ calibration upon
	 * exit of power-down mode. Due to very short periods of power-down
	 * cycles, ZQ calibration overhead creates bandwidth issues and
	 * increases overall system power consumption. On the other hand,
	 * issuing ZQ calibration long commands when exiting self-refresh is
	 * still required.
	 *
	 * WORKAROUND
	 * Because there is no power consumption benefit of the power-down due
	 * to the calibration and there is a performance risk, the guideline
	 * is to not allow power-down state and, therefore, to not have set
	 * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
	 */
	if ((emif->plat_data->ip_rev == EMIF_4D) &&
	    (lpmode == EMIF_LP_MODE_PWR_DN)) {
		WARN_ONCE(1,
			  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743, switching to LP_MODE_SELF_REFRESH(2)\n");
		/* rollback LP_MODE to Self-refresh mode */
		lpmode = EMIF_LP_MODE_SELF_REFRESH;
	}

	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
	temp &= ~LP_MODE_MASK;
	temp |= (lpmode << LP_MODE_SHIFT);
	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
}
static void do_freq_update(void)
{
	struct emif_data *emif;

	/*
	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
	 *
	 * i728 DESCRIPTION:
	 * The EMIF automatically puts the SDRAM into self-refresh mode
	 * after the EMIF has not performed accesses during
	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
	 * to 0x2. If during a small window the following three events
	 * occur:
	 * - The SR_TIMING counter expires
	 * - And frequency change is requested
	 * - And OCP access is requested
	 * then it causes an unstable clock on the DDR interface.
	 *
	 * WORKAROUND
	 * To avoid the occurrence of the three events, the workaround
	 * is to disable the self-refresh when requesting a frequency
	 * change. Before requesting a frequency change the software must
	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
	 * frequency change has been done, the software can reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
	 */
	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
	}

	/*
	 * TODO: Do FREQ_UPDATE here when an API
	 * is available for this as part of the new
	 * clock framework
	 */

	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
	}
}
/* Find addressing table entry based on the device's type and density */
static const struct lpddr2_addressing *get_addressing_table(
	const struct ddr_device_info *device_info)
{
	u32		index, type, density;

	type = device_info->type;
	density = device_info->density;

	switch (type) {
	case DDR_TYPE_LPDDR2_S4:
		index = density - 1;
		break;
	case DDR_TYPE_LPDDR2_S2:
		switch (density) {
		case DDR_DENSITY_1Gb:
		case DDR_DENSITY_2Gb:
			index = density + 3;
			break;
		default:
			index = density - 1;
		}
		break;
	default:
		return NULL;
	}

	return &lpddr2_jedec_addressing_table[index];
}
/*
 * Find the right timing table from the array of timing
 * tables of the device using DDR clock frequency
 */
static const struct lpddr2_timings *get_timings_table(struct emif_data *emif,
		u32 freq)
{
	u32				i, min, max, freq_nearest;
	const struct lpddr2_timings	*timings = NULL;
	const struct lpddr2_timings	*timings_arr = emif->plat_data->timings;
	struct device			*dev = emif->dev;

	/* Start with a very high frequency - 1GHz */
	freq_nearest = 1000000000;

	/*
	 * Find the timings table such that:
	 *  1. the frequency range covers the required frequency(safe) AND
	 *  2. the max_freq is closest to the required frequency(optimal)
	 */
	for (i = 0; i < emif->plat_data->timings_arr_size; i++) {
		max = timings_arr[i].max_freq;
		min = timings_arr[i].min_freq;
		if ((freq >= min) && (freq <= max) && (max < freq_nearest)) {
			freq_nearest = max;
			timings = &timings_arr[i];
		}
	}

	if (!timings)
		dev_err(dev, "%s: couldn't find timings for - %dHz\n",
			__func__, freq);

	dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n",
		__func__, freq, freq_nearest);

	return timings;
}
static u32 get_sdram_ref_ctrl_shdw(u32 freq,
		const struct lpddr2_addressing *addressing)
{
	u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi;

	/* Scale down frequency and t_refi to avoid overflow */
	freq_khz = freq / 1000;
	t_refi = addressing->tREFI_ns / 100;

	/*
	 * refresh rate to be set is 'tREFI(in us) * freq in MHz';
	 * division by 10000 accounts for the change in units
	 */
	val = t_refi * freq_khz / 10000;
	ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT;

	return ref_ctrl_shdw;
}
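/*
 * Worked example for get_sdram_ref_ctrl_shdw() (illustrative values):
 * with tREFI_ns = 7800 (7.8 us) and freq = 400 MHz, t_refi = 78 and
 * freq_khz = 400000, so val = 78 * 400000 / 10000 = 3120 - one refresh
 * command every 3120 DDR clock cycles, which is 7.8 us at 400 MHz, as
 * expected.
 */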
static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
	tim1 |= val << T_WTR_SHIFT;

	if (addressing->num_banks == B8)
		val = DIV_ROUND_UP(timings->tFAW, t_ck*4);
	else
		val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck));
	tim1 |= (val - 1) << T_RRD_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1;
	tim1 |= val << T_RC_SHIFT;

	val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck));
	tim1 |= (val - 1) << T_RAS_SHIFT;

	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
	tim1 |= val << T_WR_SHIFT;

	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1;
	tim1 |= val << T_RCD_SHIFT;

	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1;
	tim1 |= val << T_RP_SHIFT;

	return tim1;
}
static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
	tim1 = val << T_WTR_SHIFT;

	/*
	 * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps
	 * to tFAW for de-rating
	 */
	if (addressing->num_banks == B8) {
		val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1;
	} else {
		val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck);
		val = max(min_tck->tRRD, val) - 1;
	}
	tim1 |= val << T_RRD_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck);
	tim1 |= (val - 1) << T_RC_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck);
	val = max(min_tck->tRASmin, val) - 1;
	tim1 |= val << T_RAS_SHIFT;

	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
	tim1 |= val << T_WR_SHIFT;

	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck));
	tim1 |= (val - 1) << T_RCD_SHIFT;

	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck));
	tim1 |= (val - 1) << T_RP_SHIFT;

	return tim1;
}
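/*
 * The 1875 ps added above is the JEDEC LPDDR2 de-rating adder: at high
 * temperatures the core timings (tRCD, tRC, tRAS, tRP, tRRD) must each
 * be relaxed by 1.875 ns. Illustrative example: with t_ck = 2500 ps and
 * tRCD = 15000 ps, the nominal value is DIV_ROUND_UP(15000, 2500) = 6
 * cycles, while the derated value is DIV_ROUND_UP(16875, 2500) = 7.
 */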
static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing,
		u32 type)
{
	u32 tim2 = 0, val = 0;

	val = min_tck->tCKE - 1;
	tim2 |= val << T_CKE_SHIFT;

	val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1;
	tim2 |= val << T_RTP_SHIFT;

	/* tXSNR = tRFCab_ps + 10 ns(tRFCab_ps for LPDDR2). */
	val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1;
	tim2 |= val << T_XSNR_SHIFT;

	/* XSRD same as XSNR for LPDDR2 */
	tim2 |= val << T_XSRD_SHIFT;

	val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1;
	tim2 |= val << T_XP_SHIFT;

	return tim2;
}
static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing,
		u32 type, u32 ip_rev, u32 derated)
{
	u32 tim3 = 0, val = 0, t_dqsck;

	val = timings->tRAS_max_ns / addressing->tREFI_ns - 1;
	val = val > 0xF ? 0xF : val;
	tim3 |= val << T_RAS_MAX_SHIFT;

	val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1;
	tim3 |= val << T_RFC_SHIFT;

	t_dqsck = (derated == EMIF_DERATED_TIMINGS) ?
			timings->tDQSCK_max_derated : timings->tDQSCK_max;
	if (ip_rev == EMIF_4D5)
		val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1;
	else
		val = DIV_ROUND_UP(t_dqsck, t_ck) - 1;

	tim3 |= val << T_TDQSCKMAX_SHIFT;

	val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1;
	tim3 |= val << ZQ_ZQCS_SHIFT;

	val = DIV_ROUND_UP(timings->tCKESR, t_ck);
	val = max(min_tck->tCKESR, val) - 1;
	tim3 |= val << T_CKESR_SHIFT;

	if (ip_rev == EMIF_4D5) {
		tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT;

		val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1;
		tim3 |= val << T_PDLL_UL_SHIFT;
	}

	return tim3;
}
static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
		bool cs1_used, bool cal_resistors_per_cs)
{
	u32 zq = 0, val = 0;

	val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
	zq |= val << ZQ_REFINTERVAL_SHIFT;

	val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQCL_MULT_SHIFT;

	val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQINIT_MULT_SHIFT;

	zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;

	if (cal_resistors_per_cs)
		zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
	else
		zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;

	zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */

	val = cs1_used ? 1 : 0;
	zq |= val << ZQ_CS1EN_SHIFT;

	return zq;
}
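/*
 * Note on the ZQ_REFINTERVAL field computed above (illustrative
 * numbers, assuming a 50 ms ZQCS interval for the sake of the
 * arithmetic): with tREFI_ns = 7800, val = 50000 * 1000 / 7800 ~= 6410,
 * i.e. a ZQ calibration short command roughly every 6410 refresh
 * periods. The actual interval comes from EMIF_ZQCS_INTERVAL_US in
 * emif.h.
 */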
static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
		const struct emif_custom_configs *custom_configs, bool cs1_used,
		u32 sdram_io_width, u32 emif_bus_width)
{
	u32 alert = 0, interval, devcnt;

	if (custom_configs && (custom_configs->mask &
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
		interval = custom_configs->temp_alert_poll_interval_ms;
	else
		interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;

	interval *= 1000000;			/* Convert to ns */
	interval /= addressing->tREFI_ns;	/* Convert to refresh cycles */
	alert |= (interval << TA_REFINTERVAL_SHIFT);

	/*
	 * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
	 * also to this form and subtract to get TA_DEVCNT, which is
	 * in log2(x) form.
	 */
	emif_bus_width = __fls(emif_bus_width) - 1;
	devcnt = emif_bus_width - sdram_io_width;
	alert |= devcnt << TA_DEVCNT_SHIFT;

	/* DEVWDT is in 'log2(x) - 3' form */
	alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;

	alert |= 1 << TA_SFEXITEN_SHIFT;
	alert |= 1 << TA_CS0EN_SHIFT;
	alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;

	return alert;
}
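/*
 * Worked example for the TA_DEVCNT/TA_DEVWDT encoding (illustrative):
 * two x16 parts behind a 32-bit EMIF give sdram_io_width = 3
 * (log2(16) - 1) and emif_bus_width = __fls(32) - 1 = 4, so
 * devcnt = 4 - 3 = 1 = log2(2 devices), and DEVWDT = 3 - 2 = 1 =
 * log2(16) - 3.
 */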
static u32 get_read_idle_ctrl_shdw(u8 volt_ramp)
{
	u32 idle = 0, val = 0;

	/*
	 * Maximum value in normal conditions and increased frequency
	 * when voltage is ramping
	 */
	if (volt_ramp)
		val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1;
	else
		val = 0x1FF;

	/*
	 * READ_IDLE_CTRL register in EMIF4D has same offset and fields
	 * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts
	 */
	idle |= val << DLL_CALIB_INTERVAL_SHIFT;
	idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT;

	return idle;
}
static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp)
{
	u32 calib = 0, val = 0;

	if (volt_ramp == DDR_VOLTAGE_RAMPING)
		val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1;
	else
		val = 0; /* Disabled when voltage is stable */

	calib |= val << DLL_CALIB_INTERVAL_SHIFT;
	calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT;

	return calib;
}
static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings,
	u32 freq, u8 RL)
{
	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0;

	val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1;
	phy |= val << READ_LATENCY_SHIFT_4D;

	if (freq <= 100000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY;
	else if (freq <= 200000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY;
	else
		val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY;

	phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D;

	return phy;
}
static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl)
{
	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay;

	/*
	 * DLL operates at 266 MHz. If the DDR frequency is near 266 MHz,
	 * half-delay is not needed, else set half-delay
	 */
	if (freq >= 265000000 && freq < 267000000)
		half_delay = 0;
	else
		half_delay = 1;

	phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5;
	phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS,
			t_ck) - 1) << READ_LATENCY_SHIFT_4D5);

	return phy;
}
static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
		fifo_we_slave_ratio << 22;
}

static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
		fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
}

static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
		fifo_we_slave_ratio << 13;
}
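/*
 * A reading of the shift arithmetic above (not a statement from the
 * TRM): the three ext_phy_ctrl registers pack the same DQS-gate ratio
 * once per byte lane, in fields 11 bits apart that straddle the 32-bit
 * register boundaries. The >> 10 and >> 9 terms carry the upper bits
 * of a field that overflows one register into the bottom bits of the
 * next.
 */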
static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
{
	u32 pwr_mgmt_ctrl	= 0, timeout;
	u32 lpmode		= EMIF_LP_MODE_SELF_REFRESH;
	u32 timeout_perf	= EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
	u32 timeout_pwr		= EMIF_LP_MODE_TIMEOUT_POWER;
	u32 freq_threshold	= EMIF_LP_MODE_FREQ_THRESHOLD;
	u32 mask;
	u8 shift;

	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;

	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
		lpmode		= cust_cfgs->lpmode;
		timeout_perf	= cust_cfgs->lpmode_timeout_performance;
		timeout_pwr	= cust_cfgs->lpmode_timeout_power;
		freq_threshold	= cust_cfgs->lpmode_freq_threshold;
	}

	/* Timeout based on DDR frequency */
	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;

	/*
	 * The value to be set in register is "log2(timeout) - 3"
	 * if timeout < 16 load 0 in register
	 * if timeout is not a power of 2, round to next highest power of 2
	 */
	if (timeout < 16) {
		timeout = 0;
	} else {
		if (timeout & (timeout - 1))
			timeout <<= 1;
		timeout = __fls(timeout) - 3;
	}

	switch (lpmode) {
	case EMIF_LP_MODE_CLOCK_STOP:
		shift = CS_TIM_SHIFT;
		mask = CS_TIM_MASK;
		break;
	case EMIF_LP_MODE_SELF_REFRESH:
		/* Workaround for errata i735 */
		if (timeout < 6)
			timeout = 6;

		shift = SR_TIM_SHIFT;
		mask = SR_TIM_MASK;
		break;
	case EMIF_LP_MODE_PWR_DN:
		shift = PD_TIM_SHIFT;
		mask = PD_TIM_MASK;
		break;
	case EMIF_LP_MODE_DISABLE:
	default:
		mask = 0;
		shift = 0;
		break;
	}
	/* Round to maximum in case of overflow, BUT warn! */
	if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
		pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
		       lpmode, timeout_perf, timeout_pwr, freq_threshold);
		WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
		     timeout, mask >> shift);
		timeout = mask >> shift;
	}

	/* Setup required timing */
	pwr_mgmt_ctrl = (timeout << shift) & mask;
	/* setup a default mask for rest of the modes */
	pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
			 ~mask;

	/* No CS_TIM in EMIF_4D5 */
	if (ip_rev == EMIF_4D5)
		pwr_mgmt_ctrl &= ~CS_TIM_MASK;

	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;

	return pwr_mgmt_ctrl;
}
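/*
 * Worked example for the timeout encoding above (illustrative): a
 * timeout of 512 cycles is a power of 2, so the field is
 * __fls(512) - 3 = 6. A timeout of 700 cycles is not, so it is first
 * shifted up (700 << 1 = 1400), giving __fls(1400) - 3 = 7, which
 * corresponds to the next highest power of 2, 1024 cycles.
 */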
/*
 * Get the temperature level of the EMIF instance:
 * Reads the MR4 register of attached SDRAM parts to find out the temperature
 * level. If there are two parts attached(one on each CS), then the temperature
 * level for the EMIF instance is the higher of the two temperatures.
 */
static void get_temperature_level(struct emif_data *emif)
{
	u32		temp, temperature_level;
	void __iomem	*base;

	base = emif->base;

	/* Read mode register 4 */
	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
				MR4_SDRAM_REF_RATE_SHIFT;

	if (emif->plat_data->device_info->cs1_used) {
		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
				>> MR4_SDRAM_REF_RATE_SHIFT;
		temperature_level = max(temp, temperature_level);
	}

	/* treat everything less than nominal(3) in MR4 as nominal */
	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
		temperature_level = SDRAM_TEMP_NOMINAL;

	/* if we get reserved value in MR4 persist with the existing value */
	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
		emif->temperature_level = temperature_level;
}
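/*
 * For reference (levels as encoded in jedec_ddr.h, quoted here for
 * clarity): the MR4 refresh-rate field reads 0x3 at nominal
 * temperature, 0x5 for "derate refresh", 0x6 for "derate refresh and
 * timings" and 0x7 for the shutdown level; 0x4 is reserved, which is
 * why it is ignored above.
 */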
/*
 * Program EMIF shadow registers that are not dependent on temperature
 * or voltage
 */
static void setup_registers(struct emif_data *emif, struct emif_regs *regs)
{
	void __iomem	*base = emif->base;

	writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW);
	writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW);
	writel(regs->pwr_mgmt_ctrl_shdw,
	       base + EMIF_POWER_MANAGEMENT_CTRL_SHDW);

	/* Settings specific for EMIF4D5 */
	if (emif->plat_data->ip_rev != EMIF_4D5)
		return;
	writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW);
	writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW);
	writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW);
}
/*
 * When voltage ramps, dll calibration and forced read idle should
 * happen more often
 */
static void setup_volt_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs, u32 volt_state)
{
	u32		calib_ctrl;
	void __iomem	*base = emif->base;

	/*
	 * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as
	 * EMIF_DLL_CALIB_CTRL in EMIF4D5 and dll_calib_ctrl_shadow_*
	 * is an alias of the respective read_idle_ctrl_shdw_* (members of
	 * a union). So, the below code takes care of both cases
	 */
	if (volt_state == DDR_VOLTAGE_RAMPING)
		calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp;
	else
		calib_ctrl = regs->dll_calib_ctrl_shdw_normal;

	writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW);
}
/*
 * setup_temperature_sensitive_regs() - set the timings for temperature
 * sensitive registers. This happens once at initialisation time based
 * on the temperature at boot time and subsequently based on the temperature
 * alert interrupt. Temperature alert can happen when the temperature
 * increases or drops. So this function can have the effect of either
 * derating the timings or going back to nominal values.
 */
static void setup_temperature_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs)
{
	u32		tim1, tim3, ref_ctrl, type;
	void __iomem	*base = emif->base;
	u32		temperature;

	type = emif->plat_data->device_info->type;

	tim1 = regs->sdram_tim1_shdw;
	tim3 = regs->sdram_tim3_shdw;
	ref_ctrl = regs->ref_ctrl_shdw;

	/* No de-rating for non-lpddr2 devices */
	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
		goto out;

	temperature = emif->temperature_level;
	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
		tim1 = regs->sdram_tim1_shdw_derated;
		tim3 = regs->sdram_tim3_shdw_derated;
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	}

out:
	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
}
static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
	u32		old_temp_level;
	irqreturn_t	ret = IRQ_HANDLED;
	struct emif_custom_configs *custom_configs;

	spin_lock_irqsave(&emif_lock, irq_state);
	old_temp_level = emif->temperature_level;
	get_temperature_level(emif);

	if (unlikely(emif->temperature_level == old_temp_level)) {
		goto out;
	} else if (!emif->curr_regs) {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
		goto out;
	}

	custom_configs = emif->plat_data->custom_configs;

	/*
	 * IF we detect higher than "nominal rating" from DDR sensor
	 * on an unsupported DDR part, shutdown system
	 */
	if (custom_configs && !(custom_configs->mask &
				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
			dev_err(emif->dev,
				"%s: not an extended temperature capable memory part, treating MR4=0x%02x as a shutdown event\n",
				__func__, emif->temperature_level);
			/*
			 * Temperature far too high - do kernel_power_off()
			 * from thread context
			 */
			emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
			ret = IRQ_WAKE_THREAD;
			goto out;
		}
	}

	if (emif->temperature_level < old_temp_level ||
		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		/*
		 * Temperature coming down - defer handling to thread OR
		 * Temperature far too high - do kernel_power_off() from
		 * thread context
		 */
		ret = IRQ_WAKE_THREAD;
	} else {
		/* Temperature is going up - handle immediately */
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	}

out:
	spin_unlock_irqrestore(&emif_lock, irq_state);
	return ret;
}
static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
{
	u32			interrupts;
	struct emif_data	*emif = dev_id;
	void __iomem		*base = emif->base;
	struct device		*dev = emif->dev;
	irqreturn_t		ret = IRQ_HANDLED;

	/* Save the status and clear it */
	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

	/*
	 * Handle temperature alert
	 * Temperature alert should be same for all ports
	 * So, it's enough to process it only for one of the ports
	 */
	if (interrupts & TA_SYS_MASK)
		ret = handle_temp_alert(base, emif);

	if (interrupts & ERR_SYS_MASK)
		dev_err(dev, "Access error from SYS port - %x\n", interrupts);

	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* Save the status and clear it */
		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

		if (interrupts & ERR_LL_MASK)
			dev_err(dev, "Access error from LL port - %x\n",
				interrupts);
	}

	return ret;
}
static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
	struct emif_data	*emif = dev_id;

	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

		/* If we have Power OFF ability, use it, else try restarting */
		if (pm_power_off) {
			kernel_power_off();
		} else {
			WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
			kernel_restart("SDRAM Over-temp Emergency restart");
		}
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&emif_lock, irq_state);

	if (emif->curr_regs) {
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	} else {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
	}

	spin_unlock_irqrestore(&emif_lock, irq_state);

	return IRQ_HANDLED;
}
static void clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
		base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
			base + EMIF_LL_OCP_INTERRUPT_STATUS);
}
static void disable_and_clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	/* Disable all interrupts */
	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
		base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
			base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);

	/* Clear all interrupts */
	clear_all_interrupts(emif);
}
static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
{
	u32		interrupts, type;
	void __iomem	*base = emif->base;

	type = emif->plat_data->device_info->type;

	clear_all_interrupts(emif);

	/* Enable interrupts for SYS interface */
	interrupts = EN_ERR_SYS_MASK;
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
		interrupts |= EN_TA_SYS_MASK;
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);

	/* Enable interrupts for LL interface */
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* TA need not be enabled for LL */
		interrupts = EN_ERR_LL_MASK;
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
	}

	/* setup IRQ handlers */
	return devm_request_threaded_irq(emif->dev, irq,
				    emif_interrupt_handler,
				    emif_threaded_isr,
				    0, dev_name(emif->dev),
				    emif);
}
static void __init_or_module emif_onetime_settings(struct emif_data *emif)
{
	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
	void __iomem			*base = emif->base;
	const struct lpddr2_addressing	*addressing;
	const struct ddr_device_info	*device_info;

	device_info = emif->plat_data->device_info;
	addressing = get_addressing_table(device_info);

	/*
	 * Init power management settings
	 * We don't know the frequency yet. Use a high frequency
	 * value for a conservative timeout setting
	 */
	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
			emif->plat_data->ip_rev);
	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

	/* Init ZQ calibration settings */
	zq = get_zq_config_reg(addressing, device_info->cs1_used,
		device_info->cal_resistors_per_cs);
	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);

	/* Check temperature level */
	get_temperature_level(emif);
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

	/* Init temperature polling */
	temp_alert_cfg = get_temp_alert_config(addressing,
		emif->plat_data->custom_configs, device_info->cs1_used,
		device_info->io_width, get_emif_bus_width(emif));
	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

	/*
	 * Program external PHY control registers that are not frequency
	 * dependent
	 */
	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
		return;
	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}
static void get_default_timings(struct emif_data *emif)
{
	struct emif_platform_data *pd = emif->plat_data;

	pd->timings		= lpddr2_jedec_timings;
	pd->timings_arr_size	= ARRAY_SIZE(lpddr2_jedec_timings);

	dev_warn(emif->dev, "%s: using default timings\n", __func__);
}
static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
		u32 ip_rev, struct device *dev)
{
	int valid;

	valid = (type == DDR_TYPE_LPDDR2_S4 ||
			type == DDR_TYPE_LPDDR2_S2)
		&& (density >= DDR_DENSITY_64Mb
			&& density <= DDR_DENSITY_8Gb)
		&& (io_width >= DDR_IO_WIDTH_8
			&& io_width <= DDR_IO_WIDTH_32);

	/* Combinations of EMIF and PHY revisions that we support today */
	switch (ip_rev) {
	case EMIF_4D:
		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
		break;
	case EMIF_4D5:
		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
		break;
	default:
		valid = 0;
	}

	if (!valid)
		dev_err(dev, "%s: invalid DDR details\n", __func__);
	return valid;
}
static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
		struct device *dev)
{
	int valid = 1;

	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
		valid = cust_cfgs->lpmode_freq_threshold &&
			cust_cfgs->lpmode_timeout_performance &&
			cust_cfgs->lpmode_timeout_power;

	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;

	if (!valid)
		dev_warn(dev, "%s: invalid custom configs\n", __func__);

	return valid;
}
#if defined(CONFIG_OF)
static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
		struct emif_data *emif)
{
	struct emif_custom_configs	*cust_cfgs = NULL;
	int				len;
	const __be32			*lpmode, *poll_intvl;

	lpmode = of_get_property(np_emif, "low-power-mode", &len);
	poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);

	if (lpmode || poll_intvl)
		cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
			GFP_KERNEL);

	if (!cust_cfgs)
		return;

	if (lpmode) {
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
		cust_cfgs->lpmode = be32_to_cpup(lpmode);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-performance",
				&cust_cfgs->lpmode_timeout_performance);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-power",
				&cust_cfgs->lpmode_timeout_power);
		of_property_read_u32(np_emif,
				"low-power-mode-freq-threshold",
				&cust_cfgs->lpmode_freq_threshold);
	}

	if (poll_intvl) {
		cust_cfgs->mask |=
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
		cust_cfgs->temp_alert_poll_interval_ms =
						be32_to_cpup(poll_intvl);
	}

	if (of_find_property(np_emif, "extended-temp-part", &len))
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;

	if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
		devm_kfree(emif->dev, cust_cfgs);
		return;
	}

	emif->plat_data->custom_configs = cust_cfgs;
}
static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
		struct device_node *np_ddr,
		struct ddr_device_info *dev_info)
{
	u32 density = 0, io_width = 0;
	int len;

	if (of_find_property(np_emif, "cs1-used", &len))
		dev_info->cs1_used = true;

	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
		dev_info->cal_resistors_per_cs = true;

	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
		dev_info->type = DDR_TYPE_LPDDR2_S4;
	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
		dev_info->type = DDR_TYPE_LPDDR2_S2;

	of_property_read_u32(np_ddr, "density", &density);
	of_property_read_u32(np_ddr, "io-width", &io_width);

	/* Convert from density in Mb to the density encoding in jedec_ddr.h */
	if (density & (density - 1))
		dev_info->density = 0;
	else
		dev_info->density = __fls(density) - 5;

	/* Convert from io_width in bits to io_width encoding in jedec_ddr.h */
	if (io_width & (io_width - 1))
		dev_info->io_width = 0;
	else
		dev_info->io_width = __fls(io_width) - 1;
}
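/*
 * Worked example for the conversions above (illustrative): a "density"
 * property of 2048 (Mb) gives __fls(2048) - 5 = 6, matching
 * DDR_DENSITY_2Gb, and an "io-width" of 32 gives __fls(32) - 1 = 4,
 * matching DDR_IO_WIDTH_32. Non-power-of-2 values are mapped to 0,
 * i.e. invalid.
 */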
static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info = NULL;
	struct emif_platform_data	*pd = NULL;
	struct device_node		*np_ddr;
	int				len;

	np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
	if (!np_ddr)
		goto error;
	emif	= devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
	pd	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !pd || !dev_info) {
		dev_err(dev, "%s: Out of memory!!\n", __func__);
		goto error;
	}

	emif->plat_data		= pd;
	pd->device_info		= dev_info;
	emif->dev		= dev;
	emif->np_ddr		= np_ddr;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	if (of_device_is_compatible(np_emif, "ti,emif-4d"))
		emif->plat_data->ip_rev = EMIF_4D;
	else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
		emif->plat_data->ip_rev = EMIF_4D5;

	of_property_read_u32(np_emif, "phy-type", &pd->phy_type);

	if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
		pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;

	of_get_ddr_info(np_emif, np_ddr, dev_info);
	if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
			pd->device_info->io_width, pd->phy_type, pd->ip_rev,
			dev)) {
		dev_err(dev, "%s: invalid device data!!\n", __func__);
		goto error;
	}
	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly same as on EMIF1(which is typically the case). If so,
	 * mark it as a duplicate of EMIF1. This will save some memory and
	 * some computation later.
	 */
	if (emif1 && emif1->np_ddr == np_ddr) {
		emif->duplicate = true;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	of_get_custom_configs(np_emif, emif);
	emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
					emif->plat_data->device_info->type,
					&emif->plat_data->timings_arr_size);

	emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);
	goto out;

error:
	return NULL;
out:
	return emif;
}
#else

static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	return NULL;
}
#endif
static struct emif_data *__init_or_module get_device_details(
		struct platform_device *pdev)
{
	u32				size;
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info;
	struct emif_custom_configs	*cust_cfgs;
	struct emif_platform_data	*pd;
	struct device			*dev;
	void				*temp;

	pd = pdev->dev.platform_data;
	dev = &pdev->dev;

	if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
			pd->device_info->density, pd->device_info->io_width,
			pd->phy_type, pd->ip_rev, dev))) {
		dev_err(dev, "%s: invalid device data\n", __func__);
		goto error;
	}

	emif	= devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
	temp	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !temp || !dev_info) {
		dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
		goto error;
	}

	memcpy(temp, pd, sizeof(*pd));
	pd = temp;
	memcpy(dev_info, pd->device_info, sizeof(*dev_info));

	pd->device_info		= dev_info;
	emif->plat_data		= pd;
	emif->dev		= dev;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly same as on EMIF1(which is typically the case). If so,
	 * mark it as a duplicate of EMIF1 and skip copying timings data.
	 * This will save some memory and some computation later.
	 */
	emif->duplicate = emif1 && (memcmp(dev_info,
		emif1->plat_data->device_info,
		sizeof(struct ddr_device_info)) == 0);

	if (emif->duplicate) {
		pd->timings = NULL;
		pd->min_tck = NULL;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	/*
	 * Copy custom configs - ignore allocation error, if any, as
	 * custom_configs is not very critical
	 */
	cust_cfgs = pd->custom_configs;
	if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
		temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
		if (temp)
			memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
		else
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
		pd->custom_configs = temp;
	}

	/*
	 * Copy timings and min-tck values from platform data. If it is not
	 * available or if memory allocation fails, use JEDEC defaults
	 */
	size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
	if (pd->timings) {
		temp = devm_kzalloc(dev, size, GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->timings, size);
			pd->timings = temp;
		} else {
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
			get_default_timings(emif);
		}
	} else {
		get_default_timings(emif);
	}

	if (pd->min_tck) {
		temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
			pd->min_tck = temp;
		} else {
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
			pd->min_tck = &lpddr2_jedec_min_tck;
		}
	} else {
		pd->min_tck = &lpddr2_jedec_min_tck;
	}

out:
	return emif;

error:
	return NULL;
}
static int __init_or_module emif_probe(struct platform_device *pdev)
{
	struct emif_data	*emif;
	struct resource		*res;
	int			irq, ret;

	if (pdev->dev.of_node)
		emif = of_get_memory_device_details(pdev->dev.of_node,
			&pdev->dev);
	else
		emif = get_device_details(pdev);

	if (!emif) {
		pr_err("%s: error getting device data\n", __func__);
		goto error;
	}

	list_add(&emif->node, &device_list);
	emif->addressing = get_addressing_table(emif->plat_data->device_info);

	/* Save pointers to each other in emif and device structures */
	emif->dev = &pdev->dev;
	platform_set_drvdata(pdev, emif);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	emif->base = devm_ioremap_resource(emif->dev, res);
	if (IS_ERR(emif->base))
		goto error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
			__func__, irq);
		goto error;
	}

	emif_onetime_settings(emif);
	emif_debugfs_init(emif);
	disable_and_clear_all_interrupts(emif);
	ret = setup_interrupts(emif, irq);
	if (ret)
		goto error;

	/* One-time actions taken on probing the first device */
	if (!emif1) {
		emif1 = emif;
		spin_lock_init(&emif_lock);

		/*
		 * TODO: register notifiers for frequency and voltage
		 * change here once the respective frameworks are
		 * available
		 */
	}

	dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
		__func__, emif->base, irq);

	return 0;
error:
	return -ENODEV;
}
static int __exit emif_remove(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	emif_debugfs_exit(emif);

	return 0;
}

static void emif_shutdown(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	disable_and_clear_all_interrupts(emif);
}
static int get_emif_reg_values(struct emif_data *emif, u32 freq,
		struct emif_regs *regs)
{
	u32				cs1_used, ip_rev, phy_type;
	u32				cl, type;
	const struct lpddr2_timings	*timings;
	const struct lpddr2_min_tck	*min_tck;
	const struct ddr_device_info	*device_info;
	const struct lpddr2_addressing	*addressing;
	struct emif_data		*emif_for_calc;
	struct device			*dev = emif->dev;
	const struct emif_custom_configs *custom_configs;

	/*
	 * If the devices on this EMIF instance are a duplicate of EMIF1,
	 * use EMIF1 details for the calculation
	 */
	emif_for_calc	= emif->duplicate ? emif1 : emif;
	timings		= get_timings_table(emif_for_calc, freq);
	addressing	= emif_for_calc->addressing;
	if (!timings || !addressing) {
		dev_err(dev, "%s: not enough data available for %dHz",
			__func__, freq);
		return -1;
	}

	device_info	= emif_for_calc->plat_data->device_info;
	type		= device_info->type;
	cs1_used	= device_info->cs1_used;
	ip_rev		= emif_for_calc->plat_data->ip_rev;
	phy_type	= emif_for_calc->plat_data->phy_type;

	min_tck		= emif_for_calc->plat_data->min_tck;
	custom_configs	= emif_for_calc->plat_data->custom_configs;

	set_ddr_clk_period(freq);

	regs->ref_ctrl_shdw	= get_sdram_ref_ctrl_shdw(freq, addressing);
	regs->sdram_tim1_shdw	= get_sdram_tim_1_shdw(timings, min_tck,
					addressing);
	regs->sdram_tim2_shdw	= get_sdram_tim_2_shdw(timings, min_tck,
					addressing, type);
	regs->sdram_tim3_shdw	= get_sdram_tim_3_shdw(timings, min_tck,
				addressing, type, ip_rev, EMIF_NORMAL_TIMINGS);

	cl = get_cl(emif);

	if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) {
		regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d(
			timings, freq, cl);
	} else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) {
		regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
		regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5();
		regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5();
		regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5();
	} else {
		return -1;
	}

	/* Only timeout values in pwr_mgmt_ctrl_shdw register */
	regs->pwr_mgmt_ctrl_shdw =
		get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) &
		(CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK);

	if (ip_rev & EMIF_4D) {
		regs->read_idle_ctrl_shdw_normal =
			get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE);

		regs->read_idle_ctrl_shdw_volt_ramp =
			get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING);
	} else if (ip_rev & EMIF_4D5) {
		regs->dll_calib_ctrl_shdw_normal =
			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE);

		regs->dll_calib_ctrl_shdw_volt_ramp =
			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4,
			addressing);

		regs->sdram_tim1_shdw_derated =
			get_sdram_tim_1_shdw_derated(timings, min_tck,
				addressing);

		regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings,
			min_tck, addressing, type, ip_rev,
			EMIF_DERATED_TIMINGS);
	}

	regs->freq = freq;

	return 0;
}
/*
 * get_regs() - gets the cached emif_regs structure for a given EMIF instance
 * and frequency (freq):
 *
 * As an optimisation, every EMIF instance other than EMIF1 shares the
 * register cache with EMIF1 if the devices connected on this instance
 * are same as that on EMIF1(indicated by the duplicate flag)
 *
 * If we do not have an entry corresponding to the frequency given, we
 * allocate a new entry and calculate the values
 *
 * Upon finding the right reg dump, save it in curr_regs. It can be
 * directly used for thermal de-rating and voltage ramping changes.
 */
static struct emif_regs *get_regs(struct emif_data *emif, u32 freq)
{
	int			i;
	struct emif_regs	**regs_cache;
	struct emif_regs	*regs = NULL;
	struct device		*dev;

	dev = emif->dev;
	if (emif->curr_regs && emif->curr_regs->freq == freq) {
		dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq);
		return emif->curr_regs;
	}

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		if (regs_cache[i]->freq == freq) {
			regs = regs_cache[i];
			dev_dbg(dev,
				"%s: reg dump found in reg cache for %u Hz\n",
				__func__, freq);
			break;
		}
	}

	/*
	 * If we don't have an entry for this frequency in the cache create one
	 * and calculate the values
	 */
	if (!regs) {
		regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC);
		if (!regs)
			return NULL;

		if (get_emif_reg_values(emif, freq, regs)) {
			devm_kfree(emif->dev, regs);
			return NULL;
		}

		/*
		 * Now look for an un-used entry in the cache and save the
		 * newly created struct. If there are no free entries
		 * over-write the last entry
		 */
		for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
			;

		if (i >= EMIF_MAX_NUM_FREQUENCIES) {
			dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n",
				__func__);
			i = EMIF_MAX_NUM_FREQUENCIES - 1;
			devm_kfree(emif->dev, regs_cache[i]);
		}
		regs_cache[i] = regs;
	}

	return regs;
}
static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state)
{
	dev_dbg(emif->dev, "%s: voltage notification : %d", __func__,
		volt_state);

	if (!emif->curr_regs) {
		dev_err(emif->dev,
			"%s: volt-notify before registers are ready: %d\n",
			__func__, volt_state);
		return;
	}

	setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state);
}

/*
 * TODO: voltage notify handling should be hooked up to
 * regulator framework as soon as the necessary support
 * is available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) volt_notify_handling(u32 volt_state)
{
	struct emif_data *emif;

	spin_lock_irqsave(&emif_lock, irq_state);

	list_for_each_entry(emif, &device_list, node)
		do_volt_notify_handling(emif, volt_state);
	do_freq_update();

	spin_unlock_irqrestore(&emif_lock, irq_state);
}
static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq)
{
	struct emif_regs *regs;

	regs = get_regs(emif, new_freq);
	if (!regs)
		return;

	emif->curr_regs = regs;

	/*
	 * Update the shadow registers:
	 * Temperature and voltage-ramp sensitive settings are also configured
	 * in terms of DDR cycles. So, we need to update them too when there
	 * is a freq change
	 */
	dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz",
		__func__, new_freq);
	setup_registers(emif, regs);
	setup_temperature_sensitive_regs(emif, regs);
	setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE);

	/*
	 * Part of workaround for errata i728. See do_freq_update()
	 * for more details
	 */
	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
		set_lpmode(emif, EMIF_LP_MODE_DISABLE);
}
/*
 * TODO: frequency notify handling should be hooked up to
 * clock framework as soon as the necessary support is
 * available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq)
{
	struct emif_data *emif;

	/*
	 * NOTE: we are taking the spin-lock here and releasing it
	 * only in the post-notifier. This doesn't look good and
	 * Sparse complains about it, but this seems to be
	 * un-avoidable. We need to lock a sequence of events
	 * that is split between EMIF and clock framework.
	 *
	 * 1. EMIF driver updates EMIF timings in shadow registers in the
	 *    frequency pre-notify callback from clock framework
	 * 2. clock framework sets up the registers for the new frequency
	 * 3. clock framework initiates a hw-sequence that updates
	 *    the frequency EMIF timings synchronously.
	 *
	 * All these 3 steps should be performed as an atomic operation
	 * vis-a-vis similar sequence in the EMIF interrupt handler
	 * for temperature events. Otherwise, there could be race
	 * conditions that could result in incorrect EMIF timings for
	 * a given frequency
	 */
	spin_lock_irqsave(&emif_lock, irq_state);

	list_for_each_entry(emif, &device_list, node)
		do_freq_pre_notify_handling(emif, new_freq);
}
static void do_freq_post_notify_handling(struct emif_data *emif)
{
	/*
	 * Part of workaround for errata i728. See do_freq_update()
	 * for more details
	 */
	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
		set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
}

/*
 * TODO: frequency notify handling should be hooked up to
 * clock framework as soon as the necessary support is
 * available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) freq_post_notify_handling(void)
{
	struct emif_data *emif;

	list_for_each_entry(emif, &device_list, node)
		do_freq_post_notify_handling(emif);

	/*
	 * Lock is done in pre-notify handler. See freq_pre_notify_handling()
	 * for more details
	 */
	spin_unlock_irqrestore(&emif_lock, irq_state);
}
#if defined(CONFIG_OF)
static const struct of_device_id emif_of_match[] = {
		{ .compatible = "ti,emif-4d" },
		{ .compatible = "ti,emif-4d5" },
		{},
};
MODULE_DEVICE_TABLE(of, emif_of_match);
#endif

static struct platform_driver emif_driver = {
	.remove		= __exit_p(emif_remove),
	.shutdown	= emif_shutdown,
	.driver = {
		.name = "emif",
		.of_match_table = of_match_ptr(emif_of_match),
	},
};

module_platform_driver_probe(emif_driver, emif_probe);

MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:emif");
MODULE_AUTHOR("Texas Instruments Inc");