GNU Linux-libre 5.10.215-gnu1
[releases.git] / drivers / mtd / spi-nor / controllers / intel-spi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel PCH/PCU SPI flash driver.
4  *
5  * Copyright (C) 2016, Intel Corporation
6  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
7  */
8
9 #include <linux/err.h>
10 #include <linux/io.h>
11 #include <linux/iopoll.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/sizes.h>
15 #include <linux/mtd/mtd.h>
16 #include <linux/mtd/partitions.h>
17 #include <linux/mtd/spi-nor.h>
18 #include <linux/platform_data/intel-spi.h>
19
20 #include "intel-spi.h"
21
/* Offsets are from @ispi->base */
#define BFPREG				0x00

#define HSFSTS_CTL			0x04
#define HSFSTS_CTL_FSMIE		BIT(31)
/* FDBC is the 0-based flash data byte count of a transfer (0 == 1 byte) */
#define HSFSTS_CTL_FDBC_SHIFT		24
#define HSFSTS_CTL_FDBC_MASK		(0x3f << HSFSTS_CTL_FDBC_SHIFT)

#define HSFSTS_CTL_FCYCLE_SHIFT		17
#define HSFSTS_CTL_FCYCLE_MASK		(0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
/* HW sequencer opcodes */
#define HSFSTS_CTL_FCYCLE_READ		(0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRITE		(0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE		(0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K	(0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID		(0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR		(0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR		(0x08 << HSFSTS_CTL_FCYCLE_SHIFT)

#define HSFSTS_CTL_FGO			BIT(16)
#define HSFSTS_CTL_FLOCKDN		BIT(15)
#define HSFSTS_CTL_FDV			BIT(14)
#define HSFSTS_CTL_SCIP			BIT(5)
/* FCERR and FDONE are write-1-to-clear status bits */
#define HSFSTS_CTL_AEL			BIT(2)
#define HSFSTS_CTL_FCERR		BIT(1)
#define HSFSTS_CTL_FDONE		BIT(0)

#define FADDR				0x08
#define DLOCK				0x0c
#define FDATA(n)			(0x10 + ((n) * 4))

#define FRACC				0x50

#define FREG(n)				(0x54 + ((n) * 4))
#define FREG_BASE_MASK			GENMASK(14, 0)
#define FREG_LIMIT_SHIFT		16
#define FREG_LIMIT_MASK			GENMASK(30, 16)

/* Offset is from @ispi->pregs */
#define PR(n)				((n) * 4)
#define PR_WPE				BIT(31)
#define PR_LIMIT_SHIFT			16
#define PR_LIMIT_MASK			GENMASK(30, 16)
#define PR_RPE				BIT(15)
#define PR_BASE_MASK			GENMASK(14, 0)

/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL			0x00
#define SSFSTS_CTL_FSMIE		BIT(23)
#define SSFSTS_CTL_DS			BIT(22)
#define SSFSTS_CTL_DBC_SHIFT		16
#define SSFSTS_CTL_SPOP			BIT(11)
#define SSFSTS_CTL_ACS			BIT(10)
#define SSFSTS_CTL_SCGO			BIT(9)
#define SSFSTS_CTL_COP_SHIFT		12
#define SSFSTS_CTL_FRS			BIT(7)
#define SSFSTS_CTL_DOFRS		BIT(6)
#define SSFSTS_CTL_AEL			BIT(4)
#define SSFSTS_CTL_FCERR		BIT(3)
#define SSFSTS_CTL_FDONE		BIT(2)
#define SSFSTS_CTL_SCIP			BIT(0)

/* Low 8 bits hold preopcodes, high 16 bits hold opcode types */
#define PREOP_OPTYPE			0x04
#define OPMENU0				0x08
#define OPMENU1				0x0c

/* Opcode types programmed into PREOP_OPTYPE for the SW sequencer */
#define OPTYPE_READ_NO_ADDR		0
#define OPTYPE_WRITE_NO_ADDR		1
#define OPTYPE_READ_WITH_ADDR		2
#define OPTYPE_WRITE_WITH_ADDR		3

/* CPU specifics */
#define BYT_PR				0x74
#define BYT_SSFSTS_CTL			0x90
#define BYT_BCR				0xfc
#define BYT_BCR_WPD			BIT(0)
#define BYT_FREG_NUM			5
#define BYT_PR_NUM			5

#define LPT_PR				0x74
#define LPT_SSFSTS_CTL			0x90
#define LPT_FREG_NUM			5
#define LPT_PR_NUM			5

#define BXT_PR				0x84
#define BXT_SSFSTS_CTL			0xa0
#define BXT_FREG_NUM			12
#define BXT_PR_NUM			6

/* CNL has no software sequencer registers at all */
#define CNL_PR				0x84
#define CNL_FREG_NUM			6
#define CNL_PR_NUM			5

/* Vendor Specific Component Capabilities; hold the HW-seq erase opcodes */
#define LVSCC				0xc4
#define UVSCC				0xc8
#define ERASE_OPCODE_SHIFT		8
#define ERASE_OPCODE_MASK		(0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT		16
#define ERASE_64K_OPCODE_MASK		(0xff << ERASE_64K_OPCODE_SHIFT)

#define INTEL_SPI_TIMEOUT		5000 /* ms */
#define INTEL_SPI_FIFO_SZ		64
/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @nor: SPI NOR layer structure
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers (%NULL when the
 *	   controller has no software sequencer, e.g. INTEL_SPI_CNL)
 * @nregions: Maximum number of regions
 * @pr_num: Maximum number of protected range registers
 * @locked: Is SPI setting locked
 * @swseq_reg: Use SW sequencer in register reads/writes
 * @swseq_erase: Use SW sequencer in erase operation
 * @erase_64k: 64k erase supported
 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
 *           before it locks down the controller.
 */
struct intel_spi {
	struct device *dev;
	const struct intel_spi_boardinfo *info;
	struct spi_nor nor;
	void __iomem *base;
	void __iomem *pregs;
	void __iomem *sregs;
	size_t nregions;
	size_t pr_num;
	bool locked;
	bool swseq_reg;
	bool swseq_erase;
	bool erase_64k;
	u8 atomic_preopcode;
	u8 opcodes[8];
};
159
/*
 * Write access is off by default; it is only enabled when the user asks
 * for it AND the controller's write protection can actually be cleared
 * (see intel_spi_set_writeable() and intel_spi_init()).
 */
static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
163
/*
 * Dump the controller register state through dev_dbg(). Purely for
 * debugging; the only side effects are the MMIO reads themselves.
 */
static void intel_spi_dump_regs(struct intel_spi *ispi)
{
	u32 value;
	int i;

	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

	value = readl(ispi->base + HSFSTS_CTL);
	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
	if (value & HSFSTS_CTL_FLOCKDN)
		dev_dbg(ispi->dev, "-> Locked\n");

	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

	for (i = 0; i < 16; i++)
		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
			i, readl(ispi->base + FDATA(i)));

	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

	for (i = 0; i < ispi->nregions; i++)
		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
			readl(ispi->base + FREG(i)));
	for (i = 0; i < ispi->pr_num; i++)
		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
			readl(ispi->pregs + PR(i)));

	/* SW sequencer registers exist only on some controllers */
	if (ispi->sregs) {
		value = readl(ispi->sregs + SSFSTS_CTL);
		dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
		dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
			readl(ispi->sregs + PREOP_OPTYPE));
		dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
			readl(ispi->sregs + OPMENU0));
		dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
			readl(ispi->sregs + OPMENU1));
	}

	if (ispi->info->type == INTEL_SPI_BYT)
		dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));

	dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
	dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));

	/* Decode enabled protected ranges; base/limit are 4K-granular */
	dev_dbg(ispi->dev, "Protected regions:\n");
	for (i = 0; i < ispi->pr_num; i++) {
		u32 base, limit;

		value = readl(ispi->pregs + PR(i));
		if (!(value & (PR_WPE | PR_RPE)))
			continue;

		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		base = value & PR_BASE_MASK;

		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
			 i, base << 12, (limit << 12) | 0xfff,
			 value & PR_WPE ? 'W' : '.',
			 value & PR_RPE ? 'R' : '.');
	}

	/* Decode flash regions; base >= limit (or limit 0) means disabled */
	dev_dbg(ispi->dev, "Flash regions:\n");
	for (i = 0; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || (i > 0 && limit == 0))
			dev_dbg(ispi->dev, " %02d disabled\n", i);
		else
			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
				 i, base << 12, (limit << 12) | 0xfff);
	}

	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
		ispi->swseq_reg ? 'S' : 'H');
	dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
		ispi->swseq_erase ? 'S' : 'H');
}
246
247 /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
248 static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
249 {
250         size_t bytes;
251         int i = 0;
252
253         if (size > INTEL_SPI_FIFO_SZ)
254                 return -EINVAL;
255
256         while (size > 0) {
257                 bytes = min_t(size_t, size, 4);
258                 memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
259                 size -= bytes;
260                 buf += bytes;
261                 i++;
262         }
263
264         return 0;
265 }
266
267 /* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
268 static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
269                                  size_t size)
270 {
271         size_t bytes;
272         int i = 0;
273
274         if (size > INTEL_SPI_FIFO_SZ)
275                 return -EINVAL;
276
277         while (size > 0) {
278                 bytes = min_t(size_t, size, 4);
279                 memcpy_toio(ispi->base + FDATA(i), buf, bytes);
280                 size -= bytes;
281                 buf += bytes;
282                 i++;
283         }
284
285         return 0;
286 }
287
/*
 * Poll HSFSTS_CTL until the HW sequencer's "cycle in progress" bit
 * clears. Returns 0 on completion or -ETIMEDOUT after
 * INTEL_SPI_TIMEOUT ms.
 */
static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
				  !(val & HSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}
296
/*
 * Poll SSFSTS_CTL until the SW sequencer's "cycle in progress" bit
 * clears. Only valid when @ispi->sregs is non-NULL. Returns 0 on
 * completion or -ETIMEDOUT after INTEL_SPI_TIMEOUT ms.
 */
static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
				  !(val & SSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}
305
306 static bool intel_spi_set_writeable(struct intel_spi *ispi)
307 {
308         if (!ispi->info->set_writeable)
309                 return false;
310
311         return ispi->info->set_writeable(ispi->base, ispi->info->data);
312 }
313
/*
 * intel_spi_init() - Probe-time controller setup.
 *
 * Derives the register layout from the controller type, optionally lifts
 * write protection, masks #SMI generation, decides whether HW or SW
 * sequencer must be used for register/erase cycles, and caches the
 * BIOS-programmed opcode menu when the controller is locked down.
 *
 * Returns 0 on success or -EINVAL on unknown controller type or an
 * impossible HW/SW sequencer combination.
 */
static int intel_spi_init(struct intel_spi *ispi)
{
	u32 opmenu0, opmenu1, lvscc, uvscc, val;
	int i;

	switch (ispi->info->type) {
	case INTEL_SPI_BYT:
		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BYT_PR;
		ispi->nregions = BYT_FREG_NUM;
		ispi->pr_num = BYT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_LPT:
		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
		ispi->pregs = ispi->base + LPT_PR;
		ispi->nregions = LPT_FREG_NUM;
		ispi->pr_num = LPT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_BXT:
		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BXT_PR;
		ispi->nregions = BXT_FREG_NUM;
		ispi->pr_num = BXT_PR_NUM;
		ispi->erase_64k = true;
		break;

	case INTEL_SPI_CNL:
		/* CNL has no SW sequencer at all */
		ispi->sregs = NULL;
		ispi->pregs = ispi->base + CNL_PR;
		ispi->nregions = CNL_FREG_NUM;
		ispi->pr_num = CNL_PR_NUM;
		break;

	default:
		return -EINVAL;
	}

	/* Try to disable write protection if user asked to do so */
	if (writeable && !intel_spi_set_writeable(ispi)) {
		dev_warn(ispi->dev, "can't disable chip write protection\n");
		writeable = false;
	}

	/* Disable #SMI generation from HW sequencer */
	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~HSFSTS_CTL_FSMIE;
	writel(val, ispi->base + HSFSTS_CTL);

	/*
	 * Determine whether erase operation should use HW or SW sequencer.
	 *
	 * The HW sequencer has a predefined list of opcodes, with only the
	 * erase opcode being programmable in LVSCC and UVSCC registers.
	 * If these registers don't contain a valid erase opcode, erase
	 * cannot be done using HW sequencer.
	 */
	lvscc = readl(ispi->base + LVSCC);
	uvscc = readl(ispi->base + UVSCC);
	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
		ispi->swseq_erase = true;
	/* SPI controller on Intel BXT supports 64K erase opcode */
	if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
		if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
		    !(uvscc & ERASE_64K_OPCODE_MASK))
			ispi->erase_64k = false;

	/* SW sequencer required but this controller doesn't have one */
	if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) {
		dev_err(ispi->dev, "software sequencer not supported, but required\n");
		return -EINVAL;
	}

	/*
	 * Some controllers can only do basic operations using hardware
	 * sequencer. All other operations are supposed to be carried out
	 * using software sequencer.
	 */
	if (ispi->swseq_reg) {
		/* Disable #SMI generation from SW sequencer */
		val = readl(ispi->sregs + SSFSTS_CTL);
		val &= ~SSFSTS_CTL_FSMIE;
		writel(val, ispi->sregs + SSFSTS_CTL);
	}

	/* Check controller's lock status */
	val = readl(ispi->base + HSFSTS_CTL);
	ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);

	if (ispi->locked && ispi->sregs) {
		/*
		 * BIOS programs allowed opcodes and then locks down the
		 * register. So read back what opcodes it decided to support.
		 * That's the set we are going to support as well.
		 */
		opmenu0 = readl(ispi->sregs + OPMENU0);
		opmenu1 = readl(ispi->sregs + OPMENU1);

		if (opmenu0 && opmenu1) {
			/* Four opcodes per 32-bit OPMENU register */
			for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
				ispi->opcodes[i] = opmenu0 >> i * 8;
				ispi->opcodes[i + 4] = opmenu1 >> i * 8;
			}
		}
	}

	intel_spi_dump_regs(ispi);

	return 0;
}
426
427 static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
428 {
429         int i;
430         int preop;
431
432         if (ispi->locked) {
433                 for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
434                         if (ispi->opcodes[i] == opcode)
435                                 return i;
436
437                 return -EINVAL;
438         }
439
440         /* The lock is off, so just use index 0 */
441         writel(opcode, ispi->sregs + OPMENU0);
442         preop = readw(ispi->sregs + PREOP_OPTYPE);
443         writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
444
445         return 0;
446 }
447
448 static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
449 {
450         u32 val, status;
451         int ret;
452
453         val = readl(ispi->base + HSFSTS_CTL);
454         val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
455
456         switch (opcode) {
457         case SPINOR_OP_RDID:
458                 val |= HSFSTS_CTL_FCYCLE_RDID;
459                 break;
460         case SPINOR_OP_WRSR:
461                 val |= HSFSTS_CTL_FCYCLE_WRSR;
462                 break;
463         case SPINOR_OP_RDSR:
464                 val |= HSFSTS_CTL_FCYCLE_RDSR;
465                 break;
466         default:
467                 return -EINVAL;
468         }
469
470         if (len > INTEL_SPI_FIFO_SZ)
471                 return -EINVAL;
472
473         val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
474         val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
475         val |= HSFSTS_CTL_FGO;
476         writel(val, ispi->base + HSFSTS_CTL);
477
478         ret = intel_spi_wait_hw_busy(ispi);
479         if (ret)
480                 return ret;
481
482         status = readl(ispi->base + HSFSTS_CTL);
483         if (status & HSFSTS_CTL_FCERR)
484                 return -EIO;
485         else if (status & HSFSTS_CTL_AEL)
486                 return -EACCES;
487
488         return 0;
489 }
490
/*
 * intel_spi_sw_cycle() - Run a register cycle using the SW sequencer.
 * @ispi: the controller (must have @sregs)
 * @opcode: SPI NOR opcode to execute
 * @len: number of data bytes (0..INTEL_SPI_FIFO_SZ; 0 means no data phase)
 * @optype: one of the OPTYPE_* constants
 *
 * If an atomic preopcode (e.g. WREN) was latched by a previous
 * intel_spi_write_reg() call, the cycle is run as an atomic
 * preop + opcode sequence.
 *
 * Returns 0 on success, -EINVAL on unsupported opcode/length or a
 * preopcode mismatch, -EIO on flash cycle error, -EACCES on access
 * violation, or a timeout error from intel_spi_wait_sw_busy().
 */
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
			      int optype)
{
	u32 val = 0, status;
	u8 atomic_preopcode;
	int ret;

	ret = intel_spi_opcode_index(ispi, opcode, optype);
	if (ret < 0)
		return ret;

	if (len > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	/*
	 * Always clear it after each SW sequencer operation regardless
	 * of whether it is successful or not.
	 */
	atomic_preopcode = ispi->atomic_preopcode;
	ispi->atomic_preopcode = 0;

	/* Only mark 'Data Cycle' bit when there is data to be transferred */
	if (len > 0)
		val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
	/* ret now holds the OPMENU slot index found above */
	val |= ret << SSFSTS_CTL_COP_SHIFT;
	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
	val |= SSFSTS_CTL_SCGO;
	if (atomic_preopcode) {
		u16 preop;

		switch (optype) {
		case OPTYPE_WRITE_NO_ADDR:
		case OPTYPE_WRITE_WITH_ADDR:
			/* Pick matching preopcode for the atomic sequence */
			preop = readw(ispi->sregs + PREOP_OPTYPE);
			if ((preop & 0xff) == atomic_preopcode)
				; /* Do nothing */
			else if ((preop >> 8) == atomic_preopcode)
				val |= SSFSTS_CTL_SPOP;
			else
				return -EINVAL;

			/* Enable atomic sequence */
			val |= SSFSTS_CTL_ACS;
			break;

		default:
			/* Atomic sequences only make sense for writes */
			return -EINVAL;
		}

	}
	writel(val, ispi->sregs + SSFSTS_CTL);

	ret = intel_spi_wait_sw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->sregs + SSFSTS_CTL);
	if (status & SSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & SSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
556
557 static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
558                               size_t len)
559 {
560         struct intel_spi *ispi = nor->priv;
561         int ret;
562
563         /* Address of the first chip */
564         writel(0, ispi->base + FADDR);
565
566         if (ispi->swseq_reg)
567                 ret = intel_spi_sw_cycle(ispi, opcode, len,
568                                          OPTYPE_READ_NO_ADDR);
569         else
570                 ret = intel_spi_hw_cycle(ispi, opcode, len);
571
572         if (ret)
573                 return ret;
574
575         return intel_spi_read_block(ispi, buf, len);
576 }
577
/*
 * spi-nor write_reg callback. WREN and WRDI get special treatment (see
 * the comments below); everything else is pushed through the FIFO and
 * executed via the HW or SW sequencer.
 */
static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
			       size_t len)
{
	struct intel_spi *ispi = nor->priv;
	int ret;

	/*
	 * This is handled with atomic operation and preop code in Intel
	 * controller so we only verify that it is available. If the
	 * controller is not locked, program the opcode to the PREOP
	 * register for later use.
	 *
	 * When hardware sequencer is used there is no need to program
	 * any opcodes (it handles them automatically as part of a command).
	 */
	if (opcode == SPINOR_OP_WREN) {
		u16 preop;

		if (!ispi->swseq_reg)
			return 0;

		preop = readw(ispi->sregs + PREOP_OPTYPE);
		/* Check both preopcode slots (low and high byte) */
		if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
			if (ispi->locked)
				return -EINVAL;
			writel(opcode, ispi->sregs + PREOP_OPTYPE);
		}

		/*
		 * This enables atomic sequence on next SW cycle. Will
		 * be cleared after next operation.
		 */
		ispi->atomic_preopcode = opcode;
		return 0;
	}

	/*
	 * We hope that HW sequencer will do the right thing automatically and
	 * with the SW sequencer we cannot use preopcode anyway, so just ignore
	 * the Write Disable operation and pretend it was completed
	 * successfully.
	 */
	if (opcode == SPINOR_OP_WRDI)
		return 0;

	writel(0, ispi->base + FADDR);

	/* Write the value beforehand */
	ret = intel_spi_write_block(ispi, buf, len);
	if (ret)
		return ret;

	if (ispi->swseq_reg)
		return intel_spi_sw_cycle(ispi, opcode, len,
					  OPTYPE_WRITE_NO_ADDR);
	return intel_spi_hw_cycle(ispi, opcode, len);
}
635
/*
 * spi-nor read callback: read @len bytes starting at @from using the HW
 * sequencer, in FIFO-sized chunks that never cross a 4K boundary.
 * Returns the number of bytes read or a negative error code.
 */
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
			      u_char *read_buf)
{
	struct intel_spi *ispi = nor->priv;
	size_t block_size, retlen = 0;
	u32 val, status;
	ssize_t ret;

	/*
	 * Atomic sequence is not expected with HW sequencer reads. Make
	 * sure it is cleared regardless.
	 */
	if (WARN_ON_ONCE(ispi->atomic_preopcode))
		ispi->atomic_preopcode = 0;

	/* The HW sequencer only implements the plain/fast read cycles */
	switch (nor->read_opcode) {
	case SPINOR_OP_READ:
	case SPINOR_OP_READ_FAST:
	case SPINOR_OP_READ_4B:
	case SPINOR_OP_READ_FAST_4B:
		break;
	default:
		return -EINVAL;
	}

	while (len > 0) {
		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

		/* Read cannot cross 4K boundary */
		block_size = min_t(loff_t, from + block_size,
				   round_up(from + 1, SZ_4K)) - from;

		writel(from, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Clear stale status bits (write-1-to-clear) */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_READ;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "read error: %llx: %#x\n", from,
				status);
			return ret;
		}

		ret = intel_spi_read_block(ispi, read_buf, block_size);
		if (ret)
			return ret;

		len -= block_size;
		from += block_size;
		retlen += block_size;
		read_buf += block_size;
	}

	return retlen;
}
706
/*
 * spi-nor write callback: write @len bytes starting at @to using the HW
 * sequencer, in FIFO-sized chunks that never cross a 4K boundary. The
 * data is loaded into the FIFO before the cycle is started.
 * Returns the number of bytes written or a negative error code.
 */
static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
			       const u_char *write_buf)
{
	struct intel_spi *ispi = nor->priv;
	size_t block_size, retlen = 0;
	u32 val, status;
	ssize_t ret;

	/* Not needed with HW sequencer write, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	while (len > 0) {
		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

		/* Write cannot cross 4K boundary */
		block_size = min_t(loff_t, to + block_size,
				   round_up(to + 1, SZ_4K)) - to;

		writel(to, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Clear stale status bits (write-1-to-clear) */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_WRITE;

		/* Fill the FIFO before kicking off the cycle */
		ret = intel_spi_write_block(ispi, write_buf, block_size);
		if (ret) {
			dev_err(ispi->dev, "failed to write block\n");
			return ret;
		}

		/* Start the write now */
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret) {
			dev_err(ispi->dev, "timeout\n");
			return ret;
		}

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "write error: %llx: %#x\n", to,
				status);
			return ret;
		}

		len -= block_size;
		to += block_size;
		retlen += block_size;
		write_buf += block_size;
	}

	return retlen;
}
769
/*
 * spi-nor erase callback: erase one mtd erasesize worth of flash at
 * @offs, either via the SW sequencer (BIOS-programmed erase opcode) or
 * the HW sequencer (4K or 64K cycles). Returns 0 or a negative error.
 */
static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
{
	size_t erase_size, len = nor->mtd.erasesize;
	struct intel_spi *ispi = nor->priv;
	u32 val, status, cmd;
	int ret;

	/* If the hardware can do 64k erase use that when possible */
	if (len >= SZ_64K && ispi->erase_64k) {
		cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
		erase_size = SZ_64K;
	} else {
		cmd = HSFSTS_CTL_FCYCLE_ERASE;
		erase_size = SZ_4K;
	}

	if (ispi->swseq_erase) {
		/*
		 * SW sequencer: issue nor->erase_opcode once per
		 * erase_size chunk; the opcode takes the address from
		 * FADDR (OPTYPE_WRITE_WITH_ADDR).
		 */
		while (len > 0) {
			writel(offs, ispi->base + FADDR);

			ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
						 0, OPTYPE_WRITE_WITH_ADDR);
			if (ret)
				return ret;

			offs += erase_size;
			len -= erase_size;
		}

		return 0;
	}

	/* Not needed with HW sequencer erase, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	while (len > 0) {
		writel(offs, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Clear stale status bits (write-1-to-clear) */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= cmd;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			return -EIO;
		else if (status & HSFSTS_CTL_AEL)
			return -EACCES;

		offs += erase_size;
		len -= erase_size;
	}

	return 0;
}
831
832 static bool intel_spi_is_protected(const struct intel_spi *ispi,
833                                    unsigned int base, unsigned int limit)
834 {
835         int i;
836
837         for (i = 0; i < ispi->pr_num; i++) {
838                 u32 pr_base, pr_limit, pr_value;
839
840                 pr_value = readl(ispi->pregs + PR(i));
841                 if (!(pr_value & (PR_WPE | PR_RPE)))
842                         continue;
843
844                 pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
845                 pr_base = pr_value & PR_BASE_MASK;
846
847                 if (pr_base >= base && pr_limit <= limit)
848                         return true;
849         }
850
851         return false;
852 }
853
854 /*
855  * There will be a single partition holding all enabled flash regions. We
856  * call this "BIOS".
857  */
858 static void intel_spi_fill_partition(struct intel_spi *ispi,
859                                      struct mtd_partition *part)
860 {
861         u64 end;
862         int i;
863
864         memset(part, 0, sizeof(*part));
865
866         /* Start from the mandatory descriptor region */
867         part->size = 4096;
868         part->name = "BIOS";
869
870         /*
871          * Now try to find where this partition ends based on the flash
872          * region registers.
873          */
874         for (i = 1; i < ispi->nregions; i++) {
875                 u32 region, base, limit;
876
877                 region = readl(ispi->base + FREG(i));
878                 base = region & FREG_BASE_MASK;
879                 limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
880
881                 if (base >= limit || limit == 0)
882                         continue;
883
884                 /*
885                  * If any of the regions have protection bits set, make the
886                  * whole partition read-only to be on the safe side.
887                  *
888                  * Also if the user did not ask the chip to be writeable
889                  * mask the bit too.
890                  */
891                 if (!writeable || intel_spi_is_protected(ispi, base, limit))
892                         part->mask_flags |= MTD_WRITEABLE;
893
894                 end = (limit << 12) + 4096;
895                 if (end > part->size)
896                         part->size = end;
897         }
898 }
899
/* Hooks the SPI NOR core uses to drive the Intel SPI host controller */
static const struct spi_nor_controller_ops intel_spi_controller_ops = {
	.read_reg = intel_spi_read_reg,
	.write_reg = intel_spi_write_reg,
	.read = intel_spi_read,
	.write = intel_spi_write,
	.erase = intel_spi_erase,
};
907
908 struct intel_spi *intel_spi_probe(struct device *dev,
909         struct resource *mem, const struct intel_spi_boardinfo *info)
910 {
911         const struct spi_nor_hwcaps hwcaps = {
912                 .mask = SNOR_HWCAPS_READ |
913                         SNOR_HWCAPS_READ_FAST |
914                         SNOR_HWCAPS_PP,
915         };
916         struct mtd_partition part;
917         struct intel_spi *ispi;
918         int ret;
919
920         if (!info || !mem)
921                 return ERR_PTR(-EINVAL);
922
923         ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
924         if (!ispi)
925                 return ERR_PTR(-ENOMEM);
926
927         ispi->base = devm_ioremap_resource(dev, mem);
928         if (IS_ERR(ispi->base))
929                 return ERR_CAST(ispi->base);
930
931         ispi->dev = dev;
932         ispi->info = info;
933
934         ret = intel_spi_init(ispi);
935         if (ret)
936                 return ERR_PTR(ret);
937
938         ispi->nor.dev = ispi->dev;
939         ispi->nor.priv = ispi;
940         ispi->nor.controller_ops = &intel_spi_controller_ops;
941
942         ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
943         if (ret) {
944                 dev_info(dev, "failed to locate the chip\n");
945                 return ERR_PTR(ret);
946         }
947
948         intel_spi_fill_partition(ispi, &part);
949
950         ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
951         if (ret)
952                 return ERR_PTR(ret);
953
954         return ispi;
955 }
956 EXPORT_SYMBOL_GPL(intel_spi_probe);
957
958 int intel_spi_remove(struct intel_spi *ispi)
959 {
960         return mtd_device_unregister(&ispi->nor.mtd);
961 }
962 EXPORT_SYMBOL_GPL(intel_spi_remove);
963
964 MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
965 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
966 MODULE_LICENSE("GPL v2");