// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
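
/*
 * Illustrative sketch (not part of the upstream file; the vendor/device
 * field names are an assumption from struct ipr_chip_t in ipr.h): the
 * tables above are typically consulted at probe time by matching the PCI
 * vendor and device IDs of the adapter being brought up, roughly:
 *
 *      for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *              if (ipr_chip[i].vendor == dev_id->vendor &&
 *                  ipr_chip[i].device == dev_id->device)
 *                      return &ipr_chip[i];
 *
 * The matched entry selects SIS32 vs. SIS64 command semantics, MMIO vs.
 * PCI config space access, and the register offsets in ipr_chip_cfg.
 */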

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for the adapter to become operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSI-X interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
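
/*
 * Usage note (illustrative, not part of the upstream file): all of the
 * above are set at module load time, e.g.:
 *
 *      modprobe ipr max_speed=2 log_level=2 number_of_msix=8
 *
 * The parameters registered with S_IRUGO | S_IWUSR (fastfail, debug,
 * fast_reboot) are additionally writable at runtime through
 * /sys/module/ipr/parameters/.
 */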

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4086: SAS Adapter Hardware Configuration Error"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};
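
/*
 * Illustrative sketch (an assumption based on how this table is consumed
 * later in the file): an IOASC reported by the adapter is resolved to a
 * table index by masking off its low-order bits and scanning linearly,
 * roughly:
 *
 *      for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
 *              if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
 *                      return i;
 *      return 0;       // entry 0 is the catch-all unknown error
 *
 * The remaining two initializers in each entry control error logging
 * (whether to log the IOASA, and the HCAM log level threshold), an
 * assumption from the ipr_error_table_t definition in ipr.h.
 */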

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
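
/*
 * Note (illustrative): the trace ring above stays lock-free because
 * IPR_TRACE_INDEX_MASK is (number of trace entries - 1) and the entry
 * count is a power of two, so masking an ever-increasing atomic counter
 * is equivalent to a modulo; for example, with 8192 entries:
 *
 *      index = atomic_add_return(1, &cnt) & 8191;      // == cnt % 8192
 *
 * Concurrent CPUs each claim a distinct slot without taking a spinlock.
 */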

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        readl(ioa_cfg->regs.sense_interrupt_reg);
}
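
/*
 * Note (illustrative): the SIS64 interrupt mask register is 64 bits
 * wide, hence the writeq()/writel() split above. The trailing
 * readl(sense_interrupt_reg) is the usual MMIO posted-write flush: the
 * read cannot complete until the preceding mask and clear writes have
 * actually reached the adapter, so the function does not return with
 * those writes still buffered.
 */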

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
        int rc;

        if (pcix_cmd_reg == 0)
                return 0;

        rc = pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                  &ioa_cfg->saved_pcix_cmd_reg);
        if (rc != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
        int rc;

        if (pcix_cmd_reg) {
                rc = pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                           ioa_cfg->saved_pcix_cmd_reg);
                if (rc != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
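
/*
 * Illustrative example of the SIS64 encoding above (the arithmetic here
 * assumes sizeof(struct ipr_ioadl64_desc) == 16, per its three-field
 * layout in ipr.h): a command using 20 IOADL64 descriptors needs
 * 20 * 16 = 320 bytes of descriptor space, which exceeds 128, so:
 *
 *      send_dma_addr |= 0x1;   // default 256-byte IOARCB
 *      send_dma_addr |= 0x4;   // promote to a 512-byte IOARCB
 *
 * The adapter decodes these low-order address bits to size its fetch of
 * the IOARCB.
 */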

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:               done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct timer_list *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}
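
/*
 * Note (illustrative): ipr_init_ioadl() covers internally generated
 * commands whose data is a single physically contiguous DMA buffer,
 * such as the HCAM buffer set up in ipr_send_hcam() below. Mid-layer
 * SCSI commands with real scatter/gather lists are mapped by separate
 * ioadl-building code later in the driver.
 */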

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct timer_list *),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}
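
/*
 * Illustrative example: with ioa_cfg->hrrq_num == 4, successive calls
 * to ipr_get_hrrq_index() cycle through queues 1, 2 and 3 and never
 * return 0. HRRQ 0 (IPR_INIT_HRRQ) is kept for initialization and
 * internally generated commands, so normal I/O is spread round-robin
 * across the remaining queues.
 */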

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;

        if (ioa_cfg->sis64) {
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; i < IPR_RES_PATH_BYTES && res_path[i] != 0xff; i++)
                p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}
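
/*
 * Example (illustrative): for res_path = { 0x00, 0x0E, 0x02, 0xff, ... }
 * this yields "00-0E-02"; ipr_format_res_path() below prefixes the SCSI
 * host number, giving e.g. "2/00-0E-02" as seen in the driver's
 * "Resource path" log messages.
 */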
1228
1229 /**
1230  * ipr_format_res_path - Format the resource path for printing.
1231  * @ioa_cfg:    ioa config struct
1232  * @res_path:   resource path
1233  * @buffer:     buffer
1234  * @len:        length of buffer provided
1235  *
1236  * Return value:
1237  *      pointer to buffer
1238  **/
1239 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1240                                  u8 *res_path, char *buffer, int len)
1241 {
1242         char *p = buffer;
1243
1244         *p = '\0';
1245         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1246         __ipr_format_res_path(res_path, p, len - (p - buffer));
1247         return buffer;
1248 }
1249
1250 /**
1251  * ipr_update_res_entry - Update the resource entry.
1252  * @res:        resource entry struct
1253  * @cfgtew:     config table entry wrapper struct
1254  *
1255  * Return value:
1256  *      none
1257  **/
1258 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1259                                  struct ipr_config_table_entry_wrapper *cfgtew)
1260 {
1261         char buffer[IPR_MAX_RES_PATH_LENGTH];
1262         int new_path = 0;
1263
1264         if (res->ioa_cfg->sis64) {
1265                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1266                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1267                 res->type = cfgtew->u.cfgte64->res_type;
1268
1269                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1270                         sizeof(struct ipr_std_inq_data));
1271
1272                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1273                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1274                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1275
1276                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1277                         sizeof(res->dev_lun.scsi_lun));
1278
1279                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1280                                         sizeof(res->res_path))) {
1281                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1282                                 sizeof(res->res_path));
1283                         new_path = 1;
1284                 }
1285
1286                 if (res->sdev && new_path)
1287                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1288                                     ipr_format_res_path(res->ioa_cfg,
1289                                         res->res_path, buffer, sizeof(buffer)));
1290         } else {
1291                 res->flags = cfgtew->u.cfgte->flags;
1292                 if (res->flags & IPR_IS_IOA_RESOURCE)
1293                         res->type = IPR_RES_TYPE_IOAFP;
1294                 else
1295                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1296
1297                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1298                         sizeof(struct ipr_std_inq_data));
1299
1300                 res->qmodel = IPR_QUEUEING_MODEL(res);
1301                 res->res_handle = cfgtew->u.cfgte->res_handle;
1302         }
1303 }
1304
1305 /**
1306  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1307  *                        for the resource.
1308  * @res:        resource entry struct
1309  *
1310  * Return value:
1311  *      none
1312  **/
1313 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1314 {
1315         struct ipr_resource_entry *gscsi_res = NULL;
1316         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1317
1318         if (!ioa_cfg->sis64)
1319                 return;
1320
1321         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1322                 clear_bit(res->target, ioa_cfg->array_ids);
1323         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1324                 clear_bit(res->target, ioa_cfg->vset_ids);
1325         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1326                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1327                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1328                                 return;
1329                 clear_bit(res->target, ioa_cfg->target_ids);
1331         } else if (res->bus == 0)
1332                 clear_bit(res->target, ioa_cfg->target_ids);
1333 }
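
/*
 * Note (sis64 only): target numbers are handed out of per-class bitmaps
 * (array_ids, vset_ids, target_ids). A generic SCSI target bit is only
 * released once no other entry on used_res_q shares the same dev_id,
 * since multiple LUNs of one device map to the same target bit.
 */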
1334
1335 /**
1336  * ipr_handle_config_change - Handle a config change from the adapter
1337  * @ioa_cfg:    ioa config struct
1338  * @hostrcb:    hostrcb
1339  *
1340  * Return value:
1341  *      none
1342  **/
1343 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1344                                      struct ipr_hostrcb *hostrcb)
1345 {
1346         struct ipr_resource_entry *res = NULL;
1347         struct ipr_config_table_entry_wrapper cfgtew;
1348         __be32 cc_res_handle;
1349
1350         u32 is_ndn = 1;
1351
1352         if (ioa_cfg->sis64) {
1353                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1354                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1355         } else {
1356                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1357                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1358         }
1359
1360         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1361                 if (res->res_handle == cc_res_handle) {
1362                         is_ndn = 0;
1363                         break;
1364                 }
1365         }
1366
1367         if (is_ndn) {
1368                 if (list_empty(&ioa_cfg->free_res_q)) {
1369                         ipr_send_hcam(ioa_cfg,
1370                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1371                                       hostrcb);
1372                         return;
1373                 }
1374
1375                 res = list_entry(ioa_cfg->free_res_q.next,
1376                                  struct ipr_resource_entry, queue);
1377
1378                 list_del(&res->queue);
1379                 ipr_init_res_entry(res, &cfgtew);
1380                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1381         }
1382
1383         ipr_update_res_entry(res, &cfgtew);
1384
1385         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1386                 if (res->sdev) {
1387                         res->del_from_ml = 1;
1388                         res->res_handle = IPR_INVALID_RES_HANDLE;
1389                         schedule_work(&ioa_cfg->work_q);
1390                 } else {
1391                         ipr_clear_res_target(res);
1392                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1393                 }
1394         } else if (!res->sdev || res->del_from_ml) {
1395                 res->add_to_ml = 1;
1396                 schedule_work(&ioa_cfg->work_q);
1397         }
1398
1399         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1400 }
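
/*
 * Summary of the CCN flow above: the changed entry is looked up by
 * res_handle on used_res_q; for a new device notification (is_ndn) a
 * free resource entry is initialized and moved onto used_res_q first.
 * A removal either flags an attached sdev for deletion from the
 * midlayer (del_from_ml) or frees the entry immediately; otherwise the
 * device is flagged add_to_ml. In every path the hostrcb is handed
 * back to the adapter so the next notification can be received.
 */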
1401
1402 /**
1403  * ipr_process_ccn - Op done function for a CCN.
1404  * @ipr_cmd:    ipr command struct
1405  *
1406  * This function is the op done function for a configuration
1407  * change notification host controlled async from the adapter.
1408  *
1409  * Return value:
1410  *      none
1411  **/
1412 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1413 {
1414         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1415         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1416         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1417
1418         list_del_init(&hostrcb->queue);
1419         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1420
1421         if (ioasc) {
1422                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1423                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1424                         dev_err(&ioa_cfg->pdev->dev,
1425                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1426
1427                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1428         } else {
1429                 ipr_handle_config_change(ioa_cfg, hostrcb);
1430         }
1431 }
1432
1433 /**
1434  * strip_whitespace - Strip trailing whitespace and NUL terminate.
1435  * @i:          size of buffer
1436  * @buf:        string to modify
1437  *
1438  * This function will strip all trailing whitespace and
1439  * NUL terminate the string.
1440  *
1441  **/
1442 static void strip_whitespace(int i, char *buf)
1443 {
1444         if (i < 1)
1445                 return;
1446         i--;
1447         while (i && buf[i] == ' ')
1448                 i--;
1449         buf[i+1] = '\0';
1450 }
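
/*
 * Illustrative example (not part of the driver): with i == 8 and
 * buf == "IBM     " (three characters plus five trailing blanks),
 * strip_whitespace() leaves "IBM", NUL terminated, in place.
 */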
1451
1452 /**
1453  * ipr_log_vpd_compact - Log the passed VPD compactly.
1454  * @prefix:             string to print at start of printk
1455  * @hostrcb:    hostrcb pointer
1456  * @vpd:                vendor/product id/sn struct
1457  *
1458  * Return value:
1459  *      none
1460  **/
1461 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1462                                 struct ipr_vpd *vpd)
1463 {
1464         char vendor_id[IPR_VENDOR_ID_LEN + 1];
1465         char product_id[IPR_PROD_ID_LEN + 1];
1466         char sn[IPR_SERIAL_NUM_LEN + 1];
1467
1468         memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1469         strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1470
1471         memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1472         strip_whitespace(IPR_PROD_ID_LEN, product_id);
1473
1474         memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1475         strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1476
1477         ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1478                      vendor_id, product_id, sn);
1479 }
1480
1481 /**
1482  * ipr_log_vpd - Log the passed VPD to the error log.
1483  * @vpd:                vendor/product id/sn struct
1484  *
1485  * Return value:
1486  *      none
1487  **/
1488 static void ipr_log_vpd(struct ipr_vpd *vpd)
1489 {
1490         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1491                     + IPR_SERIAL_NUM_LEN];
1492
1493         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1494         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1495                IPR_PROD_ID_LEN);
1496         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1497         ipr_err("Vendor/Product ID: %s\n", buffer);
1498
1499         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1500         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1501         ipr_err("    Serial Number: %s\n", buffer);
1502 }
1503
1504 /**
1505  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1506  * @prefix:             string to print at start of printk
1507  * @hostrcb:    hostrcb pointer
1508  * @vpd:                vendor/product id/sn/wwn struct
1509  *
1510  * Return value:
1511  *      none
1512  **/
1513 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1514                                     struct ipr_ext_vpd *vpd)
1515 {
1516         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1517         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1518                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1519 }
1520
1521 /**
1522  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1523  * @vpd:                vendor/product id/sn/wwn struct
1524  *
1525  * Return value:
1526  *      none
1527  **/
1528 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1529 {
1530         ipr_log_vpd(&vpd->vpd);
1531         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1532                 be32_to_cpu(vpd->wwid[1]));
1533 }
1534
1535 /**
1536  * ipr_log_enhanced_cache_error - Log a cache error.
1537  * @ioa_cfg:    ioa config struct
1538  * @hostrcb:    hostrcb struct
1539  *
1540  * Return value:
1541  *      none
1542  **/
1543 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1544                                          struct ipr_hostrcb *hostrcb)
1545 {
1546         struct ipr_hostrcb_type_12_error *error;
1547
1548         if (ioa_cfg->sis64)
1549                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1550         else
1551                 error = &hostrcb->hcam.u.error.u.type_12_error;
1552
1553         ipr_err("-----Current Configuration-----\n");
1554         ipr_err("Cache Directory Card Information:\n");
1555         ipr_log_ext_vpd(&error->ioa_vpd);
1556         ipr_err("Adapter Card Information:\n");
1557         ipr_log_ext_vpd(&error->cfc_vpd);
1558
1559         ipr_err("-----Expected Configuration-----\n");
1560         ipr_err("Cache Directory Card Information:\n");
1561         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1562         ipr_err("Adapter Card Information:\n");
1563         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1564
1565         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1566                      be32_to_cpu(error->ioa_data[0]),
1567                      be32_to_cpu(error->ioa_data[1]),
1568                      be32_to_cpu(error->ioa_data[2]));
1569 }
1570
1571 /**
1572  * ipr_log_cache_error - Log a cache error.
1573  * @ioa_cfg:    ioa config struct
1574  * @hostrcb:    hostrcb struct
1575  *
1576  * Return value:
1577  *      none
1578  **/
1579 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1580                                 struct ipr_hostrcb *hostrcb)
1581 {
1582         struct ipr_hostrcb_type_02_error *error =
1583                 &hostrcb->hcam.u.error.u.type_02_error;
1584
1585         ipr_err("-----Current Configuration-----\n");
1586         ipr_err("Cache Directory Card Information:\n");
1587         ipr_log_vpd(&error->ioa_vpd);
1588         ipr_err("Adapter Card Information:\n");
1589         ipr_log_vpd(&error->cfc_vpd);
1590
1591         ipr_err("-----Expected Configuration-----\n");
1592         ipr_err("Cache Directory Card Information:\n");
1593         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1594         ipr_err("Adapter Card Information:\n");
1595         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1596
1597         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1598                      be32_to_cpu(error->ioa_data[0]),
1599                      be32_to_cpu(error->ioa_data[1]),
1600                      be32_to_cpu(error->ioa_data[2]));
1601 }
1602
1603 /**
1604  * ipr_log_enhanced_config_error - Log a configuration error.
1605  * @ioa_cfg:    ioa config struct
1606  * @hostrcb:    hostrcb struct
1607  *
1608  * Return value:
1609  *      none
1610  **/
1611 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1612                                           struct ipr_hostrcb *hostrcb)
1613 {
1614         int errors_logged, i;
1615         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1616         struct ipr_hostrcb_type_13_error *error;
1617
1618         error = &hostrcb->hcam.u.error.u.type_13_error;
1619         errors_logged = be32_to_cpu(error->errors_logged);
1620
1621         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1622                 be32_to_cpu(error->errors_detected), errors_logged);
1623
1624         dev_entry = error->dev;
1625
1626         for (i = 0; i < errors_logged; i++, dev_entry++) {
1627                 ipr_err_separator;
1628
1629                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1630                 ipr_log_ext_vpd(&dev_entry->vpd);
1631
1632                 ipr_err("-----New Device Information-----\n");
1633                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1634
1635                 ipr_err("Cache Directory Card Information:\n");
1636                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1637
1638                 ipr_err("Adapter Card Information:\n");
1639                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1640         }
1641 }
1642
1643 /**
1644  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1645  * @ioa_cfg:    ioa config struct
1646  * @hostrcb:    hostrcb struct
1647  *
1648  * Return value:
1649  *      none
1650  **/
1651 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652                                        struct ipr_hostrcb *hostrcb)
1653 {
1654         int errors_logged, i;
1655         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1656         struct ipr_hostrcb_type_23_error *error;
1657         char buffer[IPR_MAX_RES_PATH_LENGTH];
1658
1659         error = &hostrcb->hcam.u.error64.u.type_23_error;
1660         errors_logged = be32_to_cpu(error->errors_logged);
1661
1662         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1663                 be32_to_cpu(error->errors_detected), errors_logged);
1664
1665         dev_entry = error->dev;
1666
1667         for (i = 0; i < errors_logged; i++, dev_entry++) {
1668                 ipr_err_separator;
1669
1670                 ipr_err("Device %d : %s\n", i + 1,
1671                         __ipr_format_res_path(dev_entry->res_path,
1672                                               buffer, sizeof(buffer)));
1673                 ipr_log_ext_vpd(&dev_entry->vpd);
1674
1675                 ipr_err("-----New Device Information-----\n");
1676                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1677
1678                 ipr_err("Cache Directory Card Information:\n");
1679                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1680
1681                 ipr_err("Adapter Card Information:\n");
1682                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1683         }
1684 }
1685
1686 /**
1687  * ipr_log_config_error - Log a configuration error.
1688  * @ioa_cfg:    ioa config struct
1689  * @hostrcb:    hostrcb struct
1690  *
1691  * Return value:
1692  *      none
1693  **/
1694 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1695                                  struct ipr_hostrcb *hostrcb)
1696 {
1697         int errors_logged, i;
1698         struct ipr_hostrcb_device_data_entry *dev_entry;
1699         struct ipr_hostrcb_type_03_error *error;
1700
1701         error = &hostrcb->hcam.u.error.u.type_03_error;
1702         errors_logged = be32_to_cpu(error->errors_logged);
1703
1704         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1705                 be32_to_cpu(error->errors_detected), errors_logged);
1706
1707         dev_entry = error->dev;
1708
1709         for (i = 0; i < errors_logged; i++, dev_entry++) {
1710                 ipr_err_separator;
1711
1712                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1713                 ipr_log_vpd(&dev_entry->vpd);
1714
1715                 ipr_err("-----New Device Information-----\n");
1716                 ipr_log_vpd(&dev_entry->new_vpd);
1717
1718                 ipr_err("Cache Directory Card Information:\n");
1719                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720
1721                 ipr_err("Adapter Card Information:\n");
1722                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723
1724                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1725                         be32_to_cpu(dev_entry->ioa_data[0]),
1726                         be32_to_cpu(dev_entry->ioa_data[1]),
1727                         be32_to_cpu(dev_entry->ioa_data[2]),
1728                         be32_to_cpu(dev_entry->ioa_data[3]),
1729                         be32_to_cpu(dev_entry->ioa_data[4]));
1730         }
1731 }
1732
1733 /**
1734  * ipr_log_enhanced_array_error - Log an array configuration error.
1735  * @ioa_cfg:    ioa config struct
1736  * @hostrcb:    hostrcb struct
1737  *
1738  * Return value:
1739  *      none
1740  **/
1741 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1742                                          struct ipr_hostrcb *hostrcb)
1743 {
1744         int i, num_entries;
1745         struct ipr_hostrcb_type_14_error *error;
1746         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1747         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1748
1749         error = &hostrcb->hcam.u.error.u.type_14_error;
1750
1751         ipr_err_separator;
1752
1753         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1754                 error->protection_level,
1755                 ioa_cfg->host->host_no,
1756                 error->last_func_vset_res_addr.bus,
1757                 error->last_func_vset_res_addr.target,
1758                 error->last_func_vset_res_addr.lun);
1759
1760         ipr_err_separator;
1761
1762         array_entry = error->array_member;
1763         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1764                             ARRAY_SIZE(error->array_member));
1765
1766         for (i = 0; i < num_entries; i++, array_entry++) {
1767                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1768                         continue;
1769
1770                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1771                         ipr_err("Exposed Array Member %d:\n", i);
1772                 else
1773                         ipr_err("Array Member %d:\n", i);
1774
1775                 ipr_log_ext_vpd(&array_entry->vpd);
1776                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1777                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1778                                  "Expected Location");
1779
1780                 ipr_err_separator;
1781         }
1782 }
1783
1784 /**
1785  * ipr_log_array_error - Log an array configuration error.
1786  * @ioa_cfg:    ioa config struct
1787  * @hostrcb:    hostrcb struct
1788  *
1789  * Return value:
1790  *      none
1791  **/
1792 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1793                                 struct ipr_hostrcb *hostrcb)
1794 {
1795         int i;
1796         struct ipr_hostrcb_type_04_error *error;
1797         struct ipr_hostrcb_array_data_entry *array_entry;
1798         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1799
1800         error = &hostrcb->hcam.u.error.u.type_04_error;
1801
1802         ipr_err_separator;
1803
1804         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1805                 error->protection_level,
1806                 ioa_cfg->host->host_no,
1807                 error->last_func_vset_res_addr.bus,
1808                 error->last_func_vset_res_addr.target,
1809                 error->last_func_vset_res_addr.lun);
1810
1811         ipr_err_separator;
1812
1813         array_entry = error->array_member;
1814
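        /*
         * The legacy layout splits the 18 possible members across two
         * fixed arrays: entries 0-9 live in array_member[] and entries
         * 10-17 in array_member2[], hence the pointer switch at i == 9
         * at the bottom of this loop.
         */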
1815         for (i = 0; i < 18; i++) {
1816                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1817                         continue;
1818
1819                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1820                         ipr_err("Exposed Array Member %d:\n", i);
1821                 else
1822                         ipr_err("Array Member %d:\n", i);
1823
1824                 ipr_log_vpd(&array_entry->vpd);
1825
1826                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1827                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1828                                  "Expected Location");
1829
1830                 ipr_err_separator;
1831
1832                 if (i == 9)
1833                         array_entry = error->array_member2;
1834                 else
1835                         array_entry++;
1836         }
1837 }
1838
1839 /**
1840  * ipr_log_hex_data - Log additional hex IOA error data.
1841  * @ioa_cfg:    ioa config struct
1842  * @data:               IOA error data
1843  * @len:                data length
1844  *
1845  * Return value:
1846  *      none
1847  **/
1848 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1849 {
1850         int i;
1851
1852         if (len == 0)
1853                 return;
1854
1855         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1856                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1857
1858         for (i = 0; i < len / 4; i += 4) {
1859                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1860                         be32_to_cpu(data[i]),
1861                         be32_to_cpu(data[i+1]),
1862                         be32_to_cpu(data[i+2]),
1863                         be32_to_cpu(data[i+3]));
1864         }
1865 }
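
/*
 * The dump above prints one 16-byte row per line, for example
 * (illustrative values):
 *
 *	00000010: DEADBEEF 00000001 00000002 00000003
 *
 * and is clamped to IPR_DEFAULT_MAX_ERROR_DUMP at the default log
 * level or below. Note the loop assumes len is a multiple of 16 bytes;
 * a ragged final row would read up to three words past it.
 */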
1866
1867 /**
1868  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1869  * @ioa_cfg:    ioa config struct
1870  * @hostrcb:    hostrcb struct
1871  *
1872  * Return value:
1873  *      none
1874  **/
1875 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1876                                             struct ipr_hostrcb *hostrcb)
1877 {
1878         struct ipr_hostrcb_type_17_error *error;
1879
1880         if (ioa_cfg->sis64)
1881                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1882         else
1883                 error = &hostrcb->hcam.u.error.u.type_17_error;
1884
1885         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1886         strim(error->failure_reason);
1887
1888         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1889                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1890         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1891         ipr_log_hex_data(ioa_cfg, error->data,
1892                          be32_to_cpu(hostrcb->hcam.length) -
1893                          (offsetof(struct ipr_hostrcb_error, u) +
1894                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1895 }
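
/*
 * The hex-dump length above is derived from the HCAM itself: the total
 * hcam.length minus everything preceding the variable-length data
 * array, i.e. the offset of the error union plus the offset of data
 * within the type 17 overlay. ipr_log_dual_ioa_error() below applies
 * the same arithmetic to the type 07 overlay.
 */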
1896
1897 /**
1898  * ipr_log_dual_ioa_error - Log a dual adapter error.
1899  * @ioa_cfg:    ioa config struct
1900  * @hostrcb:    hostrcb struct
1901  *
1902  * Return value:
1903  *      none
1904  **/
1905 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1906                                    struct ipr_hostrcb *hostrcb)
1907 {
1908         struct ipr_hostrcb_type_07_error *error;
1909
1910         error = &hostrcb->hcam.u.error.u.type_07_error;
1911         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1912         strim(error->failure_reason);
1913
1914         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1915                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1916         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1917         ipr_log_hex_data(ioa_cfg, error->data,
1918                          be32_to_cpu(hostrcb->hcam.length) -
1919                          (offsetof(struct ipr_hostrcb_error, u) +
1920                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1921 }
1922
1923 static const struct {
1924         u8 active;
1925         char *desc;
1926 } path_active_desc[] = {
1927         { IPR_PATH_NO_INFO, "Path" },
1928         { IPR_PATH_ACTIVE, "Active path" },
1929         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1930 };
1931
1932 static const struct {
1933         u8 state;
1934         char *desc;
1935 } path_state_desc[] = {
1936         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1937         { IPR_PATH_HEALTHY, "is healthy" },
1938         { IPR_PATH_DEGRADED, "is degraded" },
1939         { IPR_PATH_FAILED, "is failed" }
1940 };
1941
1942 /**
1943  * ipr_log_fabric_path - Log a fabric path error
1944  * @hostrcb:    hostrcb struct
1945  * @fabric:             fabric descriptor
1946  *
1947  * Return value:
1948  *      none
1949  **/
1950 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1951                                 struct ipr_hostrcb_fabric_desc *fabric)
1952 {
1953         int i, j;
1954         u8 path_state = fabric->path_state;
1955         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1956         u8 state = path_state & IPR_PATH_STATE_MASK;
1957
1958         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1959                 if (path_active_desc[i].active != active)
1960                         continue;
1961
1962                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1963                         if (path_state_desc[j].state != state)
1964                                 continue;
1965
1966                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1967                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1968                                              path_active_desc[i].desc, path_state_desc[j].desc,
1969                                              fabric->ioa_port);
1970                         } else if (fabric->cascaded_expander == 0xff) {
1971                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1972                                              path_active_desc[i].desc, path_state_desc[j].desc,
1973                                              fabric->ioa_port, fabric->phy);
1974                         } else if (fabric->phy == 0xff) {
1975                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1976                                              path_active_desc[i].desc, path_state_desc[j].desc,
1977                                              fabric->ioa_port, fabric->cascaded_expander);
1978                         } else {
1979                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1980                                              path_active_desc[i].desc, path_state_desc[j].desc,
1981                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1982                         }
1983                         return;
1984                 }
1985         }
1986
1987         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1988                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1989 }
1990
1991 /**
1992  * ipr_log64_fabric_path - Log a fabric path error
1993  * @hostrcb:    hostrcb struct
1994  * @fabric:             fabric descriptor
1995  *
1996  * Return value:
1997  *      none
1998  **/
1999 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2000                                   struct ipr_hostrcb64_fabric_desc *fabric)
2001 {
2002         int i, j;
2003         u8 path_state = fabric->path_state;
2004         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2005         u8 state = path_state & IPR_PATH_STATE_MASK;
2006         char buffer[IPR_MAX_RES_PATH_LENGTH];
2007
2008         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2009                 if (path_active_desc[i].active != active)
2010                         continue;
2011
2012                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2013                         if (path_state_desc[j].state != state)
2014                                 continue;
2015
2016                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2017                                      path_active_desc[i].desc, path_state_desc[j].desc,
2018                                      ipr_format_res_path(hostrcb->ioa_cfg,
2019                                                 fabric->res_path,
2020                                                 buffer, sizeof(buffer)));
2021                         return;
2022                 }
2023         }
2024
2025         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2026                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2027                                     buffer, sizeof(buffer)));
2028 }
2029
2030 static const struct {
2031         u8 type;
2032         char *desc;
2033 } path_type_desc[] = {
2034         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2035         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2036         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2037         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2038 };
2039
2040 static const struct {
2041         u8 status;
2042         char *desc;
2043 } path_status_desc[] = {
2044         { IPR_PATH_CFG_NO_PROB, "Functional" },
2045         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2046         { IPR_PATH_CFG_FAILED, "Failed" },
2047         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2048         { IPR_PATH_NOT_DETECTED, "Missing" },
2049         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2050 };
2051
2052 static const char *link_rate[] = {
2053         "unknown",
2054         "disabled",
2055         "phy reset problem",
2056         "spinup hold",
2057         "port selector",
2058         "unknown",
2059         "unknown",
2060         "unknown",
2061         "1.5Gbps",
2062         "3.0Gbps",
2063         "unknown",
2064         "unknown",
2065         "unknown",
2066         "unknown",
2067         "unknown",
2068         "unknown"
2069 };
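
/*
 * link_rate[] is indexed by the link-rate field masked with
 * IPR_PHY_LINK_RATE_MASK, hence 16 entries: the low codes report phy
 * states (disabled, spinup hold, port selector), while 0x8 and 0x9 are
 * the SAS 1.5 Gbps and 3.0 Gbps negotiated rates.
 */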
2070
2071 /**
2072  * ipr_log_path_elem - Log a fabric path element.
2073  * @hostrcb:    hostrcb struct
2074  * @cfg:                fabric path element struct
2075  *
2076  * Return value:
2077  *      none
2078  **/
2079 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2080                               struct ipr_hostrcb_config_element *cfg)
2081 {
2082         int i, j;
2083         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2084         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2085
2086         if (type == IPR_PATH_CFG_NOT_EXIST)
2087                 return;
2088
2089         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2090                 if (path_type_desc[i].type != type)
2091                         continue;
2092
2093                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2094                         if (path_status_desc[j].status != status)
2095                                 continue;
2096
2097                         if (type == IPR_PATH_CFG_IOA_PORT) {
2098                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2099                                              path_status_desc[j].desc, path_type_desc[i].desc,
2100                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2101                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2102                         } else {
2103                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2104                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2105                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2106                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2107                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2108                                 } else if (cfg->cascaded_expander == 0xff) {
2109                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2110                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2111                                                      path_type_desc[i].desc, cfg->phy,
2112                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2113                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2114                                 } else if (cfg->phy == 0xff) {
2115                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2116                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2117                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2118                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2119                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2120                                 } else {
2121                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
2122                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2123                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2124                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2125                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2126                                 }
2127                         }
2128                         return;
2129                 }
2130         }
2131
2132         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2133                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2134                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2135                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2136 }
2137
2138 /**
2139  * ipr_log64_path_elem - Log a fabric path element.
2140  * @hostrcb:    hostrcb struct
2141  * @cfg:                fabric path element struct
2142  *
2143  * Return value:
2144  *      none
2145  **/
2146 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2147                                 struct ipr_hostrcb64_config_element *cfg)
2148 {
2149         int i, j;
2150         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2151         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2152         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2153         char buffer[IPR_MAX_RES_PATH_LENGTH];
2154
2155         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2156                 return;
2157
2158         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2159                 if (path_type_desc[i].type != type)
2160                         continue;
2161
2162                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2163                         if (path_status_desc[j].status != status)
2164                                 continue;
2165
2166                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2167                                      path_status_desc[j].desc, path_type_desc[i].desc,
2168                                      ipr_format_res_path(hostrcb->ioa_cfg,
2169                                         cfg->res_path, buffer, sizeof(buffer)),
2170                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2171                                         be32_to_cpu(cfg->wwid[0]),
2172                                         be32_to_cpu(cfg->wwid[1]));
2173                         return;
2174                 }
2175         }
2176         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2177                      "WWN=%08X%08X\n", cfg->type_status,
2178                      ipr_format_res_path(hostrcb->ioa_cfg,
2179                         cfg->res_path, buffer, sizeof(buffer)),
2180                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2181                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2182 }
2183
2184 /**
2185  * ipr_log_fabric_error - Log a fabric error.
2186  * @ioa_cfg:    ioa config struct
2187  * @hostrcb:    hostrcb struct
2188  *
2189  * Return value:
2190  *      none
2191  **/
2192 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2193                                  struct ipr_hostrcb *hostrcb)
2194 {
2195         struct ipr_hostrcb_type_20_error *error;
2196         struct ipr_hostrcb_fabric_desc *fabric;
2197         struct ipr_hostrcb_config_element *cfg;
2198         int i, add_len;
2199
2200         error = &hostrcb->hcam.u.error.u.type_20_error;
2201         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2202         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2203
2204         add_len = be32_to_cpu(hostrcb->hcam.length) -
2205                 (offsetof(struct ipr_hostrcb_error, u) +
2206                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2207
2208         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2209                 ipr_log_fabric_path(hostrcb, fabric);
2210                 for_each_fabric_cfg(fabric, cfg)
2211                         ipr_log_path_elem(hostrcb, cfg);
2212
2213                 add_len -= be16_to_cpu(fabric->length);
2214                 fabric = (struct ipr_hostrcb_fabric_desc *)
2215                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2216         }
2217
2218         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2219 }
2220
2221 /**
2222  * ipr_log_sis64_array_error - Log a sis64 array error.
2223  * @ioa_cfg:    ioa config struct
2224  * @hostrcb:    hostrcb struct
2225  *
2226  * Return value:
2227  *      none
2228  **/
2229 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2230                                       struct ipr_hostrcb *hostrcb)
2231 {
2232         int i, num_entries;
2233         struct ipr_hostrcb_type_24_error *error;
2234         struct ipr_hostrcb64_array_data_entry *array_entry;
2235         char buffer[IPR_MAX_RES_PATH_LENGTH];
2236         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2237
2238         error = &hostrcb->hcam.u.error64.u.type_24_error;
2239
2240         ipr_err_separator;
2241
2242         ipr_err("RAID %s Array Configuration: %s\n",
2243                 error->protection_level,
2244                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2245                         buffer, sizeof(buffer)));
2246
2247         ipr_err_separator;
2248
2249         array_entry = error->array_member;
2250         num_entries = min_t(u32, error->num_entries,
2251                             ARRAY_SIZE(error->array_member));
2252
2253         for (i = 0; i < num_entries; i++, array_entry++) {
2255                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2256                         continue;
2257
2258                 if (error->exposed_mode_adn == i)
2259                         ipr_err("Exposed Array Member %d:\n", i);
2260                 else
2261                         ipr_err("Array Member %d:\n", i);
2262
2264                 ipr_log_ext_vpd(&array_entry->vpd);
2265                 ipr_err("Current Location: %s\n",
2266                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2267                                 buffer, sizeof(buffer)));
2268                 ipr_err("Expected Location: %s\n",
2269                          ipr_format_res_path(ioa_cfg,
2270                                 array_entry->expected_res_path,
2271                                 buffer, sizeof(buffer)));
2272
2273                 ipr_err_separator;
2274         }
2275 }
2276
2277 /**
2278  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2279  * @ioa_cfg:    ioa config struct
2280  * @hostrcb:    hostrcb struct
2281  *
2282  * Return value:
2283  *      none
2284  **/
2285 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2286                                        struct ipr_hostrcb *hostrcb)
2287 {
2288         struct ipr_hostrcb_type_30_error *error;
2289         struct ipr_hostrcb64_fabric_desc *fabric;
2290         struct ipr_hostrcb64_config_element *cfg;
2291         int i, add_len;
2292
2293         error = &hostrcb->hcam.u.error64.u.type_30_error;
2294
2295         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2296         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2297
2298         add_len = be32_to_cpu(hostrcb->hcam.length) -
2299                 (offsetof(struct ipr_hostrcb64_error, u) +
2300                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2301
2302         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2303                 ipr_log64_fabric_path(hostrcb, fabric);
2304                 for_each_fabric_cfg(fabric, cfg)
2305                         ipr_log64_path_elem(hostrcb, cfg);
2306
2307                 add_len -= be16_to_cpu(fabric->length);
2308                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2309                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2310         }
2311
2312         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2313 }
2314
2315 /**
2316  * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2317  * @ioa_cfg:    ioa config struct
2318  * @hostrcb:    hostrcb struct
2319  *
2320  * Return value:
2321  *      none
2322  **/
2323 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2324                                        struct ipr_hostrcb *hostrcb)
2325 {
2326         struct ipr_hostrcb_type_41_error *error;
2327
2328         error = &hostrcb->hcam.u.error64.u.type_41_error;
2329
2330         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2331         ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2332         ipr_log_hex_data(ioa_cfg, error->data,
2333                          be32_to_cpu(hostrcb->hcam.length) -
2334                          (offsetof(struct ipr_hostrcb_error, u) +
2335                           offsetof(struct ipr_hostrcb_type_41_error, data)));
2336 }

2337 /**
2338  * ipr_log_generic_error - Log an adapter error.
2339  * @ioa_cfg:    ioa config struct
2340  * @hostrcb:    hostrcb struct
2341  *
2342  * Return value:
2343  *      none
2344  **/
2345 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2346                                   struct ipr_hostrcb *hostrcb)
2347 {
2348         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2349                          be32_to_cpu(hostrcb->hcam.length));
2350 }
2351
2352 /**
2353  * ipr_log_sis64_device_error - Log a sis64 device error.
2354  * @ioa_cfg:    ioa config struct
2355  * @hostrcb:    hostrcb struct
2356  *
2357  * Return value:
2358  *      none
2359  **/
2360 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2361                                          struct ipr_hostrcb *hostrcb)
2362 {
2363         struct ipr_hostrcb_type_21_error *error;
2364         char buffer[IPR_MAX_RES_PATH_LENGTH];
2365
2366         error = &hostrcb->hcam.u.error64.u.type_21_error;
2367
2368         ipr_err("-----Failing Device Information-----\n");
2369         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2370                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2371                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2372         ipr_err("Device Resource Path: %s\n",
2373                 __ipr_format_res_path(error->res_path,
2374                                       buffer, sizeof(buffer)));
2375         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2376         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2377         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2378         ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2379         ipr_err("SCSI Sense Data:\n");
2380         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2381         ipr_err("SCSI Command Descriptor Block:\n");
2382         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2383
2384         ipr_err("Additional IOA Data:\n");
2385         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2386 }
2387
2388 /**
2389  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2390  * @ioasc:      IOASC
2391  *
2392  * This function will return the index into the ipr_error_table
2393  * for the specified IOASC. If the IOASC is not in the table,
2394  * 0 will be returned, which points to the entry used for unknown errors.
2395  *
2396  * Return value:
2397  *      index into the ipr_error_table
2398  **/
2399 static u32 ipr_get_error(u32 ioasc)
2400 {
2401         int i;
2402
2403         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2404                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2405                         return i;
2406
2407         return 0;
2408 }
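
/*
 * Note: ipr_error_table[0] is the catch-all "unknown error" entry, so a
 * failed lookup still yields a printable description. The IOASC is
 * masked with IPR_IOASC_IOASC_MASK before comparing, so an entry
 * matches regardless of any qualifier bits outside that mask.
 */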
2409
2410 /**
2411  * ipr_handle_log_data - Log an adapter error.
2412  * @ioa_cfg:    ioa config struct
2413  * @hostrcb:    hostrcb struct
2414  *
2415  * This function logs an adapter error to the system.
2416  *
2417  * Return value:
2418  *      none
2419  **/
2420 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2421                                 struct ipr_hostrcb *hostrcb)
2422 {
2423         u32 ioasc;
2424         int error_index;
2425         struct ipr_hostrcb_type_21_error *error;
2426
2427         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2428                 return;
2429
2430         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2431                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2432
2433         if (ioa_cfg->sis64)
2434                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2435         else
2436                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2437
2438         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2439             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2440                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2441                 scsi_report_bus_reset(ioa_cfg->host,
2442                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2443         }
2444
2445         error_index = ipr_get_error(ioasc);
2446
2447         if (!ipr_error_table[error_index].log_hcam)
2448                 return;
2449
2450         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2451             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2452                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2453
2454                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2455                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2456                         return;
2457         }
2458
2459         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2460
2461         /* Set indication we have logged an error */
2462         ioa_cfg->errors_logged++;
2463
2464         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2465                 return;
2466         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2467                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2468
2469         switch (hostrcb->hcam.overlay_id) {
2470         case IPR_HOST_RCB_OVERLAY_ID_2:
2471                 ipr_log_cache_error(ioa_cfg, hostrcb);
2472                 break;
2473         case IPR_HOST_RCB_OVERLAY_ID_3:
2474                 ipr_log_config_error(ioa_cfg, hostrcb);
2475                 break;
2476         case IPR_HOST_RCB_OVERLAY_ID_4:
2477         case IPR_HOST_RCB_OVERLAY_ID_6:
2478                 ipr_log_array_error(ioa_cfg, hostrcb);
2479                 break;
2480         case IPR_HOST_RCB_OVERLAY_ID_7:
2481                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2482                 break;
2483         case IPR_HOST_RCB_OVERLAY_ID_12:
2484                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2485                 break;
2486         case IPR_HOST_RCB_OVERLAY_ID_13:
2487                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2488                 break;
2489         case IPR_HOST_RCB_OVERLAY_ID_14:
2490         case IPR_HOST_RCB_OVERLAY_ID_16:
2491                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2492                 break;
2493         case IPR_HOST_RCB_OVERLAY_ID_17:
2494                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2495                 break;
2496         case IPR_HOST_RCB_OVERLAY_ID_20:
2497                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2498                 break;
2499         case IPR_HOST_RCB_OVERLAY_ID_21:
2500                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2501                 break;
2502         case IPR_HOST_RCB_OVERLAY_ID_23:
2503                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2504                 break;
2505         case IPR_HOST_RCB_OVERLAY_ID_24:
2506         case IPR_HOST_RCB_OVERLAY_ID_26:
2507                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2508                 break;
2509         case IPR_HOST_RCB_OVERLAY_ID_30:
2510                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2511                 break;
2512         case IPR_HOST_RCB_OVERLAY_ID_41:
2513                 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2514                 break;
2515         case IPR_HOST_RCB_OVERLAY_ID_1:
2516         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2517         default:
2518                 ipr_log_generic_error(ioa_cfg, hostrcb);
2519                 break;
2520         }
2521 }
2522
2523 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2524 {
2525         struct ipr_hostrcb *hostrcb;
2526
2527         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2528                                         struct ipr_hostrcb, queue);
2529
2530         if (unlikely(!hostrcb)) {
2531                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2532                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2533                                                 struct ipr_hostrcb, queue);
2534         }
2535
2536         list_del_init(&hostrcb->queue);
2537         return hostrcb;
2538 }
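
/*
 * Note: when hostrcb_free_q is empty, the oldest buffer still parked on
 * hostrcb_report_q is reclaimed instead. This assumes at least one of
 * the two queues is non-empty; were both drained,
 * list_first_entry_or_null() would return NULL and the list_del_init()
 * above would dereference it.
 */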
2539
2540 /**
2541  * ipr_process_error - Op done function for an adapter error log.
2542  * @ipr_cmd:    ipr command struct
2543  *
2544  * This function is the op done function for an error log host
2545  * controlled async from the adapter. It will log the error and
2546  * send the HCAM back to the adapter.
2547  *
2548  * Return value:
2549  *      none
2550  **/
2551 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2552 {
2553         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2554         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2555         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2556         u32 fd_ioasc;
2557
2558         if (ioa_cfg->sis64)
2559                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2560         else
2561                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2562
2563         list_del_init(&hostrcb->queue);
2564         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2565
2566         if (!ioasc) {
2567                 ipr_handle_log_data(ioa_cfg, hostrcb);
2568                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2569                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2570         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2571                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2572                 dev_err(&ioa_cfg->pdev->dev,
2573                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2574         }
2575
2576         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2577         schedule_work(&ioa_cfg->work_q);
2578         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2579
2580         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2581 }
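
/*
 * Unlike ipr_process_ccn() above, a successfully logged error can also
 * require an adapter reset (IPR_IOASC_NR_IOA_RESET_REQUIRED), and the
 * finished hostrcb is parked on hostrcb_report_q for later retrieval
 * before a fresh buffer is re-armed with ipr_send_hcam().
 */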
2582
2583 /**
2584  * ipr_timeout -  An internally generated op has timed out.
2585  * @t: Timer context used to fetch ipr command struct
2586  *
2587  * This function blocks host requests and initiates an
2588  * adapter reset.
2589  *
2590  * Return value:
2591  *      none
2592  **/
2593 static void ipr_timeout(struct timer_list *t)
2594 {
2595         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2596         unsigned long lock_flags = 0;
2597         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2598
2599         ENTER;
2600         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2601
2602         ioa_cfg->errors_logged++;
2603         dev_err(&ioa_cfg->pdev->dev,
2604                 "Adapter being reset due to command timeout.\n");
2605
2606         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2607                 ioa_cfg->sdt_state = GET_DUMP;
2608
2609         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2610                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2611
2612         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2613         LEAVE;
2614 }
2615
2616 /**
2617  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2618  * @t: Timer context used to fetch ipr command struct
2619  *
2620  * This function blocks host requests and initiates an
2621  * adapter reset.
2622  *
2623  * Return value:
2624  *      none
2625  **/
2626 static void ipr_oper_timeout(struct timer_list *t)
2627 {
2628         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2629         unsigned long lock_flags = 0;
2630         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2631
2632         ENTER;
2633         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2634
2635         ioa_cfg->errors_logged++;
2636         dev_err(&ioa_cfg->pdev->dev,
2637                 "Adapter timed out transitioning to operational.\n");
2638
2639         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2640                 ioa_cfg->sdt_state = GET_DUMP;
2641
2642         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2643                 if (ipr_fastfail)
2644                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2645                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2646         }
2647
2648         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2649         LEAVE;
2650 }
2651
2652 /**
2653  * ipr_find_ses_entry - Find matching SES in SES table
2654  * @res:        resource entry struct of SES
2655  *
2656  * Return value:
2657  *      pointer to SES table entry / NULL on failure
2658  **/
2659 static const struct ipr_ses_table_entry *
2660 ipr_find_ses_entry(struct ipr_resource_entry *res)
2661 {
2662         int i, j, matches;
2663         struct ipr_std_inq_vpids *vpids;
2664         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2665
2666         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2667                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2668                         if (ste->compare_product_id_byte[j] == 'X') {
2669                                 vpids = &res->std_inq_data.vpids;
2670                                 if (vpids->product_id[j] == ste->product_id[j])
2671                                         matches++;
2672                                 else
2673                                         break;
2674                         } else
2675                                 matches++;
2676                 }
2677
2678                 if (matches == IPR_PROD_ID_LEN)
2679                         return ste;
2680         }
2681
2682         return NULL;
2683 }
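
/*
 * Matching note: an 'X' in compare_product_id_byte marks a product ID
 * position that must match the inquiry data exactly; any other byte is
 * a don't-care. For example, a (hypothetical) entry of the form
 *
 *	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 }
 *
 * would compare every product ID byte except the eighth.
 */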
2684
2685 /**
2686  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2687  * @ioa_cfg:    ioa config struct
2688  * @bus:                SCSI bus
2689  * @bus_width:  bus width
2690  *
2691  * Return value:
2692  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2693  *      For a 2-byte wide SCSI bus, the data rate in MB/sec is twice
2694  *      the bus clock in MHz (e.g. a wide enabled bus clocked at
2695  *      160 MHz transfers at most 320 MB/sec).
2696  **/
2697 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2698 {
2699         struct ipr_resource_entry *res;
2700         const struct ipr_ses_table_entry *ste;
2701         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2702
2703         /* Loop through each config table entry in the config table buffer */
2704         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2705                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2706                         continue;
2707
2708                 if (bus != res->bus)
2709                         continue;
2710
2711                 if (!(ste = ipr_find_ses_entry(res)))
2712                         continue;
2713
2714                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2715         }
2716
2717         return max_xfer_rate;
2718 }
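
/*
 * Worked example: an enclosure entry with a max_bus_speed_limit of
 * 80 (MB/sec) on a 16-bit wide bus yields
 *
 *	max_xfer_rate = (80 * 10) / (16 / 8) = 400
 *
 * i.e. 40 MHz in the 100KHz units described above.
 */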
2719
2720 /**
2721  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2722  * @ioa_cfg:            ioa config struct
2723  * @max_delay:          max delay in micro-seconds to wait
2724  *
2725  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2726  *
2727  * Return value:
2728  *      0 on success / other on failure
2729  **/
2730 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2731 {
2732         volatile u32 pcii_reg;
2733         int delay = 1;
2734
2735         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2736         while (delay < max_delay) {
2737                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2738
2739                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2740                         return 0;
2741
2742                 /* udelay cannot be used if delay is more than a few milliseconds */
2743                 if ((delay / 1000) > MAX_UDELAY_MS)
2744                         mdelay(delay / 1000);
2745                 else
2746                         udelay(delay);
2747
2748                 delay += delay;
2749         }
2750         return -EIO;
2751 }
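
/*
 * The poll interval above doubles on each iteration (1, 2, 4, ...
 * microseconds), so the total time spent busy-waiting is bounded by
 * roughly twice max_delay before -EIO is returned.
 */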
2752
2753 /**
2754  * ipr_get_sis64_dump_data_section - Dump IOA memory
2755  * @ioa_cfg:                    ioa config struct
2756  * @start_addr:                 adapter address to dump
2757  * @dest:                       destination kernel buffer
2758  * @length_in_words:            length to dump in 4 byte words
2759  *
2760  * Return value:
2761  *      0 on success
2762  **/
2763 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2764                                            u32 start_addr,
2765                                            __be32 *dest, u32 length_in_words)
2766 {
2767         int i;
2768
2769         for (i = 0; i < length_in_words; i++) {
2770                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2771                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2772                 dest++;
2773         }
2774
2775         return 0;
2776 }
2777
2778 /**
2779  * ipr_get_ldump_data_section - Dump IOA memory
2780  * @ioa_cfg:            ioa config struct
2781  * @start_addr:         adapter address to dump
2782  * @dest:               destination kernel buffer
2783  * @length_in_words:    length to dump in 4 byte words
2784  *
2785  * Return value:
2786  *      0 on success / -EIO on failure
2787  **/
2788 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2789                                       u32 start_addr,
2790                                       __be32 *dest, u32 length_in_words)
2791 {
2792         volatile u32 temp_pcii_reg;
2793         int i, delay = 0;
2794
2795         if (ioa_cfg->sis64)
2796                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2797                                                        dest, length_in_words);
2798
2799         /* Write IOA interrupt reg starting LDUMP state  */
2800         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2801                ioa_cfg->regs.set_uproc_interrupt_reg32);
2802
2803         /* Wait for IO debug acknowledge */
2804         if (ipr_wait_iodbg_ack(ioa_cfg,
2805                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2806                 dev_err(&ioa_cfg->pdev->dev,
2807                         "IOA dump long data transfer timeout\n");
2808                 return -EIO;
2809         }
2810
2811         /* Signal LDUMP interlocked - clear IO debug ack */
2812         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2813                ioa_cfg->regs.clr_interrupt_reg);
2814
2815         /* Write Mailbox with starting address */
2816         writel(start_addr, ioa_cfg->ioa_mailbox);
2817
2818         /* Signal address valid - clear IOA Reset alert */
2819         writel(IPR_UPROCI_RESET_ALERT,
2820                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2821
2822         for (i = 0; i < length_in_words; i++) {
2823                 /* Wait for IO debug acknowledge */
2824                 if (ipr_wait_iodbg_ack(ioa_cfg,
2825                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2826                         dev_err(&ioa_cfg->pdev->dev,
2827                                 "IOA dump short data transfer timeout\n");
2828                         return -EIO;
2829                 }
2830
2831                 /* Read data from mailbox and increment destination pointer */
2832                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2833                 dest++;
2834
2835                 /* For all but the last word of data, signal data received */
2836                 if (i < (length_in_words - 1)) {
2837                         /* Signal dump data received - Clear IO debug Ack */
2838                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2839                                ioa_cfg->regs.clr_interrupt_reg);
2840                 }
2841         }
2842
2843         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2844         writel(IPR_UPROCI_RESET_ALERT,
2845                ioa_cfg->regs.set_uproc_interrupt_reg32);
2846
2847         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2848                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2849
2850         /* Signal dump data received - Clear IO debug Ack */
2851         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2852                ioa_cfg->regs.clr_interrupt_reg);
2853
2854         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2855         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2856                 temp_pcii_reg =
2857                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2858
2859                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2860                         return 0;
2861
2862                 udelay(10);
2863                 delay += 10;
2864         }
2865
2866         return 0;
2867 }
2868
2869 #ifdef CONFIG_SCSI_IPR_DUMP
2870 /**
2871  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2872  * @ioa_cfg:            ioa config struct
2873  * @pci_address:        adapter address
2874  * @length:             length of data to copy
2875  *
2876  * Copy data from PCI adapter to kernel buffer.
2877  * Note: length MUST be a 4 byte multiple
2878  * Return value:
2879  *      0 on success / other on failure
2880  **/
2881 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2882                         unsigned long pci_address, u32 length)
2883 {
2884         int bytes_copied = 0;
2885         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2886         __be32 *page;
2887         unsigned long lock_flags = 0;
2888         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2889
2890         if (ioa_cfg->sis64)
2891                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2892         else
2893                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2894
2895         while (bytes_copied < length &&
2896                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2897                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2898                     ioa_dump->page_offset == 0) {
2899                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2900
2901                         if (!page) {
2902                                 ipr_trace;
2903                                 return bytes_copied;
2904                         }
2905
2906                         ioa_dump->page_offset = 0;
2907                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2908                         ioa_dump->next_page_index++;
2909                 } else
2910                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2911
2912                 rem_len = length - bytes_copied;
2913                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2914                 cur_len = min(rem_len, rem_page_len);
2915
2916                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2917                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2918                         rc = -EIO;
2919                 } else {
2920                         rc = ipr_get_ldump_data_section(ioa_cfg,
2921                                                         pci_address + bytes_copied,
2922                                                         &page[ioa_dump->page_offset / 4],
2923                                                         (cur_len / sizeof(u32)));
2924                 }
2925                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2926
2927                 if (!rc) {
2928                         ioa_dump->page_offset += cur_len;
2929                         bytes_copied += cur_len;
2930                 } else {
2931                         ipr_trace;
2932                         break;
2933                 }
2934                 schedule();
2935         }
2936
2937         return bytes_copied;
2938 }
2939
2940 /**
2941  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2942  * @hdr:        dump entry header struct
2943  *
2944  * Return value:
2945  *      nothing
2946  **/
2947 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2948 {
2949         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2950         hdr->num_elems = 1;
2951         hdr->offset = sizeof(*hdr);
2952         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2953 }
2954
2955 /**
2956  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2957  * @ioa_cfg:    ioa config struct
2958  * @driver_dump:        driver dump struct
2959  *
2960  * Return value:
2961  *      nothing
2962  **/
2963 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2964                                    struct ipr_driver_dump *driver_dump)
2965 {
2966         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2967
2968         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2969         driver_dump->ioa_type_entry.hdr.len =
2970                 sizeof(struct ipr_dump_ioa_type_entry) -
2971                 sizeof(struct ipr_dump_entry_header);
2972         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2973         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2974         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2975         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2976                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2977                 ucode_vpd->minor_release[1];
2978         driver_dump->hdr.num_entries++;
2979 }
2980
2981 /**
2982  * ipr_dump_version_data - Fill in the driver version in the dump.
2983  * @ioa_cfg:    ioa config struct
2984  * @driver_dump:        driver dump struct
2985  *
2986  * Return value:
2987  *      nothing
2988  **/
2989 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2990                                   struct ipr_driver_dump *driver_dump)
2991 {
2992         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2993         driver_dump->version_entry.hdr.len =
2994                 sizeof(struct ipr_dump_version_entry) -
2995                 sizeof(struct ipr_dump_entry_header);
2996         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2997         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2998         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2999         driver_dump->hdr.num_entries++;
3000 }
3001
3002 /**
3003  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3004  * @ioa_cfg:    ioa config struct
3005  * @driver_dump:        driver dump struct
3006  *
3007  * Return value:
3008  *      nothing
3009  **/
3010 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3011                                    struct ipr_driver_dump *driver_dump)
3012 {
3013         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3014         driver_dump->trace_entry.hdr.len =
3015                 sizeof(struct ipr_dump_trace_entry) -
3016                 sizeof(struct ipr_dump_entry_header);
3017         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3018         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3019         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3020         driver_dump->hdr.num_entries++;
3021 }
3022
3023 /**
3024  * ipr_dump_location_data - Fill in the IOA location in the dump.
3025  * @ioa_cfg:    ioa config struct
3026  * @driver_dump:        driver dump struct
3027  *
3028  * Return value:
3029  *      nothing
3030  **/
3031 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3032                                    struct ipr_driver_dump *driver_dump)
3033 {
3034         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3035         driver_dump->location_entry.hdr.len =
3036                 sizeof(struct ipr_dump_location_entry) -
3037                 sizeof(struct ipr_dump_entry_header);
3038         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3039         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3040         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3041         driver_dump->hdr.num_entries++;
3042 }
3043
3044 /**
3045  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3046  * @ioa_cfg:    ioa config struct
3047  * @dump:               dump struct
3048  *
3049  * Return value:
3050  *      nothing
3051  **/
3052 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3053 {
3054         unsigned long start_addr, sdt_word;
3055         unsigned long lock_flags = 0;
3056         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3057         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3058         u32 num_entries, max_num_entries, start_off, end_off;
3059         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3060         struct ipr_sdt *sdt;
3061         int valid = 1;
3062         int i;
3063
3064         ENTER;
3065
3066         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3067
3068         if (ioa_cfg->sdt_state != READ_DUMP) {
3069                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3070                 return;
3071         }
3072
3073         if (ioa_cfg->sis64) {
3074                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3075                 ssleep(IPR_DUMP_DELAY_SECONDS);
3076                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3077         }
3078
3079         start_addr = readl(ioa_cfg->ioa_mailbox);
3080
3081         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3082                 dev_err(&ioa_cfg->pdev->dev,
3083                         "Invalid dump table format: %lx\n", start_addr);
3084                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3085                 return;
3086         }
3087
3088         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3089
3090         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3091
3092         /* Initialize the overall dump header */
3093         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3094         driver_dump->hdr.num_entries = 1;
3095         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3096         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3097         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3098         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3099
3100         ipr_dump_version_data(ioa_cfg, driver_dump);
3101         ipr_dump_location_data(ioa_cfg, driver_dump);
3102         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3103         ipr_dump_trace_data(ioa_cfg, driver_dump);
3104
3105         /* Update dump_header */
3106         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3107
3108         /* IOA Dump entry */
3109         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3110         ioa_dump->hdr.len = 0;
3111         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3112         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3113
3114         /* First entries in sdt are actually a list of dump addresses and
3115          * lengths to gather the real dump data.  sdt represents the pointer
3116          * to the ioa generated dump table.  Dump data will be extracted based
3117          * on entries in this table */
3118         sdt = &ioa_dump->sdt;
3119
3120         if (ioa_cfg->sis64) {
3121                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3122                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3123         } else {
3124                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3125                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3126         }
3127
3128         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3129                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3130         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3131                                         bytes_to_copy / sizeof(__be32));
3132
3133         /* Smart Dump table is ready to use and the first entry is valid */
3134         /* Fail the dump unless the Smart Dump Table is ready to use */
3135             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3136                 dev_err(&ioa_cfg->pdev->dev,
3137                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3138                         rc, be32_to_cpu(sdt->hdr.state));
3139                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3140                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3141                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142                 return;
3143         }
3144
3145         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3146
3147         if (num_entries > max_num_entries)
3148                 num_entries = max_num_entries;
3149
3150         /* Update dump length to the actual data to be copied */
3151         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3152         if (ioa_cfg->sis64)
3153                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3154         else
3155                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3156
3157         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158
3159         for (i = 0; i < num_entries; i++) {
3160                 if (ioa_dump->hdr.len > max_dump_size) {
3161                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3162                         break;
3163                 }
3164
3165                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3166                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3167                         if (ioa_cfg->sis64)
3168                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3169                         else {
3170                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3171                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3172
3173                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3174                                         bytes_to_copy = end_off - start_off;
3175                                 else
3176                                         valid = 0;
3177                         }
3178                         if (valid) {
3179                                 if (bytes_to_copy > max_dump_size) {
3180                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3181                                         continue;
3182                                 }
3183
3184                                 /* Copy data from adapter to driver buffers */
3185                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3186                                                             bytes_to_copy);
3187
3188                                 ioa_dump->hdr.len += bytes_copied;
3189
3190                                 if (bytes_copied != bytes_to_copy) {
3191                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3192                                         break;
3193                                 }
3194                         }
3195                 }
3196         }
3197
3198         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3199
3200         /* Update dump_header */
3201         driver_dump->hdr.len += ioa_dump->hdr.len;
3202         wmb();
3203         ioa_cfg->sdt_state = DUMP_OBTAINED;
3204         LEAVE;
3205 }
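
/*
 * Resulting dump layout, as assembled by the helpers above: an
 * ipr_dump_header, followed by the driver version, location, IOA type
 * and trace entries, then the IOA dump entry holding the Smart Dump
 * Table header/entries and the raw IOA memory gathered via
 * ipr_sdt_copy().
 */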
3206
3207 #else
3208 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3209 #endif
3210
3211 /**
3212  * ipr_release_dump - Free adapter dump memory
3213  * @kref:       kref struct
3214  *
3215  * Return value:
3216  *      nothing
3217  **/
3218 static void ipr_release_dump(struct kref *kref)
3219 {
3220         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3221         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3222         unsigned long lock_flags = 0;
3223         int i;
3224
3225         ENTER;
3226         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3227         ioa_cfg->dump = NULL;
3228         ioa_cfg->sdt_state = INACTIVE;
3229         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230
3231         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3232                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3233
3234         vfree(dump->ioa_dump.ioa_data);
3235         kfree(dump);
3236         LEAVE;
3237 }
3238
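/**
 * ipr_add_remove_thread - Add and remove SCSI devices
 * @work:		ioa config struct
 *
 * Called at task level from a work thread. Removes resources flagged
 * del_from_ml from the SCSI mid-layer and adds resources flagged
 * add_to_ml, dropping the host lock around the mid-layer calls.
 *
 * Return value:
 *      nothing
 **/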
3239 static void ipr_add_remove_thread(struct work_struct *work)
3240 {
3241         unsigned long lock_flags;
3242         struct ipr_resource_entry *res;
3243         struct scsi_device *sdev;
3244         struct ipr_ioa_cfg *ioa_cfg =
3245                 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3246         u8 bus, target, lun;
3247         int did_work;
3248
3249         ENTER;
3250         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3251
3252 restart:
3253         do {
3254                 did_work = 0;
3255                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3256                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257                         return;
3258                 }
3259
3260                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3261                         if (res->del_from_ml && res->sdev) {
3262                                 did_work = 1;
3263                                 sdev = res->sdev;
3264                                 if (!scsi_device_get(sdev)) {
3265                                         if (!res->add_to_ml)
3266                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3267                                         else
3268                                                 res->del_from_ml = 0;
3269                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3270                                         scsi_remove_device(sdev);
3271                                         scsi_device_put(sdev);
3272                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3273                                 }
3274                                 break;
3275                         }
3276                 }
3277         } while (did_work);
3278
3279         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3280                 if (res->add_to_ml) {
3281                         bus = res->bus;
3282                         target = res->target;
3283                         lun = res->lun;
3284                         res->add_to_ml = 0;
3285                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3286                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3287                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3288                         goto restart;
3289                 }
3290         }
3291
3292         ioa_cfg->scan_done = 1;
3293         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3294         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3295         LEAVE;
3296 }
3297
3298 /**
3299  * ipr_worker_thread - Worker thread
3300  * @work:               ioa config struct
3301  *
3302  * Called at task level from a work thread. This function takes care
3303  * of adding and removing devices from the mid-layer as configuration
3304  * changes are detected by the adapter.
3305  *
3306  * Return value:
3307  *      nothing
3308  **/
3309 static void ipr_worker_thread(struct work_struct *work)
3310 {
3311         unsigned long lock_flags;
3312         struct ipr_dump *dump;
3313         struct ipr_ioa_cfg *ioa_cfg =
3314                 container_of(work, struct ipr_ioa_cfg, work_q);
3315
3316         ENTER;
3317         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3318
3319         if (ioa_cfg->sdt_state == READ_DUMP) {
3320                 dump = ioa_cfg->dump;
3321                 if (!dump) {
3322                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3323                         return;
3324                 }
3325                 kref_get(&dump->kref);
3326                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3327                 ipr_get_ioa_dump(ioa_cfg, dump);
3328                 kref_put(&dump->kref, ipr_release_dump);
3329
3330                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3331                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3332                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3333                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3334                 return;
3335         }
3336
3337         if (ioa_cfg->scsi_unblock) {
3338                 ioa_cfg->scsi_unblock = 0;
3339                 ioa_cfg->scsi_blocked = 0;
3340                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3341                 scsi_unblock_requests(ioa_cfg->host);
3342                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3343                 if (ioa_cfg->scsi_blocked)
3344                         scsi_block_requests(ioa_cfg->host);
3345         }
3346
3347         if (!ioa_cfg->scan_enabled) {
3348                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3349                 return;
3350         }
3351
3352         schedule_work(&ioa_cfg->scsi_add_work_q);
3353
3354         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3355         LEAVE;
3356 }
3357
3358 #ifdef CONFIG_SCSI_IPR_TRACE
3359 /**
3360  * ipr_read_trace - Dump the adapter trace
3361  * @filp:               open sysfs file
3362  * @kobj:               kobject struct
3363  * @bin_attr:           bin_attribute struct
3364  * @buf:                buffer
3365  * @off:                offset
3366  * @count:              buffer size
3367  *
3368  * Return value:
3369  *      number of bytes printed to buffer
3370  **/
3371 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3372                               struct bin_attribute *bin_attr,
3373                               char *buf, loff_t off, size_t count)
3374 {
3375         struct device *dev = kobj_to_dev(kobj);
3376         struct Scsi_Host *shost = class_to_shost(dev);
3377         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3378         unsigned long lock_flags = 0;
3379         ssize_t ret;
3380
3381         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3382         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3383                                 IPR_TRACE_SIZE);
3384         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3385
3386         return ret;
3387 }
3388
3389 static struct bin_attribute ipr_trace_attr = {
3390         .attr = {
3391                 .name = "trace",
3392                 .mode = S_IRUGO,
3393         },
3394         .size = 0,
3395         .read = ipr_read_trace,
3396 };
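
/*
 * Usage sketch for the "trace" attribute (a raw binary dump of the
 * adapter trace buffer); hostN is whatever host number the adapter was
 * assigned:
 *
 *	dd if=/sys/class/scsi_host/hostN/trace of=/tmp/ipr_trace.bin
 */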
3397 #endif
3398
3399 /**
3400  * ipr_show_fw_version - Show the firmware version
3401  * @dev:        class device struct
3402  * @attr:       device attribute (unused)
3403  * @buf:        buffer
3404  *
3405  * Return value:
3406  *      number of bytes printed to buffer
3407  **/
3408 static ssize_t ipr_show_fw_version(struct device *dev,
3409                                    struct device_attribute *attr, char *buf)
3410 {
3411         struct Scsi_Host *shost = class_to_shost(dev);
3412         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3413         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3414         unsigned long lock_flags = 0;
3415         int len;
3416
3417         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3418         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3419                        ucode_vpd->major_release, ucode_vpd->card_type,
3420                        ucode_vpd->minor_release[0],
3421                        ucode_vpd->minor_release[1]);
3422         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3423         return len;
3424 }
3425
3426 static struct device_attribute ipr_fw_version_attr = {
3427         .attr = {
3428                 .name =         "fw_version",
3429                 .mode =         S_IRUGO,
3430         },
3431         .show = ipr_show_fw_version,
3432 };
3433
3434 /**
3435  * ipr_show_log_level - Show the adapter's error logging level
3436  * @dev:        class device struct
3437  * @attr:       device attribute (unused)
3438  * @buf:        buffer
3439  *
3440  * Return value:
3441  *      number of bytes printed to buffer
3442  **/
3443 static ssize_t ipr_show_log_level(struct device *dev,
3444                                    struct device_attribute *attr, char *buf)
3445 {
3446         struct Scsi_Host *shost = class_to_shost(dev);
3447         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3448         unsigned long lock_flags = 0;
3449         int len;
3450
3451         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3452         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3453         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3454         return len;
3455 }
3456
3457 /**
3458  * ipr_store_log_level - Change the adapter's error logging level
3459  * @dev:        class device struct
3460  * @attr:       device attribute (unused)
3461  * @buf:        buffer
3462  * @count:      buffer size
3463  *
3464  * Return value:
3465  *      number of bytes consumed from buffer
3466  **/
3467 static ssize_t ipr_store_log_level(struct device *dev,
3468                                    struct device_attribute *attr,
3469                                    const char *buf, size_t count)
3470 {
3471         struct Scsi_Host *shost = class_to_shost(dev);
3472         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3473         unsigned long lock_flags = 0;
3474
3475         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3476         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3477         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3478         return strlen(buf);
3479 }
3480
3481 static struct device_attribute ipr_log_level_attr = {
3482         .attr = {
3483                 .name =         "log_level",
3484                 .mode =         S_IRUGO | S_IWUSR,
3485         },
3486         .show = ipr_show_log_level,
3487         .store = ipr_store_log_level
3488 };
3489
3490 /**
3491  * ipr_store_diagnostics - IOA Diagnostics interface
3492  * @dev:        device struct
3493  * @attr:       device attribute (unused)
3494  * @buf:        buffer
3495  * @count:      buffer size
3496  *
3497  * This function will reset the adapter and wait a reasonable
3498  * amount of time for any errors that the adapter might log.
3499  *
3500  * Return value:
3501  *      count on success / other on failure
3502  **/
3503 static ssize_t ipr_store_diagnostics(struct device *dev,
3504                                      struct device_attribute *attr,
3505                                      const char *buf, size_t count)
3506 {
3507         struct Scsi_Host *shost = class_to_shost(dev);
3508         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3509         unsigned long lock_flags = 0;
3510         int rc = count;
3511
3512         if (!capable(CAP_SYS_ADMIN))
3513                 return -EACCES;
3514
3515         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3516         while (ioa_cfg->in_reset_reload) {
3517                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3518                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3519                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3520         }
3521
3522         ioa_cfg->errors_logged = 0;
3523         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3524
3525         if (ioa_cfg->in_reset_reload) {
3526                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3527                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3528
3529                 /* Wait for a second for any errors to be logged */
3530                 msleep(1000);
3531         } else {
3532                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3533                 return -EIO;
3534         }
3535
3536         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3537         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3538                 rc = -EIO;
3539         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3540
3541         return rc;
3542 }
3543
3544 static struct device_attribute ipr_diagnostics_attr = {
3545         .attr = {
3546                 .name =         "run_diagnostics",
3547                 .mode =         S_IWUSR,
3548         },
3549         .store = ipr_store_diagnostics
3550 };
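
/*
 * Usage sketch: any write by a privileged process triggers the
 * diagnostic reset, e.g. (hostN is illustrative)
 *
 *	echo 1 > /sys/class/scsi_host/hostN/run_diagnostics
 *
 * The write fails with -EIO if the adapter logged errors during the
 * reset.
 */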
3551
3552 /**
3553  * ipr_show_adapter_state - Show the adapter's state
3554  * @dev:        device struct
3555  * @attr:       device attribute (unused)
3556  * @buf:        buffer
3557  *
3558  * Return value:
3559  *      number of bytes printed to buffer
3560  **/
3561 static ssize_t ipr_show_adapter_state(struct device *dev,
3562                                       struct device_attribute *attr, char *buf)
3563 {
3564         struct Scsi_Host *shost = class_to_shost(dev);
3565         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3566         unsigned long lock_flags = 0;
3567         int len;
3568
3569         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3570         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3571                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3572         else
3573                 len = snprintf(buf, PAGE_SIZE, "online\n");
3574         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3575         return len;
3576 }
3577
3578 /**
3579  * ipr_store_adapter_state - Change adapter state
3580  * @dev:        device struct
3581  * @attr:       device attribute (unused)
3582  * @buf:        buffer
3583  * @count:      buffer size
3584  *
3585  * This function will change the adapter's state.
3586  *
3587  * Return value:
3588  *      count on success / other on failure
3589  **/
3590 static ssize_t ipr_store_adapter_state(struct device *dev,
3591                                        struct device_attribute *attr,
3592                                        const char *buf, size_t count)
3593 {
3594         struct Scsi_Host *shost = class_to_shost(dev);
3595         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3596         unsigned long lock_flags;
3597         int result = count, i;
3598
3599         if (!capable(CAP_SYS_ADMIN))
3600                 return -EACCES;
3601
3602         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3603         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3604             !strncmp(buf, "online", 6)) {
3605                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3606                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3607                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3608                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3609                 }
3610                 wmb();
3611                 ioa_cfg->reset_retries = 0;
3612                 ioa_cfg->in_ioa_bringdown = 0;
3613                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3614         }
3615         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3617
3618         return result;
3619 }
3620
3621 static struct device_attribute ipr_ioa_state_attr = {
3622         .attr = {
3623                 .name =         "online_state",
3624                 .mode =         S_IRUGO | S_IWUSR,
3625         },
3626         .show = ipr_show_adapter_state,
3627         .store = ipr_store_adapter_state
3628 };
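
/*
 * Usage sketch: only the offline -> online transition is acted on by
 * the store routine above, e.g. (hostN is illustrative)
 *
 *	echo online > /sys/class/scsi_host/hostN/online_state
 *
 * revives a dead adapter by clearing the per-hrrq ioa_is_dead flags
 * and initiating a reset.
 */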
3629
3630 /**
3631  * ipr_store_reset_adapter - Reset the adapter
3632  * @dev:        device struct
3633  * @attr:       device attribute (unused)
3634  * @buf:        buffer
3635  * @count:      buffer size
3636  *
3637  * This function will reset the adapter.
3638  *
3639  * Return value:
3640  *      count on success / other on failure
3641  **/
3642 static ssize_t ipr_store_reset_adapter(struct device *dev,
3643                                        struct device_attribute *attr,
3644                                        const char *buf, size_t count)
3645 {
3646         struct Scsi_Host *shost = class_to_shost(dev);
3647         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3648         unsigned long lock_flags;
3649         int result = count;
3650
3651         if (!capable(CAP_SYS_ADMIN))
3652                 return -EACCES;
3653
3654         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3655         if (!ioa_cfg->in_reset_reload)
3656                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3657         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3658         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3659
3660         return result;
3661 }
3662
3663 static struct device_attribute ipr_ioa_reset_attr = {
3664         .attr = {
3665                 .name =         "reset_host",
3666                 .mode =         S_IWUSR,
3667         },
3668         .store = ipr_store_reset_adapter
3669 };
3670
3671 static int ipr_iopoll(struct irq_poll *iop, int budget);
3672 /**
3673  * ipr_show_iopoll_weight - Show ipr polling mode
3674  * @dev:        class device struct
3675  * @attr:       device attribute (unused)
3676  * @buf:        buffer
3677  *
3678  * Return value:
3679  *      number of bytes printed to buffer
3680  **/
3681 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3682                                    struct device_attribute *attr, char *buf)
3683 {
3684         struct Scsi_Host *shost = class_to_shost(dev);
3685         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3686         unsigned long lock_flags = 0;
3687         int len;
3688
3689         spin_lock_irqsave(shost->host_lock, lock_flags);
3690         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3691         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3692
3693         return len;
3694 }
3695
3696 /**
3697  * ipr_store_iopoll_weight - Change the adapter's polling mode
3698  * @dev:        class device struct
3699  * @attr:       device attribute (unused)
3700  * @buf:        buffer
3701  * @count:      buffer size
3702  *
3703  * Return value:
3704  *      number of bytes consumed from buffer
3705  **/
3706 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3707                                         struct device_attribute *attr,
3708                                         const char *buf, size_t count)
3709 {
3710         struct Scsi_Host *shost = class_to_shost(dev);
3711         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3712         unsigned long user_iopoll_weight;
3713         unsigned long lock_flags = 0;
3714         int i;
3715
3716         if (!ioa_cfg->sis64) {
3717                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3718                 return -EINVAL;
3719         }
3720         if (kstrtoul(buf, 10, &user_iopoll_weight))
3721                 return -EINVAL;
3722
3723         if (user_iopoll_weight > 256) {
3724                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3725                 return -EINVAL;
3726         }
3727
3728         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3729                 dev_info(&ioa_cfg->pdev->dev, "Specified irq_poll weight is already the current weight\n");
3730                 return strlen(buf);
3731         }
3732
3733         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3734                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3735                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3736         }
3737
3738         spin_lock_irqsave(shost->host_lock, lock_flags);
3739         ioa_cfg->iopoll_weight = user_iopoll_weight;
3740         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3741                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3742                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3743                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3744                 }
3745         }
3746         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3747
3748         return strlen(buf);
3749 }
3750
3751 static struct device_attribute ipr_iopoll_weight_attr = {
3752         .attr = {
3753                 .name =         "iopoll_weight",
3754                 .mode =         S_IRUGO | S_IWUSR,
3755         },
3756         .show = ipr_show_iopoll_weight,
3757         .store = ipr_store_iopoll_weight
3758 };
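
/*
 * Usage sketch (sis64 adapters with multiple MSI-X vectors only);
 * hostN is illustrative:
 *
 *	echo 64 > /sys/class/scsi_host/hostN/iopoll_weight
 *
 * enables irq_poll based completion processing with a budget of 64,
 * while writing 0 reverts to purely interrupt-driven completions.
 */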
3759
3760 /**
3761  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3762  * @buf_len:            buffer length
3763  *
3764  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3765  * list to use for microcode download
3766  *
3767  * Return value:
3768  *      pointer to sglist / NULL on failure
3769  **/
3770 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3771 {
3772         int sg_size, order;
3773         struct ipr_sglist *sglist;
3774
3775         /* Get the minimum size per scatter/gather element */
3776         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3777
3778         /* Get the actual size per element */
3779         order = get_order(sg_size);
3780
3781         /* Allocate a scatter/gather list for the DMA */
3782         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3783         if (sglist == NULL) {
3784                 ipr_trace;
3785                 return NULL;
3786         }
3787         sglist->order = order;
3788         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3789                                               &sglist->num_sg);
3790         if (!sglist->scatterlist) {
3791                 kfree(sglist);
3792                 return NULL;
3793         }
3794
3795         return sglist;
3796 }
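
/*
 * Sizing example with illustrative numbers: for a 1 MiB image and a
 * 64-element scatter/gather limit, each element must hold at least
 * ~16.6 KiB, so get_order() rounds up to order 3 and sgl_alloc_order()
 * builds the list from 32 KiB chunks (assuming 4 KiB pages).
 */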
3797
3798 /**
3799  * ipr_free_ucode_buffer - Frees a microcode download buffer
3800  * @sglist:             scatter/gather list pointer
3801  *
3802  * Free a DMA'able ucode download buffer previously allocated with
3803  * ipr_alloc_ucode_buffer
3804  *
3805  * Return value:
3806  *      nothing
3807  **/
3808 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3809 {
3810         sgl_free_order(sglist->scatterlist, sglist->order);
3811         kfree(sglist);
3812 }
3813
3814 /**
3815  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3816  * @sglist:             scatter/gather list pointer
3817  * @buffer:             buffer pointer
3818  * @len:                buffer length
3819  *
3820  * Copy a microcode image from a user buffer into a buffer allocated by
3821  * ipr_alloc_ucode_buffer
3822  *
3823  * Return value:
3824  *      0 on success / other on failure
3825  **/
3826 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3827                                  u8 *buffer, u32 len)
3828 {
3829         int bsize_elem, i, result = 0;
3830         struct scatterlist *sg;
3831
3832         /* Determine the actual number of bytes per element */
3833         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3834
3835         sg = sglist->scatterlist;
3836
3837         for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3838                         buffer += bsize_elem) {
3839                 struct page *page = sg_page(sg);
3840
3841                 memcpy_to_page(page, 0, buffer, bsize_elem);
3842
3843                 sg->length = bsize_elem;
3849         }
3850
3851         if (len % bsize_elem) {
3852                 struct page *page = sg_page(sg);
3853
3854                 memcpy_to_page(page, 0, buffer, len % bsize_elem);
3855
3856                 sg->length = len % bsize_elem;
3857         }
3858
3859         sglist->buffer_len = len;
3860         return result;
3861 }
3862
3863 /**
3864  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3865  * @ipr_cmd:            ipr command struct
3866  * @sglist:             scatter/gather list
3867  *
3868  * Builds a microcode download IOA data list (IOADL).
3869  *
3870  **/
3871 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3872                                     struct ipr_sglist *sglist)
3873 {
3874         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3875         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3876         struct scatterlist *scatterlist = sglist->scatterlist;
3877         struct scatterlist *sg;
3878         int i;
3879
3880         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3881         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3882         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3883
3884         ioarcb->ioadl_len =
3885                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3886         for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3887                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3888                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3889                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3890         }
3891
3892         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3893 }
3894
3895 /**
3896  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3897  * @ipr_cmd:    ipr command struct
3898  * @sglist:             scatter/gather list
3899  *
3900  * Builds a microcode download IOA data list (IOADL).
3901  *
3902  **/
3903 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3904                                   struct ipr_sglist *sglist)
3905 {
3906         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3907         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3908         struct scatterlist *scatterlist = sglist->scatterlist;
3909         struct scatterlist *sg;
3910         int i;
3911
3912         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3913         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3914         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3915
3916         ioarcb->ioadl_len =
3917                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3918
3919         for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3920                 ioadl[i].flags_and_data_len =
3921                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
3922                 ioadl[i].address =
3923                         cpu_to_be32(sg_dma_address(sg));
3924         }
3925
3926         ioadl[i-1].flags_and_data_len |=
3927                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3928 }
3929
3930 /**
3931  * ipr_update_ioa_ucode - Update IOA's microcode
3932  * @ioa_cfg:    ioa config struct
3933  * @sglist:             scatter/gather list
3934  *
3935  * Initiate an adapter reset to update the IOA's microcode
3936  *
3937  * Return value:
3938  *      0 on success / -EIO on failure
3939  **/
3940 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3941                                 struct ipr_sglist *sglist)
3942 {
3943         unsigned long lock_flags;
3944
3945         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3946         while (ioa_cfg->in_reset_reload) {
3947                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3948                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3949                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3950         }
3951
3952         if (ioa_cfg->ucode_sglist) {
3953                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3954                 dev_err(&ioa_cfg->pdev->dev,
3955                         "Microcode download already in progress\n");
3956                 return -EIO;
3957         }
3958
3959         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3960                                         sglist->scatterlist, sglist->num_sg,
3961                                         DMA_TO_DEVICE);
3962
3963         if (!sglist->num_dma_sg) {
3964                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965                 dev_err(&ioa_cfg->pdev->dev,
3966                         "Failed to map microcode download buffer!\n");
3967                 return -EIO;
3968         }
3969
3970         ioa_cfg->ucode_sglist = sglist;
3971         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3972         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3973         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3974
3975         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3976         ioa_cfg->ucode_sglist = NULL;
3977         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3978         return 0;
3979 }
3980
3981 /**
3982  * ipr_store_update_fw - Update the firmware on the adapter
3983  * @dev:        device struct
3984  * @attr:       device attribute (unused)
3985  * @buf:        buffer
3986  * @count:      buffer size
3987  *
3988  * This function will update the firmware on the adapter.
3989  *
3990  * Return value:
3991  *      count on success / other on failure
3992  **/
3993 static ssize_t ipr_store_update_fw(struct device *dev,
3994                                    struct device_attribute *attr,
3995                                    const char *buf, size_t count)
3996 {
3997         struct Scsi_Host *shost = class_to_shost(dev);
3998         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3999         struct ipr_ucode_image_header *image_hdr;
4000         const struct firmware *fw_entry;
4001         struct ipr_sglist *sglist;
4002         char fname[100];
4003         u8 *src;
4004         char *endline;
4005         int result, dnld_size;
4006
4007         if (!capable(CAP_SYS_ADMIN))
4008                 return -EACCES;
4009
4010         snprintf(fname, sizeof(fname), "%s", buf);
4011
4012         endline = strchr(fname, '\n');
4013         if (endline)
4014                 *endline = '\0';
4015
4016         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4017                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4018                 return -EIO;
4019         }
4020
4021         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4022
4023         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4024         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4025         sglist = ipr_alloc_ucode_buffer(dnld_size);
4026
4027         if (!sglist) {
4028                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4029                 release_firmware(fw_entry);
4030                 return -ENOMEM;
4031         }
4032
4033         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4034
4035         if (result) {
4036                 dev_err(&ioa_cfg->pdev->dev,
4037                         "Microcode buffer copy to DMA buffer failed\n");
4038                 goto out;
4039         }
4040
4041         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4042
4043         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4044
4045         if (!result)
4046                 result = count;
4047 out:
4048         ipr_free_ucode_buffer(sglist);
4049         release_firmware(fw_entry);
4050         return result;
4051 }
4052
4053 static struct device_attribute ipr_update_fw_attr = {
4054         .attr = {
4055                 .name =         "update_fw",
4056                 .mode =         S_IWUSR,
4057         },
4058         .store = ipr_store_update_fw
4059 };
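
/*
 * Usage sketch: place the microcode image in the firmware search path
 * (e.g. /lib/firmware) and write its name to the attribute; the file
 * name and hostN below are illustrative:
 *
 *	echo ibm-ucode.bin > /sys/class/scsi_host/hostN/update_fw
 *
 * request_firmware() loads the image and the adapter is then reset to
 * apply it.
 */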
4060
4061 /**
4062  * ipr_show_fw_type - Show the adapter's firmware type.
4063  * @dev:        class device struct
4064  * @attr:       device attribute (unused)
4065  * @buf:        buffer
4066  *
4067  * Return value:
4068  *      number of bytes printed to buffer
4069  **/
4070 static ssize_t ipr_show_fw_type(struct device *dev,
4071                                 struct device_attribute *attr, char *buf)
4072 {
4073         struct Scsi_Host *shost = class_to_shost(dev);
4074         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4075         unsigned long lock_flags = 0;
4076         int len;
4077
4078         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4079         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4080         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4081         return len;
4082 }
4083
4084 static struct device_attribute ipr_ioa_fw_type_attr = {
4085         .attr = {
4086                 .name =         "fw_type",
4087                 .mode =         S_IRUGO,
4088         },
4089         .show = ipr_show_fw_type
4090 };
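
/* fw_type reads as 1 on SIS-64 adapters and 0 otherwise. */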
4091
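/**
 * ipr_read_async_err_log - Read the oldest queued async error log entry
 * @filep:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes read / 0 if no entry is queued
 **/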
4092 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4093                                 struct bin_attribute *bin_attr, char *buf,
4094                                 loff_t off, size_t count)
4095 {
4096         struct device *cdev = kobj_to_dev(kobj);
4097         struct Scsi_Host *shost = class_to_shost(cdev);
4098         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4099         struct ipr_hostrcb *hostrcb;
4100         unsigned long lock_flags = 0;
4101         int ret;
4102
4103         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4104         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4105                                         struct ipr_hostrcb, queue);
4106         if (!hostrcb) {
4107                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4108                 return 0;
4109         }
4110         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4111                                 sizeof(hostrcb->hcam));
4112         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4113         return ret;
4114 }
4115
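/**
 * ipr_next_async_err_log - Discard the oldest queued async error log entry
 * @filep:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	count
 **/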
4116 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4117                                 struct bin_attribute *bin_attr, char *buf,
4118                                 loff_t off, size_t count)
4119 {
4120         struct device *cdev = kobj_to_dev(kobj);
4121         struct Scsi_Host *shost = class_to_shost(cdev);
4122         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4123         struct ipr_hostrcb *hostrcb;
4124         unsigned long lock_flags = 0;
4125
4126         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4127         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4128                                         struct ipr_hostrcb, queue);
4129         if (!hostrcb) {
4130                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4131                 return count;
4132         }
4133
4134         /* Reclaim hostrcb before exit */
4135         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4136         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4137         return count;
4138 }
4139
4140 static struct bin_attribute ipr_ioa_async_err_log = {
4141         .attr = {
4142                 .name =         "async_err_log",
4143                 .mode =         S_IRUGO | S_IWUSR,
4144         },
4145         .size = 0,
4146         .read = ipr_read_async_err_log,
4147         .write = ipr_next_async_err_log
4148 };
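
/*
 * Reading async_err_log returns the oldest queued HCAM buffer; writing any
 * value to it discards that entry so the next read returns the following
 * one. Illustrative sequence (host number varies; the attribute typically
 * appears under /sys/class/scsi_host/hostN/):
 *
 *   # cat /sys/class/scsi_host/host0/async_err_log > hcam.bin
 *   # echo 1 > /sys/class/scsi_host/host0/async_err_log
 */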
4149
4150 static struct attribute *ipr_ioa_attrs[] = {
4151         &ipr_fw_version_attr.attr,
4152         &ipr_log_level_attr.attr,
4153         &ipr_diagnostics_attr.attr,
4154         &ipr_ioa_state_attr.attr,
4155         &ipr_ioa_reset_attr.attr,
4156         &ipr_update_fw_attr.attr,
4157         &ipr_ioa_fw_type_attr.attr,
4158         &ipr_iopoll_weight_attr.attr,
4159         NULL,
4160 };
4161
4162 ATTRIBUTE_GROUPS(ipr_ioa);
4163
4164 #ifdef CONFIG_SCSI_IPR_DUMP
4165 /**
4166  * ipr_read_dump - Dump the adapter
4167  * @filp:               open sysfs file
4168  * @kobj:               kobject struct
4169  * @bin_attr:           bin_attribute struct
4170  * @buf:                buffer
4171  * @off:                offset
4172  * @count:              buffer size
4173  *
4174  * Return value:
 *	number of bytes read on success / other on failure
4176  **/
4177 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4178                              struct bin_attribute *bin_attr,
4179                              char *buf, loff_t off, size_t count)
4180 {
4181         struct device *cdev = kobj_to_dev(kobj);
4182         struct Scsi_Host *shost = class_to_shost(cdev);
4183         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4184         struct ipr_dump *dump;
4185         unsigned long lock_flags = 0;
4186         char *src;
4187         int len, sdt_end;
4188         size_t rc = count;
4189
4190         if (!capable(CAP_SYS_ADMIN))
4191                 return -EACCES;
4192
4193         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4194         dump = ioa_cfg->dump;
4195
4196         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4197                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4198                 return 0;
4199         }
4200         kref_get(&dump->kref);
4201         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202
4203         if (off > dump->driver_dump.hdr.len) {
4204                 kref_put(&dump->kref, ipr_release_dump);
4205                 return 0;
4206         }
4207
4208         if (off + count > dump->driver_dump.hdr.len) {
4209                 count = dump->driver_dump.hdr.len - off;
4210                 rc = count;
4211         }
4212
4213         if (count && off < sizeof(dump->driver_dump)) {
4214                 if (off + count > sizeof(dump->driver_dump))
4215                         len = sizeof(dump->driver_dump) - off;
4216                 else
4217                         len = count;
4218                 src = (u8 *)&dump->driver_dump + off;
4219                 memcpy(buf, src, len);
4220                 buf += len;
4221                 off += len;
4222                 count -= len;
4223         }
4224
4225         off -= sizeof(dump->driver_dump);
4226
4227         if (ioa_cfg->sis64)
4228                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4229                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4230                            sizeof(struct ipr_sdt_entry));
4231         else
4232                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4233                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4234
4235         if (count && off < sdt_end) {
4236                 if (off + count > sdt_end)
4237                         len = sdt_end - off;
4238                 else
4239                         len = count;
4240                 src = (u8 *)&dump->ioa_dump + off;
4241                 memcpy(buf, src, len);
4242                 buf += len;
4243                 off += len;
4244                 count -= len;
4245         }
4246
4247         off -= sdt_end;
4248
4249         while (count) {
4250                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4251                         len = PAGE_ALIGN(off) - off;
4252                 else
4253                         len = count;
4254                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4255                 src += off & ~PAGE_MASK;
4256                 memcpy(buf, src, len);
4257                 buf += len;
4258                 off += len;
4259                 count -= len;
4260         }
4261
4262         kref_put(&dump->kref, ipr_release_dump);
4263         return rc;
4264 }
4265
4266 /**
4267  * ipr_alloc_dump - Prepare for adapter dump
4268  * @ioa_cfg:    ioa config struct
4269  *
4270  * Return value:
4271  *      0 on success / other on failure
4272  **/
4273 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4274 {
4275         struct ipr_dump *dump;
4276         __be32 **ioa_data;
4277         unsigned long lock_flags = 0;
4278
4279         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4280
4281         if (!dump) {
4282                 ipr_err("Dump memory allocation failed\n");
4283                 return -ENOMEM;
4284         }
4285
4286         if (ioa_cfg->sis64)
4287                 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4288                                               sizeof(__be32 *)));
4289         else
4290                 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4291                                               sizeof(__be32 *)));
4292
4293         if (!ioa_data) {
4294                 ipr_err("Dump memory allocation failed\n");
4295                 kfree(dump);
4296                 return -ENOMEM;
4297         }
4298
4299         dump->ioa_dump.ioa_data = ioa_data;
4300
4301         kref_init(&dump->kref);
4302         dump->ioa_cfg = ioa_cfg;
4303
4304         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4305
4306         if (INACTIVE != ioa_cfg->sdt_state) {
4307                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4308                 vfree(dump->ioa_dump.ioa_data);
4309                 kfree(dump);
4310                 return 0;
4311         }
4312
4313         ioa_cfg->dump = dump;
4314         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4315         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4316                 ioa_cfg->dump_taken = 1;
4317                 schedule_work(&ioa_cfg->work_q);
4318         }
4319         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4320
4321         return 0;
4322 }
4323
4324 /**
4325  * ipr_free_dump - Free adapter dump memory
4326  * @ioa_cfg:    ioa config struct
4327  *
4328  * Return value:
4329  *      0 on success / other on failure
4330  **/
4331 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4332 {
4333         struct ipr_dump *dump;
4334         unsigned long lock_flags = 0;
4335
4336         ENTER;
4337
4338         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4339         dump = ioa_cfg->dump;
4340         if (!dump) {
4341                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4342                 return 0;
4343         }
4344
4345         ioa_cfg->dump = NULL;
4346         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4347
4348         kref_put(&dump->kref, ipr_release_dump);
4349
4350         LEAVE;
4351         return 0;
4352 }
4353
4354 /**
4355  * ipr_write_dump - Setup dump state of adapter
4356  * @filp:               open sysfs file
4357  * @kobj:               kobject struct
4358  * @bin_attr:           bin_attribute struct
4359  * @buf:                buffer
4360  * @off:                offset
4361  * @count:              buffer size
4362  *
4363  * Return value:
 *	count on success / other on failure
4365  **/
4366 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4367                               struct bin_attribute *bin_attr,
4368                               char *buf, loff_t off, size_t count)
4369 {
4370         struct device *cdev = kobj_to_dev(kobj);
4371         struct Scsi_Host *shost = class_to_shost(cdev);
4372         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4373         int rc;
4374
4375         if (!capable(CAP_SYS_ADMIN))
4376                 return -EACCES;
4377
4378         if (buf[0] == '1')
4379                 rc = ipr_alloc_dump(ioa_cfg);
4380         else if (buf[0] == '0')
4381                 rc = ipr_free_dump(ioa_cfg);
4382         else
4383                 return -EINVAL;
4384
4385         if (rc)
4386                 return rc;
4387         else
4388                 return count;
4389 }
4390
4391 static struct bin_attribute ipr_dump_attr = {
4392         .attr = {
4393                 .name = "dump",
4394                 .mode = S_IRUSR | S_IWUSR,
4395         },
4396         .size = 0,
4397         .read = ipr_read_dump,
4398         .write = ipr_write_dump
4399 };
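
/*
 * Illustrative dump sequence (host number varies):
 *
 *   # echo 1 > /sys/class/scsi_host/host0/dump	# set up dump state
 *   # cat /sys/class/scsi_host/host0/dump > adapter.dump
 *   # echo 0 > /sys/class/scsi_host/host0/dump	# free the dump buffer
 */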
4400 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4402 #endif
4403
4404 /**
4405  * ipr_change_queue_depth - Change the device's queue depth
4406  * @sdev:       scsi device struct
4407  * @qdepth:     depth to set
4408  *
4409  * Return value:
4410  *      actual depth set
4411  **/
4412 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4413 {
4414         scsi_change_queue_depth(sdev, qdepth);
4415         return sdev->queue_depth;
4416 }
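
/*
 * This hook backs the generic SCSI sysfs queue_depth attribute, e.g.
 * (device address illustrative):
 *
 *   # echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
 */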
4417
4418 /**
4419  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4420  * @dev:        device struct
4421  * @attr:       device attribute structure
4422  * @buf:        buffer
4423  *
4424  * Return value:
4425  *      number of bytes printed to buffer
4426  **/
4427 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4428 {
4429         struct scsi_device *sdev = to_scsi_device(dev);
4430         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4431         struct ipr_resource_entry *res;
4432         unsigned long lock_flags = 0;
4433         ssize_t len = -ENXIO;
4434
4435         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4436         res = (struct ipr_resource_entry *)sdev->hostdata;
4437         if (res)
4438                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4439         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4440         return len;
4441 }
4442
4443 static struct device_attribute ipr_adapter_handle_attr = {
4444         .attr = {
4445                 .name =         "adapter_handle",
4446                 .mode =         S_IRUSR,
4447         },
4448         .show = ipr_show_adapter_handle
4449 };
4450
4451 /**
4452  * ipr_show_resource_path - Show the resource path or the resource address for
4453  *                          this device.
4454  * @dev:        device struct
4455  * @attr:       device attribute structure
4456  * @buf:        buffer
4457  *
4458  * Return value:
4459  *      number of bytes printed to buffer
4460  **/
4461 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4462 {
4463         struct scsi_device *sdev = to_scsi_device(dev);
4464         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4465         struct ipr_resource_entry *res;
4466         unsigned long lock_flags = 0;
4467         ssize_t len = -ENXIO;
4468         char buffer[IPR_MAX_RES_PATH_LENGTH];
4469
4470         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4471         res = (struct ipr_resource_entry *)sdev->hostdata;
4472         if (res && ioa_cfg->sis64)
4473                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4474                                __ipr_format_res_path(res->res_path, buffer,
4475                                                      sizeof(buffer)));
4476         else if (res)
4477                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4478                                res->bus, res->target, res->lun);
4479
4480         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4481         return len;
4482 }
4483
4484 static struct device_attribute ipr_resource_path_attr = {
4485         .attr = {
4486                 .name =         "resource_path",
4487                 .mode =         S_IRUGO,
4488         },
4489         .show = ipr_show_resource_path
4490 };
4491
4492 /**
4493  * ipr_show_device_id - Show the device_id for this device.
4494  * @dev:        device struct
4495  * @attr:       device attribute structure
4496  * @buf:        buffer
4497  *
4498  * Return value:
4499  *      number of bytes printed to buffer
4500  **/
4501 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4502 {
4503         struct scsi_device *sdev = to_scsi_device(dev);
4504         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4505         struct ipr_resource_entry *res;
4506         unsigned long lock_flags = 0;
4507         ssize_t len = -ENXIO;
4508
4509         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4510         res = (struct ipr_resource_entry *)sdev->hostdata;
4511         if (res && ioa_cfg->sis64)
4512                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4513         else if (res)
4514                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4515
4516         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4517         return len;
4518 }
4519
4520 static struct device_attribute ipr_device_id_attr = {
4521         .attr = {
4522                 .name =         "device_id",
4523                 .mode =         S_IRUGO,
4524         },
4525         .show = ipr_show_device_id
4526 };
4527
4528 /**
4529  * ipr_show_resource_type - Show the resource type for this device.
4530  * @dev:        device struct
4531  * @attr:       device attribute structure
4532  * @buf:        buffer
4533  *
4534  * Return value:
4535  *      number of bytes printed to buffer
4536  **/
4537 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4538 {
4539         struct scsi_device *sdev = to_scsi_device(dev);
4540         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4541         struct ipr_resource_entry *res;
4542         unsigned long lock_flags = 0;
4543         ssize_t len = -ENXIO;
4544
4545         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4546         res = (struct ipr_resource_entry *)sdev->hostdata;
4547
4548         if (res)
4549                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4550
4551         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4552         return len;
4553 }
4554
4555 static struct device_attribute ipr_resource_type_attr = {
4556         .attr = {
4557                 .name =         "resource_type",
4558                 .mode =         S_IRUGO,
4559         },
4560         .show = ipr_show_resource_type
4561 };
4562
4563 /**
 * ipr_show_raw_mode - Show the device's raw mode
4565  * @dev:        class device struct
4566  * @attr:       device attribute (unused)
4567  * @buf:        buffer
4568  *
4569  * Return value:
4570  *      number of bytes printed to buffer
4571  **/
4572 static ssize_t ipr_show_raw_mode(struct device *dev,
4573                                  struct device_attribute *attr, char *buf)
4574 {
4575         struct scsi_device *sdev = to_scsi_device(dev);
4576         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4577         struct ipr_resource_entry *res;
4578         unsigned long lock_flags = 0;
4579         ssize_t len;
4580
4581         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4582         res = (struct ipr_resource_entry *)sdev->hostdata;
4583         if (res)
4584                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4585         else
4586                 len = -ENXIO;
4587         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4588         return len;
4589 }
4590
4591 /**
 * ipr_store_raw_mode - Change the device's raw mode
4593  * @dev:        class device struct
4594  * @attr:       device attribute (unused)
4595  * @buf:        buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes consumed on success / other on failure
4600  **/
4601 static ssize_t ipr_store_raw_mode(struct device *dev,
4602                                   struct device_attribute *attr,
4603                                   const char *buf, size_t count)
4604 {
4605         struct scsi_device *sdev = to_scsi_device(dev);
4606         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4607         struct ipr_resource_entry *res;
4608         unsigned long lock_flags = 0;
4609         ssize_t len;
4610
4611         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4612         res = (struct ipr_resource_entry *)sdev->hostdata;
4613         if (res) {
4614                 if (ipr_is_af_dasd_device(res)) {
4615                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4616                         len = strlen(buf);
4617                         if (res->sdev)
4618                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4619                                         res->raw_mode ? "enabled" : "disabled");
4620                 } else
4621                         len = -EINVAL;
4622         } else
4623                 len = -ENXIO;
4624         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4625         return len;
4626 }
4627
4628 static struct device_attribute ipr_raw_mode_attr = {
4629         .attr = {
4630                 .name =         "raw_mode",
4631                 .mode =         S_IRUGO | S_IWUSR,
4632         },
4633         .show = ipr_show_raw_mode,
4634         .store = ipr_store_raw_mode
4635 };
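
/*
 * raw_mode can only be toggled on AF DASD devices; writes for any other
 * resource type fail with -EINVAL. Illustrative usage (device address
 * varies):
 *
 *   # echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
 */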
4636
4637 static struct attribute *ipr_dev_attrs[] = {
4638         &ipr_adapter_handle_attr.attr,
4639         &ipr_resource_path_attr.attr,
4640         &ipr_device_id_attr.attr,
4641         &ipr_resource_type_attr.attr,
4642         &ipr_raw_mode_attr.attr,
4643         NULL,
4644 };
4645
4646 ATTRIBUTE_GROUPS(ipr_dev);
4647
4648 /**
4649  * ipr_biosparam - Return the HSC mapping
4650  * @sdev:                       scsi device struct
4651  * @block_device:       block device pointer
4652  * @capacity:           capacity of the device
4653  * @parm:                       Array containing returned HSC values.
4654  *
4655  * This function generates the HSC parms that fdisk uses.
4656  * We want to make sure we return something that places partitions
4657  * on 4k boundaries for best performance with the IOA.
4658  *
4659  * Return value:
4660  *      0 on success
4661  **/
4662 static int ipr_biosparam(struct scsi_device *sdev,
4663                          struct block_device *block_device,
4664                          sector_t capacity, int *parm)
4665 {
4666         int heads, sectors;
4667         sector_t cylinders;
4668
4669         heads = 128;
4670         sectors = 32;
4671
4672         cylinders = capacity;
4673         sector_div(cylinders, (128 * 32));
4674
4675         /* return result */
4676         parm[0] = heads;
4677         parm[1] = sectors;
4678         parm[2] = cylinders;
4679
4680         return 0;
4681 }
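
/*
 * Worked example: with 128 heads and 32 sectors/track, a cylinder is
 * 128 * 32 = 4096 sectors (2 MiB with 512-byte sectors), so cylinder
 * boundaries always fall on 4k multiples. A 138412032-sector disk would
 * report 138412032 / 4096 = 33792 cylinders.
 */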
4682
4683 /**
4684  * ipr_find_starget - Find target based on bus/target.
4685  * @starget:    scsi target struct
4686  *
4687  * Return value:
4688  *      resource entry pointer if found / NULL if not found
4689  **/
4690 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4691 {
4692         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4693         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4694         struct ipr_resource_entry *res;
4695
4696         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4697                 if ((res->bus == starget->channel) &&
4698                     (res->target == starget->id)) {
4699                         return res;
4700                 }
4701         }
4702
4703         return NULL;
4704 }
4705
4706 /**
4707  * ipr_target_destroy - Destroy a SCSI target
4708  * @starget:    scsi target struct
4709  *
4710  **/
4711 static void ipr_target_destroy(struct scsi_target *starget)
4712 {
4713         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4714         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4715
4716         if (ioa_cfg->sis64) {
4717                 if (!ipr_find_starget(starget)) {
4718                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4719                                 clear_bit(starget->id, ioa_cfg->array_ids);
4720                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4721                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4722                         else if (starget->channel == 0)
4723                                 clear_bit(starget->id, ioa_cfg->target_ids);
4724                 }
4725         }
4726 }
4727
4728 /**
4729  * ipr_find_sdev - Find device based on bus/target/lun.
4730  * @sdev:       scsi device struct
4731  *
4732  * Return value:
4733  *      resource entry pointer if found / NULL if not found
4734  **/
4735 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4736 {
4737         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4738         struct ipr_resource_entry *res;
4739
4740         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4741                 if ((res->bus == sdev->channel) &&
4742                     (res->target == sdev->id) &&
4743                     (res->lun == sdev->lun))
4744                         return res;
4745         }
4746
4747         return NULL;
4748 }
4749
4750 /**
4751  * ipr_slave_destroy - Unconfigure a SCSI device
4752  * @sdev:       scsi device struct
4753  *
4754  * Return value:
4755  *      nothing
4756  **/
4757 static void ipr_slave_destroy(struct scsi_device *sdev)
4758 {
4759         struct ipr_resource_entry *res;
4760         struct ipr_ioa_cfg *ioa_cfg;
4761         unsigned long lock_flags = 0;
4762
4763         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4764
4765         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4766         res = (struct ipr_resource_entry *) sdev->hostdata;
4767         if (res) {
4768                 sdev->hostdata = NULL;
4769                 res->sdev = NULL;
4770         }
4771         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4772 }
4773
4774 /**
4775  * ipr_slave_configure - Configure a SCSI device
4776  * @sdev:       scsi device struct
4777  *
4778  * This function configures the specified scsi device.
4779  *
4780  * Return value:
4781  *      0 on success
4782  **/
4783 static int ipr_slave_configure(struct scsi_device *sdev)
4784 {
4785         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4786         struct ipr_resource_entry *res;
4787         unsigned long lock_flags = 0;
4788         char buffer[IPR_MAX_RES_PATH_LENGTH];
4789
4790         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4791         res = sdev->hostdata;
4792         if (res) {
4793                 if (ipr_is_af_dasd_device(res))
4794                         sdev->type = TYPE_RAID;
4795                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4796                         sdev->scsi_level = 4;
4797                         sdev->no_uld_attach = 1;
4798                 }
4799                 if (ipr_is_vset_device(res)) {
4800                         sdev->scsi_level = SCSI_SPC_3;
4801                         sdev->no_report_opcodes = 1;
4802                         blk_queue_rq_timeout(sdev->request_queue,
4803                                              IPR_VSET_RW_TIMEOUT);
4804                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4805                 }
4806                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4807
4808                 if (ioa_cfg->sis64)
4809                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4810                                     ipr_format_res_path(ioa_cfg,
4811                                 res->res_path, buffer, sizeof(buffer)));
4812                 return 0;
4813         }
4814         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4815         return 0;
4816 }
4817
4818 /**
4819  * ipr_slave_alloc - Prepare for commands to a device.
4820  * @sdev:       scsi device struct
4821  *
4822  * This function saves a pointer to the resource entry
4823  * in the scsi device struct if the device exists. We
4824  * can then use this pointer in ipr_queuecommand when
4825  * handling new commands.
4826  *
4827  * Return value:
4828  *      0 on success / -ENXIO if device does not exist
4829  **/
4830 static int ipr_slave_alloc(struct scsi_device *sdev)
4831 {
4832         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4833         struct ipr_resource_entry *res;
4834         unsigned long lock_flags;
4835         int rc = -ENXIO;
4836
4837         sdev->hostdata = NULL;
4838
4839         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4840
4841         res = ipr_find_sdev(sdev);
4842         if (res) {
4843                 res->sdev = sdev;
4844                 res->add_to_ml = 0;
4845                 res->in_erp = 0;
4846                 sdev->hostdata = res;
4847                 if (!ipr_is_naca_model(res))
4848                         res->needs_sync_complete = 1;
4849                 rc = 0;
4850                 if (ipr_is_gata(res)) {
4851                         sdev_printk(KERN_ERR, sdev, "SATA devices are no longer "
4852                                 "supported by this driver. Skipping device.\n");
4853                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4854                         return -ENXIO;
4855                 }
4856         }
4857
4858         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4859
4860         return rc;
4861 }
4862
4863 /**
4864  * ipr_match_lun - Match function for specified LUN
4865  * @ipr_cmd:    ipr command struct
4866  * @device:             device to match (sdev)
4867  *
4868  * Returns:
4869  *      1 if command matches sdev / 0 if command does not match sdev
4870  **/
4871 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4872 {
4873         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4874                 return 1;
4875         return 0;
4876 }
4877
4878 /**
4879  * ipr_cmnd_is_free - Check if a command is free or not
4880  * @ipr_cmd:    ipr command struct
4881  *
4882  * Returns:
4883  *      true / false
4884  **/
4885 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
4886 {
4887         struct ipr_cmnd *loop_cmd;
4888
4889         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
4890                 if (loop_cmd == ipr_cmd)
4891                         return true;
4892         }
4893
4894         return false;
4895 }
4896
4897 /**
4898  * ipr_wait_for_ops - Wait for matching commands to complete
4899  * @ioa_cfg:    ioa config struct
4900  * @device:             device to match (sdev)
4901  * @match:              match function to use
 *
 * This function attaches a completion to every outstanding command that
 * @match accepts for @device and sleeps until they all finish, failing
 * if matching commands are still outstanding when the wait times out.
 *
4903  * Returns:
4904  *      SUCCESS / FAILED
4905  **/
4906 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4907                             int (*match)(struct ipr_cmnd *, void *))
4908 {
4909         struct ipr_cmnd *ipr_cmd;
4910         int wait, i;
4911         unsigned long flags;
4912         struct ipr_hrr_queue *hrrq;
4913         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4914         DECLARE_COMPLETION_ONSTACK(comp);
4915
4916         ENTER;
4917         do {
4918                 wait = 0;
4919
4920                 for_each_hrrq(hrrq, ioa_cfg) {
4921                         spin_lock_irqsave(hrrq->lock, flags);
4922                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
4923                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4924                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
4925                                         if (match(ipr_cmd, device)) {
4926                                                 ipr_cmd->eh_comp = &comp;
4927                                                 wait++;
4928                                         }
4929                                 }
4930                         }
4931                         spin_unlock_irqrestore(hrrq->lock, flags);
4932                 }
4933
4934                 if (wait) {
4935                         timeout = wait_for_completion_timeout(&comp, timeout);
4936
4937                         if (!timeout) {
4938                                 wait = 0;
4939
4940                                 for_each_hrrq(hrrq, ioa_cfg) {
4941                                         spin_lock_irqsave(hrrq->lock, flags);
4942                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
4943                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4944                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
4945                                                         if (match(ipr_cmd, device)) {
4946                                                                 ipr_cmd->eh_comp = NULL;
4947                                                                 wait++;
4948                                                         }
4949                                                 }
4950                                         }
4951                                         spin_unlock_irqrestore(hrrq->lock, flags);
4952                                 }
4953
4954                                 if (wait)
4955                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4956                                 LEAVE;
4957                                 return wait ? FAILED : SUCCESS;
4958                         }
4959                 }
4960         } while (wait);
4961
4962         LEAVE;
4963         return SUCCESS;
4964 }
4965
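/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/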
4966 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4967 {
4968         struct ipr_ioa_cfg *ioa_cfg;
4969         unsigned long lock_flags = 0;
4970         int rc = SUCCESS;
4971
4972         ENTER;
4973         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4974         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4975
4976         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4977                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4978                 dev_err(&ioa_cfg->pdev->dev,
4979                         "Adapter being reset as a result of error recovery.\n");
4980
4981                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4982                         ioa_cfg->sdt_state = GET_DUMP;
4983         }
4984
4985         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4986         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4987         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4988
	/*
	 * If we got hit with a host reset while we were already resetting
	 * the adapter for some reason and that reset failed, the adapter
	 * is now dead and the host reset must be reported as failed.
	 */
4991         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4992                 ipr_trace;
4993                 rc = FAILED;
4994         }
4995
4996         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4997         LEAVE;
4998         return rc;
4999 }
5000
5001 /**
5002  * ipr_device_reset - Reset the device
5003  * @ioa_cfg:    ioa config struct
5004  * @res:                resource entry struct
5005  *
5006  * This function issues a device reset to the affected device.
5007  * If the device is a SCSI device, a LUN reset will be sent
5008  * to the device first. If that does not work, a target reset
5009  * will be sent.
5010  *
5011  * Return value:
5012  *      0 on success / non-zero on failure
5013  **/
5014 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5015                             struct ipr_resource_entry *res)
5016 {
5017         struct ipr_cmnd *ipr_cmd;
5018         struct ipr_ioarcb *ioarcb;
5019         struct ipr_cmd_pkt *cmd_pkt;
5020         u32 ioasc;
5021
5022         ENTER;
5023         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5024         ioarcb = &ipr_cmd->ioarcb;
5025         cmd_pkt = &ioarcb->cmd_pkt;
5026
5027         if (ipr_cmd->ioa_cfg->sis64)
5028                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5029
5030         ioarcb->res_handle = res->res_handle;
5031         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5032         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5033
5034         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5035         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5036         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5037
5038         LEAVE;
5039         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5040 }
5041
5042 /**
5043  * __ipr_eh_dev_reset - Reset the device
5044  * @scsi_cmd:   scsi command struct
5045  *
5046  * This function issues a device reset to the affected device.
5047  * A LUN reset will be sent to the device first. If that does
5048  * not work, a target reset will be sent.
5049  *
5050  * Return value:
5051  *      SUCCESS / FAILED
5052  **/
5053 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5054 {
5055         struct ipr_ioa_cfg *ioa_cfg;
5056         struct ipr_resource_entry *res;
5057         int rc = 0;
5058
5059         ENTER;
5060         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5061         res = scsi_cmd->device->hostdata;
5062
5063         /*
5064          * If we are currently going through reset/reload, return failed. This will force the
5065          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5066          * reset to complete
5067          */
5068         if (ioa_cfg->in_reset_reload)
5069                 return FAILED;
5070         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5071                 return FAILED;
5072
5073         res->resetting_device = 1;
5074         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5075
5076         rc = ipr_device_reset(ioa_cfg, res);
5077         res->resetting_device = 0;
5078         res->reset_occurred = 1;
5079
5080         LEAVE;
5081         return rc ? FAILED : SUCCESS;
5082 }
5083
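/**
 * ipr_eh_dev_reset - Reset the device and wait for its outstanding ops
 * @cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/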
5084 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5085 {
5086         int rc;
5087         struct ipr_ioa_cfg *ioa_cfg;
5088         struct ipr_resource_entry *res;
5089
5090         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5091         res = cmd->device->hostdata;
5092
5093         if (!res)
5094                 return FAILED;
5095
5096         spin_lock_irq(cmd->device->host->host_lock);
5097         rc = __ipr_eh_dev_reset(cmd);
5098         spin_unlock_irq(cmd->device->host->host_lock);
5099
5100         if (rc == SUCCESS)
5101                 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5102
5103         return rc;
5104 }
5105
5106 /**
5107  * ipr_bus_reset_done - Op done function for bus reset.
5108  * @ipr_cmd:    ipr command struct
5109  *
5110  * This function is the op done function for a bus reset
5111  *
5112  * Return value:
5113  *      none
5114  **/
5115 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5116 {
5117         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5118         struct ipr_resource_entry *res;
5119
5120         ENTER;
5121         if (!ioa_cfg->sis64)
5122                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5123                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5124                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5125                                 break;
5126                         }
5127                 }
5128
5129         /*
5130          * If abort has not completed, indicate the reset has, else call the
5131          * abort's done function to wake the sleeping eh thread
5132          */
5133         if (ipr_cmd->sibling->sibling)
5134                 ipr_cmd->sibling->sibling = NULL;
5135         else
5136                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5137
5138         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5139         LEAVE;
5140 }
5141
5142 /**
5143  * ipr_abort_timeout - An abort task has timed out
5144  * @t: Timer context used to fetch ipr command struct
5145  *
5146  * This function handles when an abort task times out. If this
5147  * happens we issue a bus reset since we have resources tied
5148  * up that must be freed before returning to the midlayer.
5149  *
5150  * Return value:
5151  *      none
5152  **/
5153 static void ipr_abort_timeout(struct timer_list *t)
5154 {
5155         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5156         struct ipr_cmnd *reset_cmd;
5157         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5158         struct ipr_cmd_pkt *cmd_pkt;
5159         unsigned long lock_flags = 0;
5160
5161         ENTER;
5162         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5163         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5164                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5165                 return;
5166         }
5167
5168         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5169         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5170         ipr_cmd->sibling = reset_cmd;
5171         reset_cmd->sibling = ipr_cmd;
5172         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5173         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5174         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5175         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5176         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5177
5178         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5179         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5180         LEAVE;
5181 }
5182
5183 /**
5184  * ipr_cancel_op - Cancel specified op
5185  * @scsi_cmd:   scsi command struct
5186  *
5187  * This function cancels specified op.
5188  *
5189  * Return value:
5190  *      SUCCESS / FAILED
5191  **/
5192 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5193 {
5194         struct ipr_cmnd *ipr_cmd;
5195         struct ipr_ioa_cfg *ioa_cfg;
5196         struct ipr_resource_entry *res;
5197         struct ipr_cmd_pkt *cmd_pkt;
5198         u32 ioasc;
5199         int i, op_found = 0;
5200         struct ipr_hrr_queue *hrrq;
5201
5202         ENTER;
5203         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5204         res = scsi_cmd->device->hostdata;
5205
5206         /* If we are currently going through reset/reload, return failed.
5207          * This will force the mid-layer to call ipr_eh_host_reset,
5208          * which will then go to sleep and wait for the reset to complete
5209          */
5210         if (ioa_cfg->in_reset_reload ||
5211             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5212                 return FAILED;
5213         if (!res)
5214                 return FAILED;
5215
5216         /*
5217          * If we are aborting a timed out op, chances are that the timeout was caused
5218          * by a still not detected EEH error. In such cases, reading a register will
5219          * trigger the EEH recovery infrastructure.
5220          */
5221         readl(ioa_cfg->regs.sense_interrupt_reg);
5222
5223         if (!ipr_is_gscsi(res))
5224                 return FAILED;
5225
5226         for_each_hrrq(hrrq, ioa_cfg) {
5227                 spin_lock(&hrrq->_lock);
5228                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5229                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5230                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5231                                         op_found = 1;
5232                                         break;
5233                                 }
5234                         }
5235                 }
5236                 spin_unlock(&hrrq->_lock);
5237         }
5238
5239         if (!op_found)
5240                 return SUCCESS;
5241
5242         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5243         ipr_cmd->ioarcb.res_handle = res->res_handle;
5244         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5245         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5246         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5247         ipr_cmd->u.sdev = scsi_cmd->device;
5248
5249         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5250                     scsi_cmd->cmnd[0]);
5251         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5252         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5253
5254         /*
5255          * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
5257          */
5258         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5259                 ioasc = 0;
5260                 ipr_trace;
5261         }
5262
5263         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5264         if (!ipr_is_naca_model(res))
5265                 res->needs_sync_complete = 1;
5266
5267         LEAVE;
5268         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5269 }
5270
5271 /**
5272  * ipr_scan_finished - Report whether scan is done
5273  * @shost:           scsi host struct
 * @elapsed_time:    elapsed scan time in jiffies
5275  *
5276  * Return value:
5277  *      0 if scan in progress / 1 if scan is complete
5278  **/
5279 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5280 {
5281         unsigned long lock_flags;
5282         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5283         int rc = 0;
5284
5285         spin_lock_irqsave(shost->host_lock, lock_flags);
5286         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5287                 rc = 1;
5288         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5289                 rc = 1;
5290         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5291         return rc;
5292 }
5293
5294 /**
 * ipr_eh_abort - Abort the specified command
5296  * @scsi_cmd:   scsi command struct
5297  *
5298  * Return value:
5299  *      SUCCESS / FAILED
5300  **/
5301 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5302 {
5303         unsigned long flags;
5304         int rc;
5305         struct ipr_ioa_cfg *ioa_cfg;
5306
5307         ENTER;
5308
5309         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5310
5311         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5312         rc = ipr_cancel_op(scsi_cmd);
5313         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5314
5315         if (rc == SUCCESS)
5316                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5317         LEAVE;
5318         return rc;
5319 }
5320
5321 /**
5322  * ipr_handle_other_interrupt - Handle "other" interrupts
5323  * @ioa_cfg:    ioa config struct
5324  * @int_reg:    interrupt register
5325  *
5326  * Return value:
5327  *      IRQ_NONE / IRQ_HANDLED
5328  **/
5329 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5330                                               u32 int_reg)
5331 {
5332         irqreturn_t rc = IRQ_HANDLED;
5333         u32 int_mask_reg;
5334
5335         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5336         int_reg &= ~int_mask_reg;
5337
	/* If no operational interrupt is pending, ignore it, except on
	 * SIS-64 adapters, where a stage change interrupt may still need
	 * to be handled.
	 */
5341         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5342                 if (ioa_cfg->sis64) {
5343                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5344                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5345                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5346
5347                                 /* clear stage change */
5348                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5349                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5350                                 list_del(&ioa_cfg->reset_cmd->queue);
5351                                 del_timer(&ioa_cfg->reset_cmd->timer);
5352                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5353                                 return IRQ_HANDLED;
5354                         }
5355                 }
5356
5357                 return IRQ_NONE;
5358         }
5359
5360         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5361                 /* Mask the interrupt */
5362                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5363                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5364
5365                 list_del(&ioa_cfg->reset_cmd->queue);
5366                 del_timer(&ioa_cfg->reset_cmd->timer);
5367                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5368         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5369                 if (ioa_cfg->clear_isr) {
5370                         if (ipr_debug && printk_ratelimit())
5371                                 dev_err(&ioa_cfg->pdev->dev,
5372                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5373                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5374                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5375                         return IRQ_NONE;
5376                 }
5377         } else {
5378                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5379                         ioa_cfg->ioa_unit_checked = 1;
5380                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5381                         dev_err(&ioa_cfg->pdev->dev,
5382                                 "No Host RRQ. 0x%08X\n", int_reg);
5383                 else
5384                         dev_err(&ioa_cfg->pdev->dev,
5385                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5386
5387                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5388                         ioa_cfg->sdt_state = GET_DUMP;
5389
5390                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5391                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5392         }
5393
5394         return rc;
5395 }
5396
5397 /**
5398  * ipr_isr_eh - Interrupt service routine error handler
5399  * @ioa_cfg:    ioa config struct
5400  * @msg:        message to log
 * @number:     error-specific value logged along with @msg
5402  *
5403  * Return value:
5404  *      none
5405  **/
5406 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5407 {
5408         ioa_cfg->errors_logged++;
5409         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5410
5411         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5412                 ioa_cfg->sdt_state = GET_DUMP;
5413
5414         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5415 }
5416
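/**
 * ipr_process_hrrq - Process responses posted to a host RR queue
 * @hrr_queue:	hrr queue to process
 * @budget:	maximum number of responses to process, or -1 for no limit
 * @doneq:	list to which completed commands are moved
 *
 * Return value:
 *	number of responses processed
 **/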
5417 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5418                                                 struct list_head *doneq)
5419 {
5420         u32 ioasc;
5421         u16 cmd_index;
5422         struct ipr_cmnd *ipr_cmd;
5423         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5424         int num_hrrq = 0;
5425
5426         /* If interrupts are disabled, ignore the interrupt */
5427         if (!hrr_queue->allow_interrupts)
5428                 return 0;
5429
5430         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5431                hrr_queue->toggle_bit) {
5432
5433                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5434                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5435                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5436
5437                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5438                              cmd_index < hrr_queue->min_cmd_id)) {
5439                         ipr_isr_eh(ioa_cfg,
				"Invalid response handle from IOA:",
5441                                 cmd_index);
5442                         break;
5443                 }
5444
5445                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5446                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5447
5448                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5449
5450                 list_move_tail(&ipr_cmd->queue, doneq);
5451
5452                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5453                         hrr_queue->hrrq_curr++;
5454                 } else {
5455                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5456                         hrr_queue->toggle_bit ^= 1u;
5457                 }
5458                 num_hrrq++;
5459                 if (budget > 0 && num_hrrq >= budget)
5460                         break;
5461         }
5462
5463         return num_hrrq;
5464 }
5465
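/**
 * ipr_iopoll - irq_poll handler for polled response processing
 * @iop:	irq_poll struct
 * @budget:	maximum number of responses to process
 *
 * Return value:
 *	number of completed operations
 **/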
5466 static int ipr_iopoll(struct irq_poll *iop, int budget)
5467 {
5468         struct ipr_hrr_queue *hrrq;
5469         struct ipr_cmnd *ipr_cmd, *temp;
5470         unsigned long hrrq_flags;
5471         int completed_ops;
5472         LIST_HEAD(doneq);
5473
5474         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5475
5476         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5477         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5478
5479         if (completed_ops < budget)
5480                 irq_poll_complete(iop);
5481         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5482
5483         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5484                 list_del(&ipr_cmd->queue);
5485                 del_timer(&ipr_cmd->timer);
5486                 ipr_cmd->fast_done(ipr_cmd);
5487         }
5488
5489         return completed_ops;
5490 }
5491
5492 /**
5493  * ipr_isr - Interrupt service routine
5494  * @irq:        irq number
5495  * @devp:       pointer to ioa config struct
5496  *
5497  * Return value:
5498  *      IRQ_NONE / IRQ_HANDLED
5499  **/
5500 static irqreturn_t ipr_isr(int irq, void *devp)
5501 {
5502         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5503         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5504         unsigned long hrrq_flags = 0;
5505         u32 int_reg = 0;
5506         int num_hrrq = 0;
5507         int irq_none = 0;
5508         struct ipr_cmnd *ipr_cmd, *temp;
5509         irqreturn_t rc = IRQ_NONE;
5510         LIST_HEAD(doneq);
5511
5512         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5513         /* If interrupts are disabled, ignore the interrupt */
5514         if (!hrrq->allow_interrupts) {
5515                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5516                 return IRQ_NONE;
5517         }
5518
5519         while (1) {
5520                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;
5522
5523                         if (!ioa_cfg->clear_isr)
5524                                 break;
5525
5526                         /* Clear the PCI interrupt */
5527                         num_hrrq = 0;
5528                         do {
5529                                 writel(IPR_PCII_HRRQ_UPDATED,
5530                                      ioa_cfg->regs.clr_interrupt_reg32);
5531                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5532                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5533                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5534
5535                 } else if (rc == IRQ_NONE && irq_none == 0) {
5536                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5537                         irq_none++;
5538                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5539                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5540                         ipr_isr_eh(ioa_cfg,
				"Error clearing HRRQ:", num_hrrq);
5542                         rc = IRQ_HANDLED;
5543                         break;
5544                 } else
5545                         break;
5546         }
5547
5548         if (unlikely(rc == IRQ_NONE))
5549                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5550
5551         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5552         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5553                 list_del(&ipr_cmd->queue);
5554                 del_timer(&ipr_cmd->timer);
5555                 ipr_cmd->fast_done(ipr_cmd);
5556         }
5557         return rc;
5558 }
5559
5560 /**
5561  * ipr_isr_mhrrq - Interrupt service routine
5562  * @irq:        irq number
5563  * @devp:       pointer to hrr queue struct
5564  *
5565  * Return value:
5566  *      IRQ_NONE / IRQ_HANDLED
5567  **/
5568 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5569 {
5570         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5571         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5572         unsigned long hrrq_flags = 0;
5573         struct ipr_cmnd *ipr_cmd, *temp;
5574         irqreturn_t rc = IRQ_NONE;
5575         LIST_HEAD(doneq);
5576
5577         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5578
5579         /* If interrupts are disabled, ignore the interrupt */
5580         if (!hrrq->allow_interrupts) {
5581                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5582                 return IRQ_NONE;
5583         }
5584
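             /*
              * With irq_poll enabled on SIS-64 and multiple vectors, defer
              * completion processing to the poller rather than draining the
              * queue in hard interrupt context.
              */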
5585         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5586                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5587                        hrrq->toggle_bit) {
5588                         irq_poll_sched(&hrrq->iopoll);
5589                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5590                         return IRQ_HANDLED;
5591                 }
5592         } else {
5593                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5594                         hrrq->toggle_bit)
5595
5596                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5597                                 rc = IRQ_HANDLED;
5598         }
5599
5600         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5601
5602         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5603                 list_del(&ipr_cmd->queue);
5604                 del_timer(&ipr_cmd->timer);
5605                 ipr_cmd->fast_done(ipr_cmd);
5606         }
5607         return rc;
5608 }
5609
5610 /**
5611  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5612  * @ioa_cfg:    ioa config struct
5613  * @ipr_cmd:    ipr command struct
5614  *
5615  * Return value:
5616  *      0 on success / -1 on failure
5617  **/
5618 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5619                              struct ipr_cmnd *ipr_cmd)
5620 {
5621         int i, nseg;
5622         struct scatterlist *sg;
5623         u32 length;
5624         u32 ioadl_flags = 0;
5625         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5626         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5627         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5628
5629         length = scsi_bufflen(scsi_cmd);
5630         if (!length)
5631                 return 0;
5632
5633         nseg = scsi_dma_map(scsi_cmd);
5634         if (nseg < 0) {
5635                 if (printk_ratelimit())
5636                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5637                 return -1;
5638         }
5639
5640         ipr_cmd->dma_use_sg = nseg;
5641
5642         ioarcb->data_transfer_length = cpu_to_be32(length);
5643         ioarcb->ioadl_len =
5644                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5645
5646         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5647                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5648                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5649         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5650                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5651
5652         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5653                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5654                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5655                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5656         }
5657
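             /* Flag the final descriptor so the adapter knows where the list ends */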
5658         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5659         return 0;
5660 }
5661
5662 /**
5663  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5664  * @ioa_cfg:    ioa config struct
5665  * @ipr_cmd:    ipr command struct
5666  *
5667  * Return value:
5668  *      0 on success / -1 on failure
5669  **/
5670 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5671                            struct ipr_cmnd *ipr_cmd)
5672 {
5673         int i, nseg;
5674         struct scatterlist *sg;
5675         u32 length;
5676         u32 ioadl_flags = 0;
5677         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5678         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5679         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5680
5681         length = scsi_bufflen(scsi_cmd);
5682         if (!length)
5683                 return 0;
5684
5685         nseg = scsi_dma_map(scsi_cmd);
5686         if (nseg < 0) {
5687                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5688                 return -1;
5689         }
5690
5691         ipr_cmd->dma_use_sg = nseg;
5692
5693         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5694                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5695                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5696                 ioarcb->data_transfer_length = cpu_to_be32(length);
5697                 ioarcb->ioadl_len =
5698                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5699         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5700                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5701                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5702                 ioarcb->read_ioadl_len =
5703                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5704         }
5705
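             /*
              * A short S/G list fits in the IOARCB itself, sparing the
              * adapter a separate descriptor fetch.
              */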
5706         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5707                 ioadl = ioarcb->u.add_data.u.ioadl;
5708                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5709                                     offsetof(struct ipr_ioarcb, u.add_data));
5710                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5711         }
5712
5713         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5714                 ioadl[i].flags_and_data_len =
5715                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5716                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5717         }
5718
5719         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5720         return 0;
5721 }
5722
5723 /**
5724  * __ipr_erp_done - Process completion of ERP for a device
5725  * @ipr_cmd:            ipr command struct
5726  *
5727  * This function copies the sense buffer into the scsi_cmd
5728  * struct and invokes the scsi_done function.
5729  *
5730  * Return value:
5731  *      nothing
5732  **/
5733 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5734 {
5735         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5736         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5737         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5738
5739         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5740                 scsi_cmd->result |= (DID_ERROR << 16);
5741                 scmd_printk(KERN_ERR, scsi_cmd,
5742                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5743         } else {
5744                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5745                        SCSI_SENSE_BUFFERSIZE);
5746         }
5747
5748         if (res) {
5749                 if (!ipr_is_naca_model(res))
5750                         res->needs_sync_complete = 1;
5751                 res->in_erp = 0;
5752         }
5753         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5754         scsi_done(scsi_cmd);
5755         if (ipr_cmd->eh_comp)
5756                 complete(ipr_cmd->eh_comp);
5757         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5758 }
5759
5760 /**
5761  * ipr_erp_done - Process completion of ERP for a device
5762  * @ipr_cmd:            ipr command struct
5763  *
5764  * This function copies the sense buffer into the scsi_cmd
5765  * struct and invokes the scsi_done function.
5766  *
5767  * Return value:
5768  *      nothing
5769  **/
5770 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5771 {
5772         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
5773         unsigned long hrrq_flags;
5774
5775         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
5776         __ipr_erp_done(ipr_cmd);
5777         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
5778 }
5779
5780 /**
5781  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5782  * @ipr_cmd:    ipr command struct
5783  *
5784  * Return value:
5785  *      none
5786  **/
5787 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5788 {
5789         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5790         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5791         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5792
5793         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5794         ioarcb->data_transfer_length = 0;
5795         ioarcb->read_data_transfer_length = 0;
5796         ioarcb->ioadl_len = 0;
5797         ioarcb->read_ioadl_len = 0;
5798         ioasa->hdr.ioasc = 0;
5799         ioasa->hdr.residual_data_len = 0;
5800
5801         if (ipr_cmd->ioa_cfg->sis64)
5802                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5803                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5804         else {
5805                 ioarcb->write_ioadl_addr =
5806                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5807                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5808         }
5809 }
5810
5811 /**
5812  * __ipr_erp_request_sense - Send request sense to a device
5813  * @ipr_cmd:    ipr command struct
5814  *
5815  * This function sends a request sense to a device as a result
5816  * of a check condition.
5817  *
5818  * Return value:
5819  *      nothing
5820  **/
5821 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5822 {
5823         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5824         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5825
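             /*
              * The previous ERP step failed with a sense key of its own;
              * finish error processing rather than issuing a REQUEST SENSE.
              */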
5826         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5827                 __ipr_erp_done(ipr_cmd);
5828                 return;
5829         }
5830
5831         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5832
5833         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5834         cmd_pkt->cdb[0] = REQUEST_SENSE;
5835         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5836         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5837         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5838         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5839
5840         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5841                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5842
5843         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5844                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5845 }
5846
5847 /**
5848  * ipr_erp_request_sense - Send request sense to a device
5849  * @ipr_cmd:    ipr command struct
5850  *
5851  * This function sends a request sense to a device as a result
5852  * of a check condition.
5853  *
5854  * Return value:
5855  *      nothing
5856  **/
5857 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5858 {
5859         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
5860         unsigned long hrrq_flags;
5861
5862         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
5863         __ipr_erp_request_sense(ipr_cmd);
5864         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
5865 }
5866
5867 /**
5868  * ipr_erp_cancel_all - Send cancel all to a device
5869  * @ipr_cmd:    ipr command struct
5870  *
5871  * This function sends a cancel all to a device to clear the
5872  * queue. If we are running TCQ on the device, QERR is set to 1,
5873  * which means all outstanding ops have been dropped on the floor.
5874  * Cancel all will return them to us.
5875  *
5876  * Return value:
5877  *      nothing
5878  **/
5879 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5880 {
5881         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5882         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5883         struct ipr_cmd_pkt *cmd_pkt;
5884
5885         res->in_erp = 1;
5886
5887         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5888
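             /*
              * Nothing is queued at an untagged device; skip the cancel
              * and go straight to REQUEST SENSE.
              */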
5889         if (!scsi_cmd->device->simple_tags) {
5890                 __ipr_erp_request_sense(ipr_cmd);
5891                 return;
5892         }
5893
5894         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5895         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5896         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5897
5898         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5899                    IPR_CANCEL_ALL_TIMEOUT);
5900 }
5901
5902 /**
5903  * ipr_dump_ioasa - Dump contents of IOASA
5904  * @ioa_cfg:    ioa config struct
5905  * @ipr_cmd:    ipr command struct
5906  * @res:                resource entry struct
5907  *
5908  * This function is invoked by the interrupt handler when ops
5909  * fail. It will log the IOASA if appropriate. Only called
5910  * for GPDD ops.
5911  *
5912  * Return value:
5913  *      none
5914  **/
5915 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5916                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5917 {
5918         int i;
5919         u16 data_len;
5920         u32 ioasc, fd_ioasc;
5921         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5922         __be32 *ioasa_data = (__be32 *)ioasa;
5923         int error_index;
5924
5925         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5926         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5927
5928         if (0 == ioasc)
5929                 return;
5930
5931         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5932                 return;
5933
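             /* On a bus reset, the failing device's IOASC identifies the real error */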
5934         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5935                 error_index = ipr_get_error(fd_ioasc);
5936         else
5937                 error_index = ipr_get_error(ioasc);
5938
5939         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5940                 /* Don't log an error if the IOA already logged one */
5941                 if (ioasa->hdr.ilid != 0)
5942                         return;
5943
5944                 if (!ipr_is_gscsi(res))
5945                         return;
5946
5947                 if (ipr_error_table[error_index].log_ioasa == 0)
5948                         return;
5949         }
5950
5951         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5952
5953         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5954         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5955                 data_len = sizeof(struct ipr_ioasa64);
5956         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5957                 data_len = sizeof(struct ipr_ioasa);
5958
5959         ipr_err("IOASA Dump:\n");
5960
5961         for (i = 0; i < data_len / 4; i += 4) {
5962                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5963                         be32_to_cpu(ioasa_data[i]),
5964                         be32_to_cpu(ioasa_data[i+1]),
5965                         be32_to_cpu(ioasa_data[i+2]),
5966                         be32_to_cpu(ioasa_data[i+3]));
5967         }
5968 }
5969
5970 /**
5971  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5972  * @ipr_cmd:    ipr command struct
5973  *
5974  * Return value:
5975  *      none
5976  **/
5977 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5978 {
5979         u32 failing_lba;
5980         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5981         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5982         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5983         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5984
5985         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5986
5987         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5988                 return;
5989
5990         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5991
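             /*
              * A failing LBA above 32 bits only fits in descriptor-format
              * (0x72) sense data; all other cases use fixed format (0x70).
              */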
5992         if (ipr_is_vset_device(res) &&
5993             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5994             ioasa->u.vset.failing_lba_hi != 0) {
5995                 sense_buf[0] = 0x72;
5996                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5997                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5998                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5999
6000                 sense_buf[7] = 12;
6001                 sense_buf[8] = 0;
6002                 sense_buf[9] = 0x0A;
6003                 sense_buf[10] = 0x80;
6004
6005                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6006
6007                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6008                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6009                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6010                 sense_buf[15] = failing_lba & 0x000000ff;
6011
6012                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6013
6014                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6015                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6016                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6017                 sense_buf[19] = failing_lba & 0x000000ff;
6018         } else {
6019                 sense_buf[0] = 0x70;
6020                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6021                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6022                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6023
6024                 /* Illegal request */
6025                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6026                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6027                         sense_buf[7] = 10;      /* additional length */
6028
6029                         /* IOARCB was in error */
6030                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6031                                 sense_buf[15] = 0xC0;
6032                         else    /* Parameter data was invalid */
6033                                 sense_buf[15] = 0x80;
6034
6035                         sense_buf[16] =
6036                             ((IPR_FIELD_POINTER_MASK &
6037                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6038                         sense_buf[17] =
6039                             (IPR_FIELD_POINTER_MASK &
6040                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6041                 } else {
6042                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6043                                 if (ipr_is_vset_device(res))
6044                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6045                                 else
6046                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6047
6048                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6049                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6050                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6051                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6052                                 sense_buf[6] = failing_lba & 0x000000ff;
6053                         }
6054
6055                         sense_buf[7] = 6;       /* additional length */
6056                 }
6057         }
6058 }
6059
6060 /**
6061  * ipr_get_autosense - Copy autosense data to sense buffer
6062  * @ipr_cmd:    ipr command struct
6063  *
6064  * This function copies the autosense buffer to the buffer
6065  * in the scsi_cmd, if there is autosense available.
6066  *
6067  * Return value:
6068  *      1 if autosense was available / 0 if not
6069  **/
6070 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6071 {
6072         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6073         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6074
6075         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6076                 return 0;
6077
6078         if (ipr_cmd->ioa_cfg->sis64)
6079                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6080                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6081                            SCSI_SENSE_BUFFERSIZE));
6082         else
6083                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6084                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6085                            SCSI_SENSE_BUFFERSIZE));
6086         return 1;
6087 }
6088
6089 /**
6090  * ipr_erp_start - Process an error response for a SCSI op
6091  * @ioa_cfg:    ioa config struct
6092  * @ipr_cmd:    ipr command struct
6093  *
6094  * This function determines whether or not to initiate ERP
6095  * on the affected device.
6096  *
6097  * Return value:
6098  *      nothing
6099  **/
6100 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6101                               struct ipr_cmnd *ipr_cmd)
6102 {
6103         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6104         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6105         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6106         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6107
6108         if (!res) {
6109                 __ipr_scsi_eh_done(ipr_cmd);
6110                 return;
6111         }
6112
6113         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6114                 ipr_gen_sense(ipr_cmd);
6115
6116         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6117
6118         switch (masked_ioasc) {
6119         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6120                 if (ipr_is_naca_model(res))
6121                         scsi_cmd->result |= (DID_ABORT << 16);
6122                 else
6123                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6124                 break;
6125         case IPR_IOASC_IR_RESOURCE_HANDLE:
6126         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6127                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6128                 break;
6129         case IPR_IOASC_HW_SEL_TIMEOUT:
6130                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6131                 if (!ipr_is_naca_model(res))
6132                         res->needs_sync_complete = 1;
6133                 break;
6134         case IPR_IOASC_SYNC_REQUIRED:
6135                 if (!res->in_erp)
6136                         res->needs_sync_complete = 1;
6137                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6138                 break;
6139         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6140         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6141                 /*
6142                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6143                  * so SCSI mid-layer and upper layers handle it accordingly.
6144                  */
6145                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6146                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6147                 break;
6148         case IPR_IOASC_BUS_WAS_RESET:
6149         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6150                 /*
6151                  * Report the bus reset and ask for a retry. The device
6152                  * will return CHECK CONDITION/UNIT ATTENTION on the next command.
6153                  */
6154                 if (!res->resetting_device)
6155                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6156                 scsi_cmd->result |= (DID_ERROR << 16);
6157                 if (!ipr_is_naca_model(res))
6158                         res->needs_sync_complete = 1;
6159                 break;
6160         case IPR_IOASC_HW_DEV_BUS_STATUS:
6161                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6162                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6163                         if (!ipr_get_autosense(ipr_cmd)) {
6164                                 if (!ipr_is_naca_model(res)) {
6165                                         ipr_erp_cancel_all(ipr_cmd);
6166                                         return;
6167                                 }
6168                         }
6169                 }
6170                 if (!ipr_is_naca_model(res))
6171                         res->needs_sync_complete = 1;
6172                 break;
6173         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6174                 break;
6175         case IPR_IOASC_IR_NON_OPTIMIZED:
6176                 if (res->raw_mode) {
6177                         res->raw_mode = 0;
6178                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6179                 } else
6180                         scsi_cmd->result |= (DID_ERROR << 16);
6181                 break;
6182         default:
6183                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6184                         scsi_cmd->result |= (DID_ERROR << 16);
6185                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6186                         res->needs_sync_complete = 1;
6187                 break;
6188         }
6189
6190         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6191         scsi_done(scsi_cmd);
6192         if (ipr_cmd->eh_comp)
6193                 complete(ipr_cmd->eh_comp);
6194         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6195 }
6196
6197 /**
6198  * ipr_scsi_done - mid-layer done function
6199  * @ipr_cmd:    ipr command struct
6200  *
6201  * This function is invoked by the interrupt handler for
6202  * ops generated by the SCSI mid-layer.
6203  *
6204  * Return value:
6205  *      none
6206  **/
6207 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6208 {
6209         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6210         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6211         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6212         unsigned long lock_flags;
6213
6214         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6215
6216         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6217                 scsi_dma_unmap(scsi_cmd);
6218
6219                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6220                 scsi_done(scsi_cmd);
6221                 if (ipr_cmd->eh_comp)
6222                         complete(ipr_cmd->eh_comp);
6223                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6224                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6225         } else {
6226                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6227                 spin_lock(&ipr_cmd->hrrq->_lock);
6228                 ipr_erp_start(ioa_cfg, ipr_cmd);
6229                 spin_unlock(&ipr_cmd->hrrq->_lock);
6230                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6231         }
6232 }
6233
6234 /**
6235  * ipr_queuecommand - Queue a mid-layer request
6236  * @shost:              scsi host struct
6237  * @scsi_cmd:   scsi command struct
6238  *
6239  * This function queues a request generated by the mid-layer.
6240  *
6241  * Return value:
6242  *      0 on success
6243  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6244  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6245  **/
6246 static int ipr_queuecommand(struct Scsi_Host *shost,
6247                             struct scsi_cmnd *scsi_cmd)
6248 {
6249         struct ipr_ioa_cfg *ioa_cfg;
6250         struct ipr_resource_entry *res;
6251         struct ipr_ioarcb *ioarcb;
6252         struct ipr_cmnd *ipr_cmd;
6253         unsigned long hrrq_flags;
6254         int rc;
6255         struct ipr_hrr_queue *hrrq;
6256         int hrrq_id;
6257
6258         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6259
6260         scsi_cmd->result = (DID_OK << 16);
6261         res = scsi_cmd->device->hostdata;
6262
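             /* Pick an HRR queue to spread completion work across vectors */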
6263         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6264         hrrq = &ioa_cfg->hrrq[hrrq_id];
6265
6266         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6267         /*
6268          * We are currently blocking all devices due to a host reset.
6269          * We have told the host to stop giving us new requests, but
6270          * ERP ops don't count. FIXME
6271          */
6272         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6273                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6274                 return SCSI_MLQUEUE_HOST_BUSY;
6275         }
6276
6277         /*
6278          * FIXME - Create scsi_set_host_offline interface
6279          *  and the ioa_is_dead check can be removed
6280          */
6281         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6282                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6283                 goto err_nodev;
6284         }
6285
6286         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6287         if (ipr_cmd == NULL) {
6288                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6289                 return SCSI_MLQUEUE_HOST_BUSY;
6290         }
6291         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6292
6293         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6294         ioarcb = &ipr_cmd->ioarcb;
6295
6296         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6297         ipr_cmd->scsi_cmd = scsi_cmd;
6298         ipr_cmd->done = ipr_scsi_eh_done;
6299
6300         if (ipr_is_gscsi(res)) {
6301                 if (scsi_cmd->underflow == 0)
6302                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6303
6304                 if (res->reset_occurred) {
6305                         res->reset_occurred = 0;
6306                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6307                 }
6308         }
6309
6310         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6311                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6312
6313                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6314                 if (scsi_cmd->flags & SCMD_TAGGED)
6315                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6316                 else
6317                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6318         }
6319
6320         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6321             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6322                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6323         }
6324         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6325                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6326
6327                 if (scsi_cmd->underflow == 0)
6328                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6329         }
6330
6331         if (ioa_cfg->sis64)
6332                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6333         else
6334                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6335
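             /*
              * Re-check adapter state under the lock; a reset may have
              * raced with us while the S/G list was being built.
              */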
6336         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6337         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6338                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6339                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6340                 if (!rc)
6341                         scsi_dma_unmap(scsi_cmd);
6342                 return SCSI_MLQUEUE_HOST_BUSY;
6343         }
6344
6345         if (unlikely(hrrq->ioa_is_dead)) {
6346                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6347                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6348                 scsi_dma_unmap(scsi_cmd);
6349                 goto err_nodev;
6350         }
6351
6352         ioarcb->res_handle = res->res_handle;
6353         if (res->needs_sync_complete) {
6354                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6355                 res->needs_sync_complete = 0;
6356         }
6357         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6358         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6359         ipr_send_command(ipr_cmd);
6360         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6361         return 0;
6362
6363 err_nodev:
6364         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6365         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6366         scsi_cmd->result = (DID_NO_CONNECT << 16);
6367         scsi_done(scsi_cmd);
6368         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6369         return 0;
6370 }
6371
6372 /**
6373  * ipr_ioa_info - Get information about the card/driver
6374  * @host:       scsi host struct
6375  *
6376  * Return value:
6377  *      pointer to buffer with description string
6378  **/
6379 static const char *ipr_ioa_info(struct Scsi_Host *host)
6380 {
6381         static char buffer[512];
6382         struct ipr_ioa_cfg *ioa_cfg;
6383         unsigned long lock_flags = 0;
6384
6385         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6386
6387         spin_lock_irqsave(host->host_lock, lock_flags);
6388         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6389         spin_unlock_irqrestore(host->host_lock, lock_flags);
6390
6391         return buffer;
6392 }
6393
6394 static const struct scsi_host_template driver_template = {
6395         .module = THIS_MODULE,
6396         .name = "IPR",
6397         .info = ipr_ioa_info,
6398         .queuecommand = ipr_queuecommand,
6399         .eh_abort_handler = ipr_eh_abort,
6400         .eh_device_reset_handler = ipr_eh_dev_reset,
6401         .eh_host_reset_handler = ipr_eh_host_reset,
6402         .slave_alloc = ipr_slave_alloc,
6403         .slave_configure = ipr_slave_configure,
6404         .slave_destroy = ipr_slave_destroy,
6405         .scan_finished = ipr_scan_finished,
6406         .target_destroy = ipr_target_destroy,
6407         .change_queue_depth = ipr_change_queue_depth,
6408         .bios_param = ipr_biosparam,
6409         .can_queue = IPR_MAX_COMMANDS,
6410         .this_id = -1,
6411         .sg_tablesize = IPR_MAX_SGLIST,
6412         .max_sectors = IPR_IOA_MAX_SECTORS,
6413         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6414         .shost_groups = ipr_ioa_groups,
6415         .sdev_groups = ipr_dev_groups,
6416         .proc_name = IPR_NAME,
6417 };
6418
6419 #ifdef CONFIG_PPC_PSERIES
6420 static const u16 ipr_blocked_processors[] = {
6421         PVR_NORTHSTAR,
6422         PVR_PULSAR,
6423         PVR_POWER4,
6424         PVR_ICESTAR,
6425         PVR_SSTAR,
6426         PVR_POWER4p,
6427         PVR_630,
6428         PVR_630p
6429 };
6430
6431 /**
6432  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6433  * @ioa_cfg:    ioa cfg struct
6434  *
6435  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6436  * certain pSeries hardware. This function determines if the given
6437  * adapter is in one of these configurations or not.
6438  *
6439  * Return value:
6440  *      1 if adapter is not supported / 0 if adapter is supported
6441  **/
6442 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6443 {
6444         int i;
6445
6446         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6447                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6448                         if (pvr_version_is(ipr_blocked_processors[i]))
6449                                 return 1;
6450                 }
6451         }
6452         return 0;
6453 }
6454 #else
6455 #define ipr_invalid_adapter(ioa_cfg) 0
6456 #endif
6457
6458 /**
6459  * ipr_ioa_bringdown_done - IOA bring down completion.
6460  * @ipr_cmd:    ipr command struct
6461  *
6462  * This function processes the completion of an adapter bring down.
6463  * It wakes any reset sleepers.
6464  *
6465  * Return value:
6466  *      IPR_RC_JOB_RETURN
6467  **/
6468 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6469 {
6470         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6471         int i;
6472
6473         ENTER;
6474         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6475                 ipr_trace;
6476                 ioa_cfg->scsi_unblock = 1;
6477                 schedule_work(&ioa_cfg->work_q);
6478         }
6479
6480         ioa_cfg->in_reset_reload = 0;
6481         ioa_cfg->reset_retries = 0;
6482         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6483                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6484                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6485                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6486         }
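             /* Ensure ioa_is_dead is visible before waking reset waiters */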
6487         wmb();
6488
6489         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6490         wake_up_all(&ioa_cfg->reset_wait_q);
6491         LEAVE;
6492
6493         return IPR_RC_JOB_RETURN;
6494 }
6495
6496 /**
6497  * ipr_ioa_reset_done - IOA reset completion.
6498  * @ipr_cmd:    ipr command struct
6499  *
6500  * This function processes the completion of an adapter reset.
6501  * It schedules any necessary mid-layer add/removes and
6502  * wakes any reset sleepers.
6503  *
6504  * Return value:
6505  *      IPR_RC_JOB_RETURN
6506  **/
6507 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6508 {
6509         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6510         struct ipr_resource_entry *res;
6511         int j;
6512
6513         ENTER;
6514         ioa_cfg->in_reset_reload = 0;
6515         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6516                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6517                 ioa_cfg->hrrq[j].allow_cmds = 1;
6518                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6519         }
6520         wmb();
6521         ioa_cfg->reset_cmd = NULL;
6522         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6523
6524         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6525                 if (res->add_to_ml || res->del_from_ml) {
6526                         ipr_trace;
6527                         break;
6528                 }
6529         }
6530         schedule_work(&ioa_cfg->work_q);
6531
6532         for (j = 0; j < IPR_NUM_HCAMS; j++) {
6533                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
6534                 if (j < IPR_NUM_LOG_HCAMS)
6535                         ipr_send_hcam(ioa_cfg,
6536                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
6537                                 ioa_cfg->hostrcb[j]);
6538                 else
6539                         ipr_send_hcam(ioa_cfg,
6540                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
6541                                 ioa_cfg->hostrcb[j]);
6542         }
6543
6544         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6545         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6546
6547         ioa_cfg->reset_retries = 0;
6548         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6549         wake_up_all(&ioa_cfg->reset_wait_q);
6550
6551         ioa_cfg->scsi_unblock = 1;
6552         schedule_work(&ioa_cfg->work_q);
6553         LEAVE;
6554         return IPR_RC_JOB_RETURN;
6555 }
6556
6557 /**
6558  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6559  * @supported_dev:      supported device struct
6560  * @vpids:                      vendor product id struct
6561  *
6562  * Return value:
6563  *      none
6564  **/
6565 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6566                                  struct ipr_std_inq_vpids *vpids)
6567 {
6568         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6569         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6570         supported_dev->num_records = 1;
6571         supported_dev->data_length =
6572                 cpu_to_be16(sizeof(struct ipr_supported_device));
6573         supported_dev->reserved = 0;
6574 }
6575
6576 /**
6577  * ipr_set_supported_devs - Send Set Supported Devices for a device
6578  * @ipr_cmd:    ipr command struct
6579  *
6580  * This function sends a Set Supported Devices to the adapter
6581  *
6582  * Return value:
6583  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6584  **/
6585 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6586 {
6587         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6588         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6589         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6590         struct ipr_resource_entry *res = ipr_cmd->u.res;
6591
6592         ipr_cmd->job_step = ipr_ioa_reset_done;
6593
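             /*
              * Resume the walk where the previous pass stopped. One op is
              * sent per disk; on non-SIS64 adapters this job step re-enters
              * until the list is exhausted.
              */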
6594         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6595                 if (!ipr_is_scsi_disk(res))
6596                         continue;
6597
6598                 ipr_cmd->u.res = res;
6599                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6600
6601                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6602                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6603                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6604
6605                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6606                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6607                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6608                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6609
6610                 ipr_init_ioadl(ipr_cmd,
6611                                ioa_cfg->vpd_cbs_dma +
6612                                  offsetof(struct ipr_misc_cbs, supp_dev),
6613                                sizeof(struct ipr_supported_device),
6614                                IPR_IOADL_FLAGS_WRITE_LAST);
6615
6616                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6617                            IPR_SET_SUP_DEVICE_TIMEOUT);
6618
6619                 if (!ioa_cfg->sis64)
6620                         ipr_cmd->job_step = ipr_set_supported_devs;
6621                 LEAVE;
6622                 return IPR_RC_JOB_RETURN;
6623         }
6624
6625         LEAVE;
6626         return IPR_RC_JOB_CONTINUE;
6627 }
6628
6629 /**
6630  * ipr_get_mode_page - Locate specified mode page
6631  * @mode_pages: mode page buffer
6632  * @page_code:  page code to find
6633  * @len:                minimum required length for mode page
6634  *
6635  * Return value:
6636  *      pointer to mode page / NULL on failure
6637  **/
6638 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6639                                u32 page_code, u32 len)
6640 {
6641         struct ipr_mode_page_hdr *mode_hdr;
6642         u32 page_length;
6643         u32 length;
6644
6645         if (!mode_pages || (mode_pages->hdr.length == 0))
6646                 return NULL;
6647
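             /*
              * hdr.length excludes the length byte itself; strip the 4-byte
              * mode parameter header and any block descriptors to get the
              * bytes that hold the mode pages.
              */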
6648         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6649         mode_hdr = (struct ipr_mode_page_hdr *)
6650                 (mode_pages->data + mode_pages->hdr.block_desc_len);
6651
6652         while (length) {
6653                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6654                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6655                                 return mode_hdr;
6656                         break;
6657                 } else {
6658                         page_length = (sizeof(struct ipr_mode_page_hdr) +
6659                                        mode_hdr->page_length);
6660                         length -= page_length;
6661                         mode_hdr = (struct ipr_mode_page_hdr *)
6662                                 ((unsigned long)mode_hdr + page_length);
6663                 }
6664         }
6665         return NULL;
6666 }
6667
6668 /**
6669  * ipr_check_term_power - Check for term power errors
6670  * @ioa_cfg:    ioa config struct
6671  * @mode_pages: IOAFP mode pages buffer
6672  *
6673  * Check the IOAFP's mode page 28 for term power errors
6674  *
6675  * Return value:
6676  *      nothing
6677  **/
6678 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6679                                  struct ipr_mode_pages *mode_pages)
6680 {
6681         int i;
6682         int entry_length;
6683         struct ipr_dev_bus_entry *bus;
6684         struct ipr_mode_page28 *mode_page;
6685
6686         mode_page = ipr_get_mode_page(mode_pages, 0x28,
6687                                       sizeof(struct ipr_mode_page28));
6688
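             /*
              * Page 28 is assumed to always be present on the IOAFP,
              * so mode_page is not NULL-checked here.
              */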
6689         entry_length = mode_page->entry_length;
6690
6691         bus = mode_page->bus;
6692
6693         for (i = 0; i < mode_page->num_entries; i++) {
6694                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6695                         dev_err(&ioa_cfg->pdev->dev,
6696                                 "Term power is absent on scsi bus %d\n",
6697                                 bus->res_addr.bus);
6698                 }
6699
6700                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6701         }
6702 }
6703
6704 /**
6705  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6706  * @ioa_cfg:    ioa config struct
6707  *
6708  * Looks through the config table for SES devices. If an SES
6709  * device appears in the SES table with a maximum SCSI bus
6710  * speed, the bus is limited to that speed.
6711  *
6712  * Return value:
6713  *      none
6714  **/
6715 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6716 {
6717         u32 max_xfer_rate;
6718         int i;
6719
6720         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6721                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6722                                                        ioa_cfg->bus_attr[i].bus_width);
6723
6724                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6725                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6726         }
6727 }
6728
6729 /**
6730  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6731  * @ioa_cfg:    ioa config struct
6732  * @mode_pages: mode page 28 buffer
6733  *
6734  * Updates mode page 28 based on driver configuration
6735  *
6736  * Return value:
6737  *      none
6738  **/
6739 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6740                                           struct ipr_mode_pages *mode_pages)
6741 {
6742         int i, entry_length;
6743         struct ipr_dev_bus_entry *bus;
6744         struct ipr_bus_attributes *bus_attr;
6745         struct ipr_mode_page28 *mode_page;
6746
6747         mode_page = ipr_get_mode_page(mode_pages, 0x28,
6748                                       sizeof(struct ipr_mode_page28));
6749
6750         entry_length = mode_page->entry_length;
6751
6752         /* Loop for each device bus entry */
6753         for (i = 0, bus = mode_page->bus;
6754              i < mode_page->num_entries;
6755              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6756                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6757                         dev_err(&ioa_cfg->pdev->dev,
6758                                 "Invalid resource address reported: 0x%08X\n",
6759                                 IPR_GET_PHYS_LOC(bus->res_addr));
6760                         continue;
6761                 }
6762
6763                 bus_attr = &ioa_cfg->bus_attr[i];
6764                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6765                 bus->bus_width = bus_attr->bus_width;
6766                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6767                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6768                 if (bus_attr->qas_enabled)
6769                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6770                 else
6771                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6772         }
6773 }
6774
6775 /**
6776  * ipr_build_mode_select - Build a mode select command
6777  * @ipr_cmd:    ipr command struct
6778  * @res_handle: resource handle to send command to
6779  * @parm:               Byte 1 of Mode Select command
6780  * @dma_addr:   DMA buffer address
6781  * @xfer_len:   data transfer length
6782  *
6783  * Return value:
6784  *      none
6785  **/
6786 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6787                                   __be32 res_handle, u8 parm,
6788                                   dma_addr_t dma_addr, u8 xfer_len)
6789 {
6790         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6791
6792         ioarcb->res_handle = res_handle;
6793         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6794         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6795         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6796         ioarcb->cmd_pkt.cdb[1] = parm;
6797         ioarcb->cmd_pkt.cdb[4] = xfer_len;
6798
6799         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6800 }
6801
6802 /**
6803  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6804  * @ipr_cmd:    ipr command struct
6805  *
6806  * This function sets up the SCSI bus attributes and sends
6807  * a Mode Select for Page 28 to activate them.
6808  *
6809  * Return value:
6810  *      IPR_RC_JOB_RETURN
6811  **/
6812 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6813 {
6814         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6815         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6816         int length;
6817
6818         ENTER;
6819         ipr_scsi_bus_speed_limit(ioa_cfg);
6820         ipr_check_term_power(ioa_cfg, mode_pages);
6821         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
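             /*
              * hdr.length excludes the length byte, hence the +1. The mode
              * data length field is reserved for MODE SELECT, so clear it
              * before sending the buffer back.
              */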
6822         length = mode_pages->hdr.length + 1;
6823         mode_pages->hdr.length = 0;
6824
6825         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6826                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6827                               length);
6828
6829         ipr_cmd->job_step = ipr_set_supported_devs;
6830         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6831                                     struct ipr_resource_entry, queue);
6832         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6833
6834         LEAVE;
6835         return IPR_RC_JOB_RETURN;
6836 }
6837
6838 /**
6839  * ipr_build_mode_sense - Builds a mode sense command
6840  * @ipr_cmd:    ipr command struct
6841  * @res_handle:         resource entry struct
6842  * @parm:               Byte 2 of mode sense command
6843  * @dma_addr:   DMA address of mode sense buffer
6844  * @xfer_len:   Size of DMA buffer
6845  *
6846  * Return value:
6847  *      none
6848  **/
6849 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6850                                  __be32 res_handle,
6851                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6852 {
6853         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6854
6855         ioarcb->res_handle = res_handle;
6856         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6857         ioarcb->cmd_pkt.cdb[2] = parm;
6858         ioarcb->cmd_pkt.cdb[4] = xfer_len;
6859         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6860
6861         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6862 }
6863
6864 /**
6865  * ipr_reset_cmd_failed - Handle failure of IOA reset command
6866  * @ipr_cmd:    ipr command struct
6867  *
6868  * This function handles the failure of an IOA bringup command.
6869  *
6870  * Return value:
6871  *      IPR_RC_JOB_RETURN
6872  **/
6873 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6874 {
6875         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6876         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6877
6878         dev_err(&ioa_cfg->pdev->dev,
6879                 "0x%02X failed with IOASC: 0x%08X\n",
6880                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6881
6882         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6883         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6884         return IPR_RC_JOB_RETURN;
6885 }
6886
6887 /**
6888  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6889  * @ipr_cmd:    ipr command struct
6890  *
6891  * This function handles the failure of a Mode Sense to the IOAFP.
6892  * Some adapters do not handle all mode pages.
6893  *
6894  * Return value:
6895  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6896  **/
6897 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6898 {
6899         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6900         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6901
6902         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6903                 ipr_cmd->job_step = ipr_set_supported_devs;
6904                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6905                                             struct ipr_resource_entry, queue);
6906                 return IPR_RC_JOB_CONTINUE;
6907         }
6908
6909         return ipr_reset_cmd_failed(ipr_cmd);
6910 }
6911
6912 /**
6913  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6914  * @ipr_cmd:    ipr command struct
6915  *
6916  * This function sends a Page 28 mode sense to the IOA to
6917  * retrieve SCSI bus attributes.
6918  *
6919  * Return value:
6920  *      IPR_RC_JOB_RETURN
6921  **/
6922 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6923 {
6924         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6925
6926         ENTER;
6927         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6928                              0x28, ioa_cfg->vpd_cbs_dma +
6929                              offsetof(struct ipr_misc_cbs, mode_pages),
6930                              sizeof(struct ipr_mode_pages));
6931
6932         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6933         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6934
6935         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6936
6937         LEAVE;
6938         return IPR_RC_JOB_RETURN;
6939 }
6940
6941 /**
6942  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6943  * @ipr_cmd:    ipr command struct
6944  *
6945  * This function enables dual IOA RAID support if possible.
6946  *
6947  * Return value:
6948  *      IPR_RC_JOB_RETURN
6949  **/
6950 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6951 {
6952         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6953         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6954         struct ipr_mode_page24 *mode_page;
6955         int length;
6956
6957         ENTER;
6958         mode_page = ipr_get_mode_page(mode_pages, 0x24,
6959                                       sizeof(struct ipr_mode_page24));
6960
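             /*
              * Page 24 may be absent on adapters without dual IOA RAID
              * support; only set the enable flag when the page exists.
              */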
6961         if (mode_page)
6962                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6963
6964         length = mode_pages->hdr.length + 1;
6965         mode_pages->hdr.length = 0;
6966
6967         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6968                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6969                               length);
6970
6971         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6972         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6973
6974         LEAVE;
6975         return IPR_RC_JOB_RETURN;
6976 }
6977
6978 /**
6979  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6980  * @ipr_cmd:    ipr command struct
6981  *
6982  * This function handles the failure of a Mode Sense to the IOAFP.
6983  * Some adapters do not handle all mode pages.
6984  *
6985  * Return value:
6986  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6987  **/
6988 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6989 {
6990         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6991
6992         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6993                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6994                 return IPR_RC_JOB_CONTINUE;
6995         }
6996
6997         return ipr_reset_cmd_failed(ipr_cmd);
6998 }
6999
7000 /**
7001  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7002  * @ipr_cmd:    ipr command struct
7003  *
7004  * This function sends a mode sense to the IOA to retrieve
7005  * the IOA Advanced Function Control mode page.
7006  *
7007  * Return value:
7008  *      IPR_RC_JOB_RETURN
7009  **/
7010 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7011 {
7012         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7013
7014         ENTER;
7015         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7016                              0x24, ioa_cfg->vpd_cbs_dma +
7017                              offsetof(struct ipr_misc_cbs, mode_pages),
7018                              sizeof(struct ipr_mode_pages));
7019
7020         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7021         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7022
7023         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7024
7025         LEAVE;
7026         return IPR_RC_JOB_RETURN;
7027 }
7028
7029 /**
7030  * ipr_init_res_table - Initialize the resource table
7031  * @ipr_cmd:    ipr command struct
7032  *
7033  * This function looks through the existing resource table, comparing
7034  * it with the config table. This function will take care of old/new
7035  * devices and schedule adding/removing them from the mid-layer
7036  * as appropriate.
7037  *
7038  * Return value:
7039  *      IPR_RC_JOB_CONTINUE
7040  **/
7041 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7042 {
7043         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7044         struct ipr_resource_entry *res, *temp;
7045         struct ipr_config_table_entry_wrapper cfgtew;
7046         int entries, found, flag, i;
7047         LIST_HEAD(old_res);
7048
7049         ENTER;
7050         if (ioa_cfg->sis64)
7051                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7052         else
7053                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7054
7055         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7056                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7057
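        /* Park all known resources on old_res; entries still present in
         * the new config table are moved back to used_res_q below.
         */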
7058         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7059                 list_move_tail(&res->queue, &old_res);
7060
7061         if (ioa_cfg->sis64)
7062                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7063         else
7064                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7065
7066         for (i = 0; i < entries; i++) {
7067                 if (ioa_cfg->sis64)
7068                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7069                 else
7070                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7071                 found = 0;
7072
7073                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7074                         if (ipr_is_same_device(res, &cfgtew)) {
7075                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7076                                 found = 1;
7077                                 break;
7078                         }
7079                 }
7080
7081                 if (!found) {
7082                         if (list_empty(&ioa_cfg->free_res_q)) {
7083                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7084                                 break;
7085                         }
7086
7087                         found = 1;
7088                         res = list_entry(ioa_cfg->free_res_q.next,
7089                                          struct ipr_resource_entry, queue);
7090                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7091                         ipr_init_res_entry(res, &cfgtew);
7092                         res->add_to_ml = 1;
7093                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7094                         res->sdev->allow_restart = 1;
7095
7096                 if (found)
7097                         ipr_update_res_entry(res, &cfgtew);
7098         }
7099
7100         list_for_each_entry_safe(res, temp, &old_res, queue) {
7101                 if (res->sdev) {
7102                         res->del_from_ml = 1;
7103                         res->res_handle = IPR_INVALID_RES_HANDLE;
7104                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7105                 }
7106         }
7107
7108         list_for_each_entry_safe(res, temp, &old_res, queue) {
7109                 ipr_clear_res_target(res);
7110                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7111         }
7112
7113         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7114                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7115         else
7116                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7117
7118         LEAVE;
7119         return IPR_RC_JOB_CONTINUE;
7120 }
7121
7122 /**
7123  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7124  * @ipr_cmd:    ipr command struct
7125  *
7126  * This function sends a Query IOA Configuration command
7127  * to the adapter to retrieve the IOA configuration table.
7128  *
7129  * Return value:
7130  *      IPR_RC_JOB_RETURN
7131  **/
7132 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7133 {
7134         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7135         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7136         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7137         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7138
7139         ENTER;
7140         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7141                 ioa_cfg->dual_raid = 1;
7142         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7143                  ucode_vpd->major_release, ucode_vpd->card_type,
7144                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7145         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7146         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7147
7148         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7149         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7150         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7151         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7152
7153         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7154                        IPR_IOADL_FLAGS_READ_LAST);
7155
7156         ipr_cmd->job_step = ipr_init_res_table;
7157
7158         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7159
7160         LEAVE;
7161         return IPR_RC_JOB_RETURN;
7162 }
7163
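/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd:    ipr command struct
 *
 * Some adapters do not support the IOA Service Action command, so an
 * invalid request type or packet is treated as benign and the reset
 * job continues.
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/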
7164 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7165 {
7166         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7167
7168         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7169                 return IPR_RC_JOB_CONTINUE;
7170
7171         return ipr_reset_cmd_failed(ipr_cmd);
7172 }
7173
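/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle for the command
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/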
7174 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7175                                          __be32 res_handle, u8 sa_code)
7176 {
7177         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7178
7179         ioarcb->res_handle = res_handle;
7180         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7181         ioarcb->cmd_pkt.cdb[1] = sa_code;
7182         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7183 }
7184
7185 /**
7186  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7187  * action
7188  * @ipr_cmd:    ipr command struct
7189  *
7190  * Return value:
7191  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7192  **/
7193 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7194 {
7195         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7196         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7197         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7198
7199         ENTER;
7200
7201         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7202
7203         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7204                 ipr_build_ioa_service_action(ipr_cmd,
7205                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7206                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7207
7208                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7209
7210                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7211                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7212                            IPR_SET_SUP_DEVICE_TIMEOUT);
7213
7214                 LEAVE;
7215                 return IPR_RC_JOB_RETURN;
7216         }
7217
7218         LEAVE;
7219         return IPR_RC_JOB_CONTINUE;
7220 }
7221
7222 /**
7223  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7224  * @ipr_cmd:    ipr command struct
7225  * @flags:      flags to send
7226  * @page:       page to inquire
7227  * @dma_addr:   DMA address
7228  * @xfer_len:   transfer data length
7229  *
7230  * This utility function sends an inquiry to the adapter.
7231  *
7232  * Return value:
7233  *      none
7234  **/
7235 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7236                               dma_addr_t dma_addr, u8 xfer_len)
7237 {
7238         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7239
7240         ENTER;
7241         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7242         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7243
7244         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7245         ioarcb->cmd_pkt.cdb[1] = flags;
7246         ioarcb->cmd_pkt.cdb[2] = page;
7247         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7248
7249         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7250
7251         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7252         LEAVE;
7253 }
7254
7255 /**
7256  * ipr_inquiry_page_supported - Is the given inquiry page supported
7257  * @page0:              inquiry page 0 buffer
7258  * @page:               page code.
7259  *
7260  * This function determines if the specified inquiry page is supported.
7261  *
7262  * Return value:
7263  *      1 if page is supported / 0 if not
7264  **/
7265 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7266 {
7267         int i;
7268
7269         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7270                 if (page0->page[i] == page)
7271                         return 1;
7272
7273         return 0;
7274 }
7275
7276 /**
7277  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7278  * @ipr_cmd:    ipr command struct
7279  *
7280  * This function sends a Page 0xC4 inquiry to the adapter
7281  * to retrieve software VPD information.
7282  *
7283  * Return value:
7284  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7285  **/
7286 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7287 {
7288         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7289         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7290         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7291
7292         ENTER;
7293         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
7294         memset(pageC4, 0, sizeof(*pageC4));
7295
7296         if (ipr_inquiry_page_supported(page0, 0xC4)) {
7297                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7298                                   (ioa_cfg->vpd_cbs_dma
7299                                    + offsetof(struct ipr_misc_cbs,
7300                                               pageC4_data)),
7301                                   sizeof(struct ipr_inquiry_pageC4));
7302                 return IPR_RC_JOB_RETURN;
7303         }
7304
7305         LEAVE;
7306         return IPR_RC_JOB_CONTINUE;
7307 }
7308
7309 /**
7310  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7311  * @ipr_cmd:    ipr command struct
7312  *
7313  * This function sends a Page 0xD0 inquiry to the adapter
7314  * to retrieve adapter capabilities.
7315  *
7316  * Return value:
7317  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7318  **/
7319 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7320 {
7321         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7322         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7323         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7324
7325         ENTER;
7326         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
7327         memset(cap, 0, sizeof(*cap));
7328
7329         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7330                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7331                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7332                                   sizeof(struct ipr_inquiry_cap));
7333                 return IPR_RC_JOB_RETURN;
7334         }
7335
7336         LEAVE;
7337         return IPR_RC_JOB_CONTINUE;
7338 }
7339
7340 /**
7341  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7342  * @ipr_cmd:    ipr command struct
7343  *
7344  * This function sends a Page 3 inquiry to the adapter
7345  * to retrieve software VPD information.
7346  *
7347  * Return value:
7348  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7349  **/
7350 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7351 {
7352         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7353
7354         ENTER;
7355
7356         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7357
7358         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7359                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7360                           sizeof(struct ipr_inquiry_page3));
7361
7362         LEAVE;
7363         return IPR_RC_JOB_RETURN;
7364 }
7365
7366 /**
7367  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7368  * @ipr_cmd:    ipr command struct
7369  *
7370  * This function sends a Page 0 inquiry to the adapter
7371  * to retrieve supported inquiry pages.
7372  *
7373  * Return value:
7374  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7375  **/
7376 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7377 {
7378         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7379         char type[5];
7380
7381         ENTER;
7382
7383         /* Grab the type out of the VPD and store it away */
7384         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7385         type[4] = '\0';
7386         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7387
7388         if (ipr_invalid_adapter(ioa_cfg)) {
7389                 dev_err(&ioa_cfg->pdev->dev,
7390                         "Adapter not supported in this hardware configuration.\n");
7391
7392                 if (!ipr_testmode) {
7393                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7394                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7395                         list_add_tail(&ipr_cmd->queue,
7396                                         &ioa_cfg->hrrq->hrrq_free_q);
7397                         return IPR_RC_JOB_RETURN;
7398                 }
7399         }
7400
7401         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7402
7403         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7404                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7405                           sizeof(struct ipr_inquiry_page0));
7406
7407         LEAVE;
7408         return IPR_RC_JOB_RETURN;
7409 }
7410
7411 /**
7412  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7413  * @ipr_cmd:    ipr command struct
7414  *
7415  * This function sends a standard inquiry to the adapter.
7416  *
7417  * Return value:
7418  *      IPR_RC_JOB_RETURN
7419  **/
7420 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7421 {
7422         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7423
7424         ENTER;
7425         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7426
7427         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7428                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7429                           sizeof(struct ipr_ioa_vpd));
7430
7431         LEAVE;
7432         return IPR_RC_JOB_RETURN;
7433 }
7434
7435 /**
7436  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7437  * @ipr_cmd:    ipr command struct
7438  *
7439  * This function sends an Identify Host Request Response Queue
7440  * command to establish the HRRQ with the adapter.
7441  *
7442  * Return value:
7443  *      IPR_RC_JOB_RETURN
7444  **/
7445 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7446 {
7447         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7448         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7449         struct ipr_hrr_queue *hrrq;
7450
7451         ENTER;
7452         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7453         if (ioa_cfg->identify_hrrq_index == 0)
7454                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7455
7456         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7457                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7458
7459                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7460                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7461
7462                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7463                 if (ioa_cfg->sis64)
7464                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7465
7466                 if (ioa_cfg->nvectors == 1)
7467                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7468                 else
7469                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7470
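                /* CDB bytes 2-5 carry bits 31:0 of the host RRQ DMA
                 * address; bytes 7-8 carry the queue size in bytes.
                 */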
7471                 ioarcb->cmd_pkt.cdb[2] =
7472                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7473                 ioarcb->cmd_pkt.cdb[3] =
7474                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7475                 ioarcb->cmd_pkt.cdb[4] =
7476                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7477                 ioarcb->cmd_pkt.cdb[5] =
7478                         ((u64) hrrq->host_rrq_dma) & 0xff;
7479                 ioarcb->cmd_pkt.cdb[7] =
7480                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7481                 ioarcb->cmd_pkt.cdb[8] =
7482                         (sizeof(u32) * hrrq->size) & 0xff;
7483
7484                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7485                         ioarcb->cmd_pkt.cdb[9] =
7486                                         ioa_cfg->identify_hrrq_index;
7487
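                /* SIS64 adapters take bits 63:32 of the host RRQ DMA
                 * address in CDB bytes 10-13.
                 */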
7488                 if (ioa_cfg->sis64) {
7489                         ioarcb->cmd_pkt.cdb[10] =
7490                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7491                         ioarcb->cmd_pkt.cdb[11] =
7492                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7493                         ioarcb->cmd_pkt.cdb[12] =
7494                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7495                         ioarcb->cmd_pkt.cdb[13] =
7496                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7497                 }
7498
7499                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7500                         ioarcb->cmd_pkt.cdb[14] =
7501                                         ioa_cfg->identify_hrrq_index;
7502
7503                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7504                            IPR_INTERNAL_TIMEOUT);
7505
7506                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7507                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7508
7509                 LEAVE;
7510                 return IPR_RC_JOB_RETURN;
7511         }
7512
7513         LEAVE;
7514         return IPR_RC_JOB_CONTINUE;
7515 }
7516
7517 /**
7518  * ipr_reset_timer_done - Adapter reset timer function
7519  * @t: Timer context used to fetch ipr command struct
7520  *
7521  * Description: This function is used in adapter reset processing
7522  * for timing events. If the reset_cmd pointer in the IOA
7523  * config struct is not this adapter's, we are doing nested
7524  * resets and fail_all_ops will take care of freeing the
7525  * command block.
7526  *
7527  * Return value:
7528  *      none
7529  **/
7530 static void ipr_reset_timer_done(struct timer_list *t)
7531 {
7532         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
7533         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7534         unsigned long lock_flags = 0;
7535
7536         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7537
7538         if (ioa_cfg->reset_cmd == ipr_cmd) {
7539                 list_del(&ipr_cmd->queue);
7540                 ipr_cmd->done(ipr_cmd);
7541         }
7542
7543         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7544 }
7545
7546 /**
7547  * ipr_reset_start_timer - Start a timer for adapter reset job
7548  * @ipr_cmd:    ipr command struct
7549  * @timeout:    timeout value
7550  *
7551  * Description: This function is used in adapter reset processing
7552  * for timing events. If the reset_cmd pointer in the IOA
7553  * config struct is not this adapter's, we are doing nested
7554  * resets and fail_all_ops will take care of freeing the
7555  * command block.
7556  *
7557  * Return value:
7558  *      none
7559  **/
7560 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7561                                   unsigned long timeout)
7562 {
7563
7564         ENTER;
7565         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7566         ipr_cmd->done = ipr_reset_ioa_job;
7567
7568         ipr_cmd->timer.expires = jiffies + timeout;
7569         ipr_cmd->timer.function = ipr_reset_timer_done;
7570         add_timer(&ipr_cmd->timer);
7571 }
7572
7573 /**
7574  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7575  * @ioa_cfg:    ioa cfg struct
7576  *
7577  * Return value:
7578  *      nothing
7579  **/
7580 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7581 {
7582         struct ipr_hrr_queue *hrrq;
7583
7584         for_each_hrrq(hrrq, ioa_cfg) {
7585                 spin_lock(&hrrq->_lock);
7586                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7587
7588                 /* Initialize Host RRQ pointers */
7589                 hrrq->hrrq_start = hrrq->host_rrq;
7590                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7591                 hrrq->hrrq_curr = hrrq->hrrq_start;
7592                 hrrq->toggle_bit = 1;
7593                 spin_unlock(&hrrq->_lock);
7594         }
7595         wmb();
7596
7597         ioa_cfg->identify_hrrq_index = 0;
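        /* HRRQ 0 is reserved for adapter-initiated commands when
         * multiple HRRQs are in use, so I/O rotates through the
         * remaining queues.
         */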
7598         if (ioa_cfg->hrrq_num == 1)
7599                 atomic_set(&ioa_cfg->hrrq_index, 0);
7600         else
7601                 atomic_set(&ioa_cfg->hrrq_index, 1);
7602
7603         /* Zero out config table */
7604         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7605 }
7606
7607 /**
7608  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7609  * @ipr_cmd:    ipr command struct
7610  *
7611  * Return value:
7612  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7613  **/
7614 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7615 {
7616         unsigned long stage, stage_time;
7617         u32 feedback;
7618         volatile u32 int_reg;
7619         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7620         u64 maskval = 0;
7621
7622         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7623         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7624         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7625
7626         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7627
7628         /* sanity check the stage_time value */
7629         if (stage_time == 0)
7630                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7631         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7632                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7633         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7634                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7635
7636         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7637                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7638                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7639                 stage_time = ioa_cfg->transop_timeout;
7640                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7641         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7642                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7643                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7644                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7645                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7646                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7647                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7648                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7649                         return IPR_RC_JOB_CONTINUE;
7650                 }
7651         }
7652
7653         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7654         ipr_cmd->timer.function = ipr_oper_timeout;
7655         ipr_cmd->done = ipr_reset_ioa_job;
7656         add_timer(&ipr_cmd->timer);
7657
7658         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7659
7660         return IPR_RC_JOB_RETURN;
7661 }
7662
7663 /**
7664  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7665  * @ipr_cmd:    ipr command struct
7666  *
7667  * This function reinitializes some control blocks and
7668  * enables destructive diagnostics on the adapter.
7669  *
7670  * Return value:
7671  *      IPR_RC_JOB_RETURN
7672  **/
7673 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7674 {
7675         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7676         volatile u32 int_reg;
7677         volatile u64 maskval;
7678         int i;
7679
7680         ENTER;
7681         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7682         ipr_init_ioa_mem(ioa_cfg);
7683
7684         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7685                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7686                 ioa_cfg->hrrq[i].allow_interrupts = 1;
7687                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7688         }
7689         if (ioa_cfg->sis64) {
7690                 /* Set the adapter to the correct endian mode. */
7691                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7692                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7693         }
7694
7695         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7696
7697         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7698                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7699                        ioa_cfg->regs.clr_interrupt_mask_reg32);
7700                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7701                 return IPR_RC_JOB_CONTINUE;
7702         }
7703
7704         /* Enable destructive diagnostics on IOA */
7705         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7706
7707         if (ioa_cfg->sis64) {
7708                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7709                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7710                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7711         } else
7712                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7713
7714         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7715
7716         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7717
7718         if (ioa_cfg->sis64) {
7719                 ipr_cmd->job_step = ipr_reset_next_stage;
7720                 return IPR_RC_JOB_CONTINUE;
7721         }
7722
7723         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7724         ipr_cmd->timer.function = ipr_oper_timeout;
7725         ipr_cmd->done = ipr_reset_ioa_job;
7726         add_timer(&ipr_cmd->timer);
7727         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7728
7729         LEAVE;
7730         return IPR_RC_JOB_RETURN;
7731 }
7732
7733 /**
7734  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7735  * @ipr_cmd:    ipr command struct
7736  *
7737  * This function is invoked when an adapter dump has run out
7738  * of processing time.
7739  *
7740  * Return value:
7741  *      IPR_RC_JOB_CONTINUE
7742  **/
7743 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7744 {
7745         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7746
7747         if (ioa_cfg->sdt_state == GET_DUMP)
7748                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7749         else if (ioa_cfg->sdt_state == READ_DUMP)
7750                 ioa_cfg->sdt_state = ABORT_DUMP;
7751
7752         ioa_cfg->dump_timeout = 1;
7753         ipr_cmd->job_step = ipr_reset_alert;
7754
7755         return IPR_RC_JOB_CONTINUE;
7756 }
7757
7758 /**
7759  * ipr_unit_check_no_data - Log a unit check/no data error log
7760  * @ioa_cfg:            ioa config struct
7761  *
7762  * Logs an error indicating the adapter unit checked, but for some
7763  * reason, we were unable to fetch the unit check buffer.
7764  *
7765  * Return value:
7766  *      nothing
7767  **/
7768 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7769 {
7770         ioa_cfg->errors_logged++;
7771         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7772 }
7773
7774 /**
7775  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7776  * @ioa_cfg:            ioa config struct
7777  *
7778  * Fetches the unit check buffer from the adapter by clocking the data
7779  * through the mailbox register.
7780  *
7781  * Return value:
7782  *      nothing
7783  **/
7784 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7785 {
7786         unsigned long mailbox;
7787         struct ipr_hostrcb *hostrcb;
7788         struct ipr_uc_sdt sdt;
7789         int rc, length;
7790         u32 ioasc;
7791
7792         mailbox = readl(ioa_cfg->ioa_mailbox);
7793
7794         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7795                 ipr_unit_check_no_data(ioa_cfg);
7796                 return;
7797         }
7798
7799         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7800         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7801                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7802
7803         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7804             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7805             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7806                 ipr_unit_check_no_data(ioa_cfg);
7807                 return;
7808         }
7809
7810         /* Find length of the first sdt entry (UC buffer) */
7811         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7812                 length = be32_to_cpu(sdt.entry[0].end_token);
7813         else
7814                 length = (be32_to_cpu(sdt.entry[0].end_token) -
7815                           be32_to_cpu(sdt.entry[0].start_token)) &
7816                           IPR_FMT2_MBX_ADDR_MASK;
7817
7818         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7819                              struct ipr_hostrcb, queue);
7820         list_del_init(&hostrcb->queue);
7821         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7822
7823         rc = ipr_get_ldump_data_section(ioa_cfg,
7824                                         be32_to_cpu(sdt.entry[0].start_token),
7825                                         (__be32 *)&hostrcb->hcam,
7826                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7827
7828         if (!rc) {
7829                 ipr_handle_log_data(ioa_cfg, hostrcb);
7830                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7831                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7832                     ioa_cfg->sdt_state == GET_DUMP)
7833                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7834         } else
7835                 ipr_unit_check_no_data(ioa_cfg);
7836
7837         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7838 }
7839
7840 /**
7841  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7842  * @ipr_cmd:    ipr command struct
7843  *
7844  * Description: This function fetches the unit check buffer from the adapter.
7845  *
7846  * Return value:
7847  *      IPR_RC_JOB_RETURN
7848  **/
7849 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7850 {
7851         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7852
7853         ENTER;
7854         ioa_cfg->ioa_unit_checked = 0;
7855         ipr_get_unit_check_buffer(ioa_cfg);
7856         ipr_cmd->job_step = ipr_reset_alert;
7857         ipr_reset_start_timer(ipr_cmd, 0);
7858
7859         LEAVE;
7860         return IPR_RC_JOB_RETURN;
7861 }
7862
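/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:    ipr command struct
 *
 * Description: Polls until the SIS64 mailbox register is stable or the
 * wait time expires, then starts reading the adapter dump.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/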
7863 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
7864 {
7865         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7866
7867         ENTER;
7868
7869         if (ioa_cfg->sdt_state != GET_DUMP)
7870                 return IPR_RC_JOB_RETURN;
7871
7872         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
7873             (readl(ioa_cfg->regs.sense_interrupt_reg) &
7874              IPR_PCII_MAILBOX_STABLE)) {
7875
7876                 if (!ipr_cmd->u.time_left)
7877                         dev_err(&ioa_cfg->pdev->dev,
7878                                 "Timed out waiting for Mailbox register.\n");
7879
7880                 ioa_cfg->sdt_state = READ_DUMP;
7881                 ioa_cfg->dump_timeout = 0;
7882                 if (ioa_cfg->sis64)
7883                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7884                 else
7885                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7886                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7887                 schedule_work(&ioa_cfg->work_q);
7888
7889         } else {
7890                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7891                 ipr_reset_start_timer(ipr_cmd,
7892                                       IPR_CHECK_FOR_RESET_TIMEOUT);
7893         }
7894
7895         LEAVE;
7896         return IPR_RC_JOB_RETURN;
7897 }
7898
7899 /**
7900  * ipr_reset_restore_cfg_space - Restore PCI config space.
7901  * @ipr_cmd:    ipr command struct
7902  *
7903  * Description: This function restores the saved PCI config space of
7904  * the adapter, fails all outstanding ops back to the callers, and
7905  * fetches the dump/unit check if applicable to this reset.
7906  *
7907  * Return value:
7908  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7909  **/
7910 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7911 {
7912         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7913
7914         ENTER;
7915         ioa_cfg->pdev->state_saved = true;
7916         pci_restore_state(ioa_cfg->pdev);
7917
7918         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7919                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7920                 return IPR_RC_JOB_CONTINUE;
7921         }
7922
7923         ipr_fail_all_ops(ioa_cfg);
7924
7925         if (ioa_cfg->sis64) {
7926                 /* Set the adapter to the correct endian mode. */
7927                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7928                 readl(ioa_cfg->regs.endian_swap_reg);
7929         }
7930
7931         if (ioa_cfg->ioa_unit_checked) {
7932                 if (ioa_cfg->sis64) {
7933                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7934                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7935                         return IPR_RC_JOB_RETURN;
7936                 } else {
7937                         ioa_cfg->ioa_unit_checked = 0;
7938                         ipr_get_unit_check_buffer(ioa_cfg);
7939                         ipr_cmd->job_step = ipr_reset_alert;
7940                         ipr_reset_start_timer(ipr_cmd, 0);
7941                         return IPR_RC_JOB_RETURN;
7942                 }
7943         }
7944
7945         if (ioa_cfg->in_ioa_bringdown) {
7946                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7947         } else if (ioa_cfg->sdt_state == GET_DUMP) {
7948                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
7949                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
7950         } else {
7951                 ipr_cmd->job_step = ipr_reset_enable_ioa;
7952         }
7953
7954         LEAVE;
7955         return IPR_RC_JOB_CONTINUE;
7956 }
7957
7958 /**
7959  * ipr_reset_bist_done - BIST has completed on the adapter.
7960  * @ipr_cmd:    ipr command struct
7961  *
7962  * Description: Unblock config space and resume the reset process.
7963  *
7964  * Return value:
7965  *      IPR_RC_JOB_CONTINUE
7966  **/
7967 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7968 {
7969         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7970
7971         ENTER;
7972         if (ioa_cfg->cfg_locked)
7973                 pci_cfg_access_unlock(ioa_cfg->pdev);
7974         ioa_cfg->cfg_locked = 0;
7975         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7976         LEAVE;
7977         return IPR_RC_JOB_CONTINUE;
7978 }
7979
7980 /**
7981  * ipr_reset_start_bist - Run BIST on the adapter.
7982  * @ipr_cmd:    ipr command struct
7983  *
7984  * Description: This function runs BIST on the adapter, then delays 2 seconds.
7985  *
7986  * Return value:
7987  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7988  **/
7989 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7990 {
7991         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7992         int rc = PCIBIOS_SUCCESSFUL;
7993
7994         ENTER;
7995         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7996                 writel(IPR_UPROCI_SIS64_START_BIST,
7997                        ioa_cfg->regs.set_uproc_interrupt_reg32);
7998         else
7999                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8000
8001         if (rc == PCIBIOS_SUCCESSFUL) {
8002                 ipr_cmd->job_step = ipr_reset_bist_done;
8003                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8004                 rc = IPR_RC_JOB_RETURN;
8005         } else {
8006                 if (ioa_cfg->cfg_locked)
8007                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8008                 ioa_cfg->cfg_locked = 0;
8009                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8010                 rc = IPR_RC_JOB_CONTINUE;
8011         }
8012
8013         LEAVE;
8014         return rc;
8015 }
8016
8017 /**
8018  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8019  * @ipr_cmd:    ipr command struct
8020  *
8021  * Description: This clears PCI reset to the adapter and delays two seconds.
8022  *
8023  * Return value:
8024  *      IPR_RC_JOB_RETURN
8025  **/
8026 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8027 {
8028         ENTER;
8029         ipr_cmd->job_step = ipr_reset_bist_done;
8030         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8031         LEAVE;
8032         return IPR_RC_JOB_RETURN;
8033 }
8034
8035 /**
8036  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8037  * @work:       work struct
8038  *
8039  * Description: This pulses a PCIe warm reset to the adapter's slot.
8040  *
8041  **/
8042 static void ipr_reset_reset_work(struct work_struct *work)
8043 {
8044         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8045         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8046         struct pci_dev *pdev = ioa_cfg->pdev;
8047         unsigned long lock_flags = 0;
8048
8049         ENTER;
8050         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8051         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8052         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8053
8054         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8055         if (ioa_cfg->reset_cmd == ipr_cmd)
8056                 ipr_reset_ioa_job(ipr_cmd);
8057         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8058         LEAVE;
8059 }
8060
8061 /**
8062  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8063  * @ipr_cmd:    ipr command struct
8064  *
8065  * Description: This asserts PCI reset to the adapter.
8066  *
8067  * Return value:
8068  *      IPR_RC_JOB_RETURN
8069  **/
8070 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8071 {
8072         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8073
8074         ENTER;
8075         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8076         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8077         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8078         LEAVE;
8079         return IPR_RC_JOB_RETURN;
8080 }
8081
8082 /**
8083  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8084  * @ipr_cmd:    ipr command struct
8085  *
8086  * Description: This attempts to block config access to the IOA.
8087  *
8088  * Return value:
8089  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8090  **/
8091 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8092 {
8093         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8094         int rc = IPR_RC_JOB_CONTINUE;
8095
8096         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8097                 ioa_cfg->cfg_locked = 1;
8098                 ipr_cmd->job_step = ioa_cfg->reset;
8099         } else {
8100                 if (ipr_cmd->u.time_left) {
8101                         rc = IPR_RC_JOB_RETURN;
8102                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8103                         ipr_reset_start_timer(ipr_cmd,
8104                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8105                 } else {
8106                         ipr_cmd->job_step = ioa_cfg->reset;
8107                         dev_err(&ioa_cfg->pdev->dev,
8108                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8109                 }
8110         }
8111
8112         return rc;
8113 }
8114
8115 /**
8116  * ipr_reset_block_config_access - Block config access to the IOA
8117  * @ipr_cmd:    ipr command struct
8118  *
8119  * Description: This attempts to block config access to the IOA.
8120  *
8121  * Return value:
8122  *      IPR_RC_JOB_CONTINUE
8123  **/
8124 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8125 {
8126         ipr_cmd->ioa_cfg->cfg_locked = 0;
8127         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8128         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8129         return IPR_RC_JOB_CONTINUE;
8130 }
8131
8132 /**
8133  * ipr_reset_allowed - Query whether or not IOA can be reset
8134  * @ioa_cfg:    ioa config struct
8135  *
8136  * Return value:
8137  *      0 if reset not allowed / non-zero if reset is allowed
8138  **/
8139 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8140 {
8141         volatile u32 temp_reg;
8142
8143         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8144         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8145 }
8146
8147 /**
8148  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8149  * @ipr_cmd:    ipr command struct
8150  *
8151  * Description: This function waits for adapter permission to run BIST,
8152  * then runs BIST. If the adapter does not give permission after a
8153  * reasonable time, we will reset the adapter anyway. The impact of
8154  * resetting the adapter without warning it is the risk of
8155  * losing the persistent error log on the adapter. If the adapter is
8156  * reset while it is writing to the flash on the adapter, the flash
8157  * segment will have bad ECC and be zeroed.
8158  *
8159  * Return value:
8160  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8161  **/
8162 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8163 {
8164         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8165         int rc = IPR_RC_JOB_RETURN;
8166
8167         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8168                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8169                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8170         } else {
8171                 ipr_cmd->job_step = ipr_reset_block_config_access;
8172                 rc = IPR_RC_JOB_CONTINUE;
8173         }
8174
8175         return rc;
8176 }
8177
8178 /**
8179  * ipr_reset_alert - Alert the adapter of a pending reset
8180  * @ipr_cmd:    ipr command struct
8181  *
8182  * Description: This function alerts the adapter that it will be reset.
8183  * If memory space is not currently enabled, proceed directly
8184  * to running BIST on the adapter. The timer must always be started
8185  * so we guarantee we do not run BIST from ipr_isr.
8186  *
8187  * Return value:
8188  *      IPR_RC_JOB_RETURN
8189  **/
8190 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8191 {
8192         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8193         u16 cmd_reg;
8194         int rc;
8195
8196         ENTER;
8197         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8198
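        /* The reset alert is raised via MMIO, so it can only be sent
         * when memory space is enabled; otherwise go straight to
         * blocking config access and running BIST.
         */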
8199         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8200                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8201                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8202                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8203         } else {
8204                 ipr_cmd->job_step = ipr_reset_block_config_access;
8205         }
8206
8207         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8208         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8209
8210         LEAVE;
8211         return IPR_RC_JOB_RETURN;
8212 }
8213
8214 /**
8215  * ipr_reset_quiesce_done - Complete IOA disconnect
8216  * @ipr_cmd:    ipr command struct
8217  *
8218  * Description: Freeze the adapter to complete quiesce processing
8219  *
8220  * Return value:
8221  *      IPR_RC_JOB_CONTINUE
8222  **/
8223 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8224 {
8225         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8226
8227         ENTER;
8228         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8229         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8230         LEAVE;
8231         return IPR_RC_JOB_CONTINUE;
8232 }
8233
8234 /**
8235  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8236  * @ipr_cmd:    ipr command struct
8237  *
8238  * Description: Ensure nothing is outstanding to the IOA and
8239  * proceed with IOA disconnect. Otherwise reset the IOA.
8240  *
8241  * Return value:
8242  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8243  **/
8244 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8245 {
8246         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8247         struct ipr_cmnd *loop_cmd;
8248         struct ipr_hrr_queue *hrrq;
8249         int rc = IPR_RC_JOB_CONTINUE;
8250         int count = 0;
8251
8252         ENTER;
8253         ipr_cmd->job_step = ipr_reset_quiesce_done;
8254
8255         for_each_hrrq(hrrq, ioa_cfg) {
8256                 spin_lock(&hrrq->_lock);
8257                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8258                         count++;
8259                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8260                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8261                         rc = IPR_RC_JOB_RETURN;
8262                         break;
8263                 }
8264                 spin_unlock(&hrrq->_lock);
8265
8266                 if (count)
8267                         break;
8268         }
8269
8270         LEAVE;
8271         return rc;
8272 }
8273
8274 /**
8275  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8276  * @ipr_cmd:    ipr command struct
8277  *
8278  * Description: Cancel any outstanding HCAMs to the IOA.
8279  *
8280  * Return value:
8281  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8282  **/
8283 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8284 {
8285         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8286         int rc = IPR_RC_JOB_CONTINUE;
8287         struct ipr_cmd_pkt *cmd_pkt;
8288         struct ipr_cmnd *hcam_cmd;
8289         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8290
8291         ENTER;
8292         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8293
8294         if (!hrrq->ioa_is_dead) {
8295                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8296                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8297                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8298                                         continue;
8299
8300                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8301                                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8302                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8303                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8304                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8305                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
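                                /* CDB bytes 10-13 carry bits 63:32 and
                                 * bytes 2-5 bits 31:0 of the HCAM
                                 * IOARCB address.
                                 */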
8306                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8307                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8308                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8309                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8310                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8311                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8312                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8313                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8314
8315                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8316                                            IPR_CANCEL_TIMEOUT);
8317
8318                                 rc = IPR_RC_JOB_RETURN;
8319                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8320                                 break;
8321                         }
8322                 }
8323         } else
8324                 ipr_cmd->job_step = ipr_reset_alert;
8325
8326         LEAVE;
8327         return rc;
8328 }
8329
8330 /**
8331  * ipr_reset_ucode_download_done - Microcode download completion
8332  * @ipr_cmd:    ipr command struct
8333  *
8334  * Description: This function unmaps the microcode download buffer.
8335  *
8336  * Return value:
8337  *      IPR_RC_JOB_CONTINUE
8338  **/
8339 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8340 {
8341         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8342         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8343
8344         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8345                      sglist->num_sg, DMA_TO_DEVICE);
8346
8347         ipr_cmd->job_step = ipr_reset_alert;
8348         return IPR_RC_JOB_CONTINUE;
8349 }
8350
8351 /**
8352  * ipr_reset_ucode_download - Download microcode to the adapter
8353  * @ipr_cmd:    ipr command struct
8354  *
8355  * Description: This function checks to see if there is microcode
8356  * to download to the adapter. If there is, a download is performed.
8357  *
8358  * Return value:
8359  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8360  **/
8361 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8362 {
8363         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8364         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8365
8366         ENTER;
8367         ipr_cmd->job_step = ipr_reset_alert;
8368
8369         if (!sglist)
8370                 return IPR_RC_JOB_CONTINUE;
8371
8372         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8373         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8374         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8375         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
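        /* WRITE BUFFER CDB bytes 6-8: 24-bit parameter list length */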
8376         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8377         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8378         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8379
8380         if (ioa_cfg->sis64)
8381                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8382         else
8383                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8384         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8385
8386         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8387                    IPR_WRITE_BUFFER_TIMEOUT);
8388
8389         LEAVE;
8390         return IPR_RC_JOB_RETURN;
8391 }
8392
8393 /**
8394  * ipr_reset_shutdown_ioa - Shutdown the adapter
8395  * @ipr_cmd:    ipr command struct
8396  *
8397  * Description: This function issues an adapter shutdown of the
8398  * specified type to the specified adapter as part of the
8399  * adapter reset job.
8400  *
8401  * Return value:
8402  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8403  **/
8404 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8405 {
8406         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8407         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8408         unsigned long timeout;
8409         int rc = IPR_RC_JOB_CONTINUE;
8410
8411         ENTER;
8412         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8413                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8414         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8415                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8416                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8417                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8418                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8419                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8420
8421                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8422                         timeout = IPR_SHUTDOWN_TIMEOUT;
8423                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8424                         timeout = IPR_INTERNAL_TIMEOUT;
8425                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8426                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8427                 else
8428                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8429
8430                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8431
8432                 rc = IPR_RC_JOB_RETURN;
8433                 ipr_cmd->job_step = ipr_reset_ucode_download;
8434         } else
8435                 ipr_cmd->job_step = ipr_reset_alert;
8436
8437         LEAVE;
8438         return rc;
8439 }
8440
8441 /**
8442  * ipr_reset_ioa_job - Adapter reset job
8443  * @ipr_cmd:    ipr command struct
8444  *
8445  * Description: This function is the job router for the adapter reset job.
8446  *
8447  * Return value:
8448  *      none
8449  **/
8450 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8451 {
8452         u32 rc, ioasc;
8453         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8454
8455         do {
8456                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8457
8458                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8459                         /*
8460                          * We are doing nested adapter resets and this is
8461                          * not the current reset job.
8462                          */
8463                         list_add_tail(&ipr_cmd->queue,
8464                                         &ipr_cmd->hrrq->hrrq_free_q);
8465                         return;
8466                 }
8467
8468                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8469                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8470                         if (rc == IPR_RC_JOB_RETURN)
8471                                 return;
8472                 }
8473
8474                 ipr_reinit_ipr_cmnd(ipr_cmd);
8475                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8476                 rc = ipr_cmd->job_step(ipr_cmd);
8477         } while (rc == IPR_RC_JOB_CONTINUE);
8478 }
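/*
 * Job steps driven by this router follow a simple contract: return
 * IPR_RC_JOB_CONTINUE to have the next job_step run immediately, or
 * IPR_RC_JOB_RETURN after queueing asynchronous work whose completion
 * re-enters ipr_reset_ioa_job(). A minimal sketch of a step, using the
 * hypothetical names ipr_reset_next_step and done_fast_path:
 */
#if 0
static int ipr_reset_example_step(struct ipr_cmnd *ipr_cmd)
{
        /* Where the state machine goes after this step completes. */
        ipr_cmd->job_step = ipr_reset_next_step;        /* hypothetical */

        if (done_fast_path)                             /* hypothetical */
                return IPR_RC_JOB_CONTINUE;             /* run next step now */

        /* Send an adapter command; the router resumes on completion. */
        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
                   IPR_INTERNAL_TIMEOUT);
        return IPR_RC_JOB_RETURN;
}
#endif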
8479
8480 /**
8481  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8482  * @ioa_cfg:            ioa config struct
8483  * @job_step:           first job step of reset job
8484  * @shutdown_type:      shutdown type
8485  *
8486  * Description: This function will initiate the reset of the given adapter
8487  * starting at the selected job step.
8488  * If the caller needs to wait on the completion of the reset,
8489  * the caller must sleep on the reset_wait_q.
8490  *
8491  * Return value:
8492  *      none
8493  **/
8494 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8495                                     int (*job_step) (struct ipr_cmnd *),
8496                                     enum ipr_shutdown_type shutdown_type)
8497 {
8498         struct ipr_cmnd *ipr_cmd;
8499         int i;
8500
8501         ioa_cfg->in_reset_reload = 1;
8502         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8503                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8504                 ioa_cfg->hrrq[i].allow_cmds = 0;
8505                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8506         }
8507         wmb();
8508         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8509                 ioa_cfg->scsi_unblock = 0;
8510                 ioa_cfg->scsi_blocked = 1;
8511                 scsi_block_requests(ioa_cfg->host);
8512         }
8513
8514         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8515         ioa_cfg->reset_cmd = ipr_cmd;
8516         ipr_cmd->job_step = job_step;
8517         ipr_cmd->u.shutdown_type = shutdown_type;
8518
8519         ipr_reset_ioa_job(ipr_cmd);
8520 }
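/*
 * Ordering note: allow_cmds is cleared under each hrrq lock and made
 * visible with wmb() before the midlayer is blocked, so no new command
 * can reach an HRRQ once reset_cmd owns the adapter.
 */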
8521
8522 /**
8523  * ipr_initiate_ioa_reset - Initiate an adapter reset
8524  * @ioa_cfg:            ioa config struct
8525  * @shutdown_type:      shutdown type
8526  *
8527  * Description: This function will initiate the reset of the given adapter.
8528  * If the caller needs to wait on the completion of the reset,
8529  * the caller must sleep on the reset_wait_q.
8530  *
8531  * Return value:
8532  *      none
8533  **/
8534 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8535                                    enum ipr_shutdown_type shutdown_type)
8536 {
8537         int i;
8538
8539         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8540                 return;
8541
8542         if (ioa_cfg->in_reset_reload) {
8543                 if (ioa_cfg->sdt_state == GET_DUMP)
8544                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8545                 else if (ioa_cfg->sdt_state == READ_DUMP)
8546                         ioa_cfg->sdt_state = ABORT_DUMP;
8547         }
8548
8549         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8550                 dev_err(&ioa_cfg->pdev->dev,
8551                         "IOA taken offline - error recovery failed\n");
8552
8553                 ioa_cfg->reset_retries = 0;
8554                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8555                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8556                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8557                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8558                 }
8559                 wmb();
8560
8561                 if (ioa_cfg->in_ioa_bringdown) {
8562                         ioa_cfg->reset_cmd = NULL;
8563                         ioa_cfg->in_reset_reload = 0;
8564                         ipr_fail_all_ops(ioa_cfg);
8565                         wake_up_all(&ioa_cfg->reset_wait_q);
8566
8567                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8568                                 ioa_cfg->scsi_unblock = 1;
8569                                 schedule_work(&ioa_cfg->work_q);
8570                         }
8571                         return;
8572                 } else {
8573                         ioa_cfg->in_ioa_bringdown = 1;
8574                         shutdown_type = IPR_SHUTDOWN_NONE;
8575                 }
8576         }
8577
8578         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8579                                 shutdown_type);
8580 }
8581
8582 /**
8583  * ipr_reset_freeze - Hold off all I/O activity
8584  * @ipr_cmd:    ipr command struct
8585  *
8586  * Description: If the PCI slot is frozen, hold off all I/O
8587  * activity; then, as soon as the slot is available again,
8588  * initiate an adapter reset.
8589  */
8589  *
 * Return value:
 *      IPR_RC_JOB_RETURN
 */
8590 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8591 {
8592         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8593         int i;
8594
8595         /* Disallow new interrupts, avoid loop */
8596         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8597                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8598                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8599                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8600         }
8601         wmb();
8602         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8603         ipr_cmd->done = ipr_reset_ioa_job;
8604         return IPR_RC_JOB_RETURN;
8605 }
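/*
 * Note that the command is parked on the pending queue with ->done set
 * to ipr_reset_ioa_job: nothing runs while the slot is frozen, and when
 * the command is eventually completed (or failed) after the slot comes
 * back, the reset state machine picks up where it left off.
 */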
8606
8607 /**
8608  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8609  * @pdev:       PCI device struct
8610  *
8611  * Description: This routine is called to tell us that the MMIO
8612  * access to the IOA has been restored.
8613  *
 * Return value:
 *      PCI_ERS_RESULT_NEED_RESET
 */
8614 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8615 {
8616         unsigned long flags = 0;
8617         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8618
8619         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8620         if (!ioa_cfg->probe_done)
8621                 pci_save_state(pdev);
8622         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8623         return PCI_ERS_RESULT_NEED_RESET;
8624 }
8625
8626 /**
8627  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8628  * @pdev:       PCI device struct
8629  *
8630  * Description: This routine is called to tell us that the PCI bus
8631  * is down. Can't do anything here, except put the device driver
8632  * into a holding pattern, waiting for the PCI bus to come back.
8633  */
8634 static void ipr_pci_frozen(struct pci_dev *pdev)
8635 {
8636         unsigned long flags = 0;
8637         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8638
8639         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8640         if (ioa_cfg->probe_done)
8641                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8642         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8643 }
8644
8645 /**
8646  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8647  * @pdev:       PCI device struct
8648  *
8649  * Description: This routine is called by the pci error recovery
8650  * code after the PCI slot has been reset, just before we
8651  * should resume normal operations.
8652  *
 * Return value:
 *      PCI_ERS_RESULT_RECOVERED
 */
8653 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8654 {
8655         unsigned long flags = 0;
8656         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8657
8658         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8659         if (ioa_cfg->probe_done) {
8660                 if (ioa_cfg->needs_warm_reset)
8661                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8662                 else
8663                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8664                                                 IPR_SHUTDOWN_NONE);
8665         } else
8666                 wake_up_all(&ioa_cfg->eeh_wait_q);
8667         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8668         return PCI_ERS_RESULT_RECOVERED;
8669 }
8670
8671 /**
8672  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8673  * @pdev:       PCI device struct
8674  *
8675  * Description: This routine is called when the PCI bus has
8676  * permanently failed.
8677  */
8678 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8679 {
8680         unsigned long flags = 0;
8681         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8682         int i;
8683
8684         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8685         if (ioa_cfg->probe_done) {
8686                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8687                         ioa_cfg->sdt_state = ABORT_DUMP;
8688                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8689                 ioa_cfg->in_ioa_bringdown = 1;
8690                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8691                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8692                         ioa_cfg->hrrq[i].allow_cmds = 0;
8693                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8694                 }
8695                 wmb();
8696                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8697         } else
8698                 wake_up_all(&ioa_cfg->eeh_wait_q);
8699         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8700 }
8701
8702 /**
8703  * ipr_pci_error_detected - Called when a PCI error is detected.
8704  * @pdev:       PCI device struct
8705  * @state:      PCI channel state
8706  *
8707  * Description: Called when a PCI error is detected.
8708  *
8709  * Return value:
8710  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8711  */
8712 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8713                                                pci_channel_state_t state)
8714 {
8715         switch (state) {
8716         case pci_channel_io_frozen:
8717                 ipr_pci_frozen(pdev);
8718                 return PCI_ERS_RESULT_CAN_RECOVER;
8719         case pci_channel_io_perm_failure:
8720                 ipr_pci_perm_failure(pdev);
8721                 return PCI_ERS_RESULT_DISCONNECT;
8722         default:
8723                 break;
8724         }
8725         return PCI_ERS_RESULT_NEED_RESET;
8726 }
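/*
 * The PCI error recovery core drives these callbacks in roughly this
 * order: error_detected(io_frozen) -> ipr_pci_frozen() and CAN_RECOVER;
 * mmio_enabled() -> NEED_RESET; slot_reset() -> warm reset or config
 * space restore and RECOVERED. A permanent failure goes straight to
 * ipr_pci_perm_failure() and DISCONNECT.
 */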
8727
8728 /**
8729  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8730  * @ioa_cfg:    ioa cfg struct
8731  *
8732  * Description: This is the second phase of adapter initialization
8733  * This function takes care of initilizing the adapter to the point
8734  * where it can accept new commands.
8735  * Return value:
8736  *     none
8737  **/
8738 static void ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8739 {
8740         unsigned long host_lock_flags = 0;
8741
8742         ENTER;
8743         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8744         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8745         ioa_cfg->probe_done = 1;
8746         if (ioa_cfg->needs_hard_reset) {
8747                 ioa_cfg->needs_hard_reset = 0;
8748                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8749         } else
8750                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8751                                         IPR_SHUTDOWN_NONE);
8752         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8753
8754         LEAVE;
8755 }
8756
8757 /**
8758  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8759  * @ioa_cfg:    ioa config struct
8760  *
8761  * Return value:
8762  *      none
8763  **/
8764 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8765 {
8766         int i;
8767
8768         if (ioa_cfg->ipr_cmnd_list) {
8769                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8770                         if (ioa_cfg->ipr_cmnd_list[i])
8771                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
8772                                               ioa_cfg->ipr_cmnd_list[i],
8773                                               ioa_cfg->ipr_cmnd_list_dma[i]);
8774
8775                         ioa_cfg->ipr_cmnd_list[i] = NULL;
8776                 }
8777         }
8778
8779         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8780
8781         kfree(ioa_cfg->ipr_cmnd_list);
8782         kfree(ioa_cfg->ipr_cmnd_list_dma);
8783         ioa_cfg->ipr_cmnd_list = NULL;
8784         ioa_cfg->ipr_cmnd_list_dma = NULL;
8785         ioa_cfg->ipr_cmd_pool = NULL;
8786 }
8787
8788 /**
8789  * ipr_free_mem - Frees memory allocated for an adapter
8790  * @ioa_cfg:    ioa cfg struct
8791  *
8792  * Return value:
8793  *      nothing
8794  **/
8795 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8796 {
8797         int i;
8798
8799         kfree(ioa_cfg->res_entries);
8800         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8801                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8802         ipr_free_cmd_blks(ioa_cfg);
8803
8804         for (i = 0; i < ioa_cfg->hrrq_num; i++)
8805                 dma_free_coherent(&ioa_cfg->pdev->dev,
8806                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
8807                                   ioa_cfg->hrrq[i].host_rrq,
8808                                   ioa_cfg->hrrq[i].host_rrq_dma);
8809
8810         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8811                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8812
8813         for (i = 0; i < IPR_MAX_HCAMS; i++) {
8814                 dma_free_coherent(&ioa_cfg->pdev->dev,
8815                                   sizeof(struct ipr_hostrcb),
8816                                   ioa_cfg->hostrcb[i],
8817                                   ioa_cfg->hostrcb_dma[i]);
8818         }
8819
8820         ipr_free_dump(ioa_cfg);
8821         kfree(ioa_cfg->trace);
8822 }
8823
8824 /**
8825  * ipr_free_irqs - Free all allocated IRQs for the adapter.
8826  * @ioa_cfg:    ipr cfg struct
8827  *
8828  * This function frees all allocated IRQs for the
8829  * specified adapter.
8830  *
8831  * Return value:
8832  *      none
8833  **/
8834 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
8835 {
8836         struct pci_dev *pdev = ioa_cfg->pdev;
8837         int i;
8838
8839         for (i = 0; i < ioa_cfg->nvectors; i++)
8840                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
8841         pci_free_irq_vectors(pdev);
8842 }
8843
8844 /**
8845  * ipr_free_all_resources - Free all allocated resources for an adapter.
8846  * @ioa_cfg:    ioa config struct
8847  *
8848  * This function frees all allocated resources for the
8849  * specified adapter.
8850  *
8851  * Return value:
8852  *      none
8853  **/
8854 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8855 {
8856         struct pci_dev *pdev = ioa_cfg->pdev;
8857
8858         ENTER;
8859         ipr_free_irqs(ioa_cfg);
8860         if (ioa_cfg->reset_work_q)
8861                 destroy_workqueue(ioa_cfg->reset_work_q);
8862         iounmap(ioa_cfg->hdw_dma_regs);
8863         pci_release_regions(pdev);
8864         ipr_free_mem(ioa_cfg);
8865         scsi_host_put(ioa_cfg->host);
8866         pci_disable_device(pdev);
8867         LEAVE;
8868 }
8869
8870 /**
8871  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8872  * @ioa_cfg:    ioa config struct
8873  *
8874  * Return value:
8875  *      0 on success / -ENOMEM on allocation failure
8876  **/
8877 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8878 {
8879         struct ipr_cmnd *ipr_cmd;
8880         struct ipr_ioarcb *ioarcb;
8881         dma_addr_t dma_addr;
8882         int i, entries_each_hrrq, hrrq_id = 0;
8883
8884         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8885                                                 sizeof(struct ipr_cmnd), 512, 0);
8886
8887         if (!ioa_cfg->ipr_cmd_pool)
8888                 return -ENOMEM;
8889
8890         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8891         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8892
8893         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8894                 ipr_free_cmd_blks(ioa_cfg);
8895                 return -ENOMEM;
8896         }
8897
8898         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8899                 if (ioa_cfg->hrrq_num > 1) {
8900                         if (i == 0) {
8901                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8902                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
8903                                 ioa_cfg->hrrq[i].max_cmd_id =
8904                                         (entries_each_hrrq - 1);
8905                         } else {
8906                                 entries_each_hrrq =
8907                                         IPR_NUM_BASE_CMD_BLKS/
8908                                         (ioa_cfg->hrrq_num - 1);
8909                                 ioa_cfg->hrrq[i].min_cmd_id =
8910                                         IPR_NUM_INTERNAL_CMD_BLKS +
8911                                         (i - 1) * entries_each_hrrq;
8912                                 ioa_cfg->hrrq[i].max_cmd_id =
8913                                         (IPR_NUM_INTERNAL_CMD_BLKS +
8914                                         i * entries_each_hrrq - 1);
8915                         }
8916                 } else {
8917                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
8918                         ioa_cfg->hrrq[i].min_cmd_id = 0;
8919                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8920                 }
8921                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8922         }
8923
8924         BUG_ON(ioa_cfg->hrrq_num == 0);
8925
8926         i = IPR_NUM_CMD_BLKS -
8927                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8928         if (i > 0) {
8929                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8930                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8931         }
8932
8933         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8934                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
8935                                 GFP_KERNEL, &dma_addr);
8936
8937                 if (!ipr_cmd) {
8938                         ipr_free_cmd_blks(ioa_cfg);
8939                         return -ENOMEM;
8940                 }
8941
8942                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8943                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8944
8945                 ioarcb = &ipr_cmd->ioarcb;
8946                 ipr_cmd->dma_addr = dma_addr;
8947                 if (ioa_cfg->sis64)
8948                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8949                 else
8950                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8951
8952                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
8953                 if (ioa_cfg->sis64) {
8954                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
8955                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8956                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8957                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8958                 } else {
8959                         ioarcb->write_ioadl_addr =
8960                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8961                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8962                         ioarcb->ioasa_host_pci_addr =
8963                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8964                 }
8965                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8966                 ipr_cmd->cmd_index = i;
8967                 ipr_cmd->ioa_cfg = ioa_cfg;
8968                 ipr_cmd->sense_buffer_dma = dma_addr +
8969                         offsetof(struct ipr_cmnd, sense_buffer);
8970
8971                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
8972                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8973                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8974                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
8975                         hrrq_id++;
8976         }
8977
8978         return 0;
8979 }
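/*
 * Worked example of the partitioning above, using purely illustrative
 * values IPR_NUM_INTERNAL_CMD_BLKS = 5, IPR_NUM_BASE_CMD_BLKS = 100,
 * IPR_NUM_CMD_BLKS = 105 and hrrq_num = 4 (entries_each_hrrq = 33):
 *
 *      hrrq[0]: cmd_id  0 -   4        (internal commands)
 *      hrrq[1]: cmd_id  5 -  37
 *      hrrq[2]: cmd_id 38 -  70
 *      hrrq[3]: cmd_id 71 - 103, then grown by the remainder fix-up
 *               (i = 105 - 103 - 1 = 1) to 71 - 104, so every block
 *               is owned by exactly one HRRQ.
 */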
8980
8981 /**
8982  * ipr_alloc_mem - Allocate memory for an adapter
8983  * @ioa_cfg:    ioa config struct
8984  *
8985  * Return value:
8986  *      0 on success / non-zero for error
8987  **/
8988 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8989 {
8990         struct pci_dev *pdev = ioa_cfg->pdev;
8991         int i, rc = -ENOMEM;
8992
8993         ENTER;
8994         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
8995                                        sizeof(struct ipr_resource_entry),
8996                                        GFP_KERNEL);
8997
8998         if (!ioa_cfg->res_entries)
8999                 goto out;
9000
9001         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9002                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9003                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9004         }
9005
9006         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9007                                               sizeof(struct ipr_misc_cbs),
9008                                               &ioa_cfg->vpd_cbs_dma,
9009                                               GFP_KERNEL);
9010
9011         if (!ioa_cfg->vpd_cbs)
9012                 goto out_free_res_entries;
9013
9014         if (ipr_alloc_cmd_blks(ioa_cfg))
9015                 goto out_free_vpd_cbs;
9016
9017         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9018                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9019                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9020                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9021                                         GFP_KERNEL);
9022
9023                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9024                         while (--i >= 0)
9025                                 dma_free_coherent(&pdev->dev,
9026                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9027                                         ioa_cfg->hrrq[i].host_rrq,
9028                                         ioa_cfg->hrrq[i].host_rrq_dma);
9029                         goto out_ipr_free_cmd_blocks;
9030                 }
9031                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9032         }
9033
9034         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9035                                                   ioa_cfg->cfg_table_size,
9036                                                   &ioa_cfg->cfg_table_dma,
9037                                                   GFP_KERNEL);
9038
9039         if (!ioa_cfg->u.cfg_table)
9040                 goto out_free_host_rrq;
9041
9042         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9043                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9044                                                          sizeof(struct ipr_hostrcb),
9045                                                          &ioa_cfg->hostrcb_dma[i],
9046                                                          GFP_KERNEL);
9047
9048                 if (!ioa_cfg->hostrcb[i])
9049                         goto out_free_hostrcb_dma;
9050
9051                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9052                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9053                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9054                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9055         }
9056
9057         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9058                                  sizeof(struct ipr_trace_entry),
9059                                  GFP_KERNEL);
9060
9061         if (!ioa_cfg->trace)
9062                 goto out_free_hostrcb_dma;
9063
9064         rc = 0;
9065 out:
9066         LEAVE;
9067         return rc;
9068
9069 out_free_hostrcb_dma:
9070         while (i-- > 0) {
9071                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9072                                   ioa_cfg->hostrcb[i],
9073                                   ioa_cfg->hostrcb_dma[i]);
9074         }
9075         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9076                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9077 out_free_host_rrq:
9078         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9079                 dma_free_coherent(&pdev->dev,
9080                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9081                                   ioa_cfg->hrrq[i].host_rrq,
9082                                   ioa_cfg->hrrq[i].host_rrq_dma);
9083         }
9084 out_ipr_free_cmd_blocks:
9085         ipr_free_cmd_blks(ioa_cfg);
9086 out_free_vpd_cbs:
9087         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9088                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9089 out_free_res_entries:
9090         kfree(ioa_cfg->res_entries);
9091         goto out;
9092 }
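/*
 * The error labels above unwind in strict reverse order of allocation.
 * Note that out_free_hostrcb_dma relies on i indexing one past the last
 * successfully allocated hostrcb, whether the jump came from a failed
 * hostrcb allocation or from the trace buffer allocation.
 */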
9093
9094 /**
9095  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9096  * @ioa_cfg:    ioa config struct
9097  *
9098  * Return value:
9099  *      none
9100  **/
9101 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9102 {
9103         int i;
9104
9105         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9106                 ioa_cfg->bus_attr[i].bus = i;
9107                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9108                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9109                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9110                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9111                 else
9112                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9113         }
9114 }
9115
9116 /**
9117  * ipr_init_regs - Initialize IOA registers
9118  * @ioa_cfg:    ioa config struct
9119  *
9120  * Return value:
9121  *      none
9122  **/
9123 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9124 {
9125         const struct ipr_interrupt_offsets *p;
9126         struct ipr_interrupts *t;
9127         void __iomem *base;
9128
9129         p = &ioa_cfg->chip_cfg->regs;
9130         t = &ioa_cfg->regs;
9131         base = ioa_cfg->hdw_dma_regs;
9132
9133         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9134         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9135         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9136         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9137         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9138         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9139         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9140         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9141         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9142         t->ioarrin_reg = base + p->ioarrin_reg;
9143         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9144         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9145         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9146         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9147         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9148         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9149
9150         if (ioa_cfg->sis64) {
9151                 t->init_feedback_reg = base + p->init_feedback_reg;
9152                 t->dump_addr_reg = base + p->dump_addr_reg;
9153                 t->dump_data_reg = base + p->dump_data_reg;
9154                 t->endian_swap_reg = base + p->endian_swap_reg;
9155         }
9156 }
9157
9158 /**
9159  * ipr_init_ioa_cfg - Initialize IOA config struct
9160  * @ioa_cfg:    ioa config struct
9161  * @host:               scsi host struct
9162  * @pdev:               PCI dev struct
9163  *
9164  * Return value:
9165  *      none
9166  **/
9167 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9168                              struct Scsi_Host *host, struct pci_dev *pdev)
9169 {
9170         int i;
9171
9172         ioa_cfg->host = host;
9173         ioa_cfg->pdev = pdev;
9174         ioa_cfg->log_level = ipr_log_level;
9175         ioa_cfg->doorbell = IPR_DOORBELL;
9176         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9177         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9178         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9179         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9180         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9181         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9182
9183         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9184         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9185         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9186         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9187         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9188         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9189         INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9190         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9191         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9192         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9193         ioa_cfg->sdt_state = INACTIVE;
9194
9195         ipr_initialize_bus_attr(ioa_cfg);
9196         ioa_cfg->max_devs_supported = ipr_max_devs;
9197
9198         if (ioa_cfg->sis64) {
9199                 host->max_channel = IPR_MAX_SIS64_BUSES;
9200                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9201                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9202                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9203                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9204                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9205                                            + ((sizeof(struct ipr_config_table_entry64)
9206                                                * ioa_cfg->max_devs_supported)));
9207         } else {
9208                 host->max_channel = IPR_VSET_BUS;
9209                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9210                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9211                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9212                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9213                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9214                                            + ((sizeof(struct ipr_config_table_entry)
9215                                                * ioa_cfg->max_devs_supported)));
9216         }
9217
9218         host->unique_id = host->host_no;
9219         host->max_cmd_len = IPR_MAX_CDB_LEN;
9220         host->can_queue = ioa_cfg->max_cmds;
9221         pci_set_drvdata(pdev, ioa_cfg);
9222
9223         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9224                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9225                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9226                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9227                 if (i == 0)
9228                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9229                 else
9230                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9231         }
9232 }
9233
9234 /**
9235  * ipr_get_chip_info - Find adapter chip information
9236  * @dev_id:             PCI device id struct
9237  *
9238  * Return value:
9239  *      ptr to chip information on success / NULL on failure
9240  **/
9241 static const struct ipr_chip_t *
9242 ipr_get_chip_info(const struct pci_device_id *dev_id)
9243 {
9244         int i;
9245
9246         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9247                 if (ipr_chip[i].vendor == dev_id->vendor &&
9248                     ipr_chip[i].device == dev_id->device)
9249                         return &ipr_chip[i];
9250         return NULL;
9251 }
9252
9253 /**
9254  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9255  *                                              during probe time
9256  * @ioa_cfg:    ioa config struct
9257  *
9258  * Return value:
9259  *      None
9260  **/
9261 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9262 {
9263         struct pci_dev *pdev = ioa_cfg->pdev;
9264
9265         if (pci_channel_offline(pdev)) {
9266                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9267                                    !pci_channel_offline(pdev),
9268                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9269                 pci_restore_state(pdev);
9270         }
9271 }
9272
9273 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9274 {
9275         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9276
9277         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9278                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9279                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9280                 ioa_cfg->vectors_info[vec_idx].
9281                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9282         }
9283 }
9284
9285 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
9286                 struct pci_dev *pdev)
9287 {
9288         int i, rc;
9289
9290         for (i = 1; i < ioa_cfg->nvectors; i++) {
9291                 rc = request_irq(pci_irq_vector(pdev, i),
9292                         ipr_isr_mhrrq,
9293                         0,
9294                         ioa_cfg->vectors_info[i].desc,
9295                         &ioa_cfg->hrrq[i]);
9296                 if (rc) {
9297                         while (--i > 0)
9298                                 free_irq(pci_irq_vector(pdev, i),
9299                                         &ioa_cfg->hrrq[i]);
9300                         return rc;
9301                 }
9302         }
9303         return 0;
9304 }
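/*
 * The unwind above is deliberately --i > 0 rather than --i >= 0: vector
 * 0 is requested (and freed) separately by ipr_probe_ioa(), so only
 * vectors 1..i-1 belong to this function.
 */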
9305
9306 /**
9307  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9308  * @devp:               PCI device struct
9309  * @irq:                IRQ number
9310  *
9311  * Description: Simply set the msi_received flag to 1 indicating that
9312  * Message Signaled Interrupts are supported.
9313  *
9314  * Return value:
9315  *      0 on success / non-zero on failure
9316  **/
9317 static irqreturn_t ipr_test_intr(int irq, void *devp)
9318 {
9319         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9320         unsigned long lock_flags = 0;
9321
9322         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9323         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9324
9325         ioa_cfg->msi_received = 1;
9326         wake_up(&ioa_cfg->msi_wait_q);
9327
9328         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9329         return IRQ_HANDLED;
9330 }
9331
9332 /**
9333  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9334  * @ioa_cfg:            ioa config struct
9335  * @pdev:               PCI device struct
9336  *
9337  * Description: This routine sets up and initiates a test interrupt to determine
9338  * if the interrupt is received via the ipr_test_intr() service routine.
9339  * If the test fails, the driver will fall back to LSI.
9340  *
9341  * Return value:
9342  *      0 on success / non-zero on failure
9343  **/
9344 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9345 {
9346         int rc;
9347         unsigned long lock_flags = 0;
9348         int irq = pci_irq_vector(pdev, 0);
9349
9350         ENTER;
9351
9352         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9353         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9354         ioa_cfg->msi_received = 0;
9355         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9356         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9357         readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9358         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9359
9360         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9361         if (rc) {
9362                 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
9363                 return rc;
9364         } else if (ipr_debug)
9365                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
9366
9367         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9368         readl(ioa_cfg->regs.sense_interrupt_reg);
9369         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9370         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9371         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9372
9373         if (!ioa_cfg->msi_received) {
9374                 /* MSI test failed */
9375                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9376                 rc = -EOPNOTSUPP;
9377         } else if (ipr_debug)
9378                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9379
9380         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9381
9382         free_irq(irq, ioa_cfg);
9383
9384         LEAVE;
9385
9386         return rc;
9387 }
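/*
 * The MSI test above is a self-interrupt handshake; condensed, with the
 * request_irq()/locking elided:
 */
#if 0
        /* 1. Arm: unmask only the IO debug acknowledge interrupt. */
        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
               ioa_cfg->regs.clr_interrupt_mask_reg32);

        /* 2. Fire: writing the same bit raises the test interrupt. */
        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
               ioa_cfg->regs.sense_interrupt_reg32);

        /* 3. Wait up to 1s (HZ jiffies) for ipr_test_intr() to run. */
        wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
#endif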
9388
9389 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9390  * @pdev:               PCI device struct
9391  * @dev_id:             PCI device id struct
9392  *
9393  * Return value:
9394  *      0 on success / non-zero on failure
9395  **/
9396 static int ipr_probe_ioa(struct pci_dev *pdev,
9397                          const struct pci_device_id *dev_id)
9398 {
9399         struct ipr_ioa_cfg *ioa_cfg;
9400         struct Scsi_Host *host;
9401         unsigned long ipr_regs_pci;
9402         void __iomem *ipr_regs;
9403         int rc = PCIBIOS_SUCCESSFUL;
9404         volatile u32 mask, uproc, interrupts;
9405         unsigned long lock_flags, driver_lock_flags;
9406         unsigned int irq_flag;
9407
9408         ENTER;
9409
9410         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9411         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9412
9413         if (!host) {
9414                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9415                 rc = -ENOMEM;
9416                 goto out;
9417         }
9418
9419         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9420         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9421
9422         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9423
9424         if (!ioa_cfg->ipr_chip) {
9425                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9426                         dev_id->vendor, dev_id->device);
                rc = -ENODEV;   /* don't return PCIBIOS_SUCCESSFUL (0) after freeing the host */
9427                 goto out_scsi_host_put;
9428         }
9429
9430         /* set SIS 32 or SIS 64 */
9431         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9432         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9433         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9434         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9435
9436         if (ipr_transop_timeout)
9437                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9438         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9439                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9440         else
9441                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9442
9443         ioa_cfg->revid = pdev->revision;
9444
9445         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9446
9447         ipr_regs_pci = pci_resource_start(pdev, 0);
9448
9449         rc = pci_request_regions(pdev, IPR_NAME);
9450         if (rc < 0) {
9451                 dev_err(&pdev->dev,
9452                         "Couldn't register memory range of registers\n");
9453                 goto out_scsi_host_put;
9454         }
9455
9456         rc = pci_enable_device(pdev);
9457
9458         if (rc || pci_channel_offline(pdev)) {
9459                 if (pci_channel_offline(pdev)) {
9460                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9461                         rc = pci_enable_device(pdev);
9462                 }
9463
9464                 if (rc) {
9465                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9466                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9467                         goto out_release_regions;
9468                 }
9469         }
9470
9471         ipr_regs = pci_ioremap_bar(pdev, 0);
9472
9473         if (!ipr_regs) {
9474                 dev_err(&pdev->dev,
9475                         "Couldn't map memory range of registers\n");
9476                 rc = -ENOMEM;
9477                 goto out_disable;
9478         }
9479
9480         ioa_cfg->hdw_dma_regs = ipr_regs;
9481         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9482         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9483
9484         ipr_init_regs(ioa_cfg);
9485
9486         if (ioa_cfg->sis64) {
9487                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9488                 if (rc < 0) {
9489                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9490                         rc = dma_set_mask_and_coherent(&pdev->dev,
9491                                                        DMA_BIT_MASK(32));
9492                 }
9493         } else
9494                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9495
9496         if (rc < 0) {
9497                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9498                 goto cleanup_nomem;
9499         }
9500
9501         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9502                                    ioa_cfg->chip_cfg->cache_line_size);
9503
9504         if (rc != PCIBIOS_SUCCESSFUL) {
9505                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9506                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9507                 rc = -EIO;
9508                 goto cleanup_nomem;
9509         }
9510
9511         /* Issue MMIO read to ensure card is not in EEH */
9512         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9513         ipr_wait_for_pci_err_recovery(ioa_cfg);
9514
9515         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9516                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9517                         IPR_MAX_MSIX_VECTORS);
9518                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9519         }
9520
9521         irq_flag = PCI_IRQ_LEGACY;
9522         if (ioa_cfg->ipr_chip->has_msi)
9523                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
9524         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
9525         if (rc < 0) {
9526                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9527                 goto cleanup_nomem;
9528         }
9529         ioa_cfg->nvectors = rc;
9530
9531         if (!pdev->msi_enabled && !pdev->msix_enabled)
9532                 ioa_cfg->clear_isr = 1;
9533
9534         pci_set_master(pdev);
9535
9536         if (pci_channel_offline(pdev)) {
9537                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9538                 pci_set_master(pdev);
9539                 if (pci_channel_offline(pdev)) {
9540                         rc = -EIO;
9541                         goto out_msi_disable;
9542                 }
9543         }
9544
9545         if (pdev->msi_enabled || pdev->msix_enabled) {
9546                 rc = ipr_test_msi(ioa_cfg, pdev);
9547                 switch (rc) {
9548                 case 0:
9549                         dev_info(&pdev->dev,
9550                                 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
9551                                 pdev->msix_enabled ? "-X" : "");
9552                         break;
9553                 case -EOPNOTSUPP:
9554                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9555                         pci_free_irq_vectors(pdev);
9556
9557                         ioa_cfg->nvectors = 1;
9558                         ioa_cfg->clear_isr = 1;
9559                         break;
9560                 default:
9561                         goto out_msi_disable;
9562                 }
9563         }
9564
9565         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9566                                 (unsigned int)num_online_cpus(),
9567                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9568
9569         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9570                 goto out_msi_disable;
9571
9572         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9573                 goto out_msi_disable;
9574
9575         rc = ipr_alloc_mem(ioa_cfg);
9576         if (rc < 0) {
9577                 dev_err(&pdev->dev,
9578                         "Couldn't allocate enough memory for device driver!\n");
9579                 goto out_msi_disable;
9580         }
9581
9582         /* Save away PCI config space for use following IOA reset */
9583         rc = pci_save_state(pdev);
9584
9585         if (rc != PCIBIOS_SUCCESSFUL) {
9586                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9587                 rc = -EIO;
9588                 goto cleanup_nolog;
9589         }
9590
9591         /*
9592          * If HRRQ updated interrupt is not masked, or reset alert is set,
9593          * the card is in an unknown state and needs a hard reset
9594          */
9595         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9596         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9597         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9598         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9599                 ioa_cfg->needs_hard_reset = 1;
9600         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9601                 ioa_cfg->needs_hard_reset = 1;
9602         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9603                 ioa_cfg->ioa_unit_checked = 1;
9604
9605         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9606         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9607         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9608
9609         if (pdev->msi_enabled || pdev->msix_enabled) {
9610                 name_msi_vectors(ioa_cfg);
9611                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
9612                         ioa_cfg->vectors_info[0].desc,
9613                         &ioa_cfg->hrrq[0]);
9614                 if (!rc)
9615                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
9616         } else {
9617                 rc = request_irq(pdev->irq, ipr_isr,
9618                          IRQF_SHARED,
9619                          IPR_NAME, &ioa_cfg->hrrq[0]);
9620         }
9621         if (rc) {
9622                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9623                         pdev->irq, rc);
9624                 goto cleanup_nolog;
9625         }
9626
9627         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9628             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9629                 ioa_cfg->needs_warm_reset = 1;
9630                 ioa_cfg->reset = ipr_reset_slot_reset;
9631
9632                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
9633                                                                 WQ_MEM_RECLAIM, host->host_no);
9634
9635                 if (!ioa_cfg->reset_work_q) {
9636                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
9637                         rc = -ENOMEM;
9638                         goto out_free_irq;
9639                 }
9640         } else
9641                 ioa_cfg->reset = ipr_reset_start_bist;
9642
9643         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9644         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9645         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9646
9647         LEAVE;
9648 out:
9649         return rc;
9650
9651 out_free_irq:
9652         ipr_free_irqs(ioa_cfg);
9653 cleanup_nolog:
9654         ipr_free_mem(ioa_cfg);
9655 out_msi_disable:
9656         ipr_wait_for_pci_err_recovery(ioa_cfg);
9657         pci_free_irq_vectors(pdev);
9658 cleanup_nomem:
9659         iounmap(ipr_regs);
9660 out_disable:
9661         pci_disable_device(pdev);
9662 out_release_regions:
9663         pci_release_regions(pdev);
9664 out_scsi_host_put:
9665         scsi_host_put(host);
9666         goto out;
9667 }
9668
9669 /**
9670  * ipr_initiate_ioa_bringdown - Bring down an adapter
9671  * @ioa_cfg:            ioa config struct
9672  * @shutdown_type:      shutdown type
9673  *
9674  * Description: This function will initiate bringing down the adapter.
9675  * This consists of issuing an IOA shutdown to the adapter
9676  * to flush the cache, and running BIST.
9677  * If the caller needs to wait on the completion of the reset,
9678  * the caller must sleep on the reset_wait_q.
9679  *
9680  * Return value:
9681  *      none
9682  **/
9683 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9684                                        enum ipr_shutdown_type shutdown_type)
9685 {
9686         ENTER;
9687         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9688                 ioa_cfg->sdt_state = ABORT_DUMP;
9689         ioa_cfg->reset_retries = 0;
9690         ioa_cfg->in_ioa_bringdown = 1;
9691         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9692         LEAVE;
9693 }
9694
9695 /**
9696  * __ipr_remove - Remove a single adapter
9697  * @pdev:       pci device struct
9698  *
9699  * Adapter hot plug remove entry point.
9700  *
9701  * Return value:
9702  *      none
9703  **/
9704 static void __ipr_remove(struct pci_dev *pdev)
9705 {
9706         unsigned long host_lock_flags = 0;
9707         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9708         int i;
9709         unsigned long driver_lock_flags;
9710         ENTER;
9711
9712         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9713         while (ioa_cfg->in_reset_reload) {
9714                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9715                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9716                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9717         }
9718
9719         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9720                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9721                 ioa_cfg->hrrq[i].removing_ioa = 1;
9722                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9723         }
9724         wmb();
9725         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9726
9727         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9728         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9729         flush_work(&ioa_cfg->work_q);
9730         if (ioa_cfg->reset_work_q)
9731                 flush_workqueue(ioa_cfg->reset_work_q);
9732         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9733         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9734
9735         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9736         list_del(&ioa_cfg->queue);
9737         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9738
9739         if (ioa_cfg->sdt_state == ABORT_DUMP)
9740                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9741         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9742
9743         ipr_free_all_resources(ioa_cfg);
9744
9745         LEAVE;
9746 }
9747
9748 /**
9749  * ipr_remove - IOA hot plug remove entry point
9750  * @pdev:       pci device struct
9751  *
9752  * Adapter hot plug remove entry point.
9753  *
9754  * Return value:
9755  *      none
9756  **/
9757 static void ipr_remove(struct pci_dev *pdev)
9758 {
9759         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9760
9761         ENTER;
9762
9763         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9764                               &ipr_trace_attr);
9765         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9766                              &ipr_dump_attr);
9767         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9768                         &ipr_ioa_async_err_log);
9769         scsi_remove_host(ioa_cfg->host);
9770
9771         __ipr_remove(pdev);
9772
9773         LEAVE;
9774 }
9775
9776 /**
9777  * ipr_probe - Adapter hot plug add entry point
9778  * @pdev:       pci device struct
9779  * @dev_id:     pci device ID
9780  *
9781  * Return value:
9782  *      0 on success / non-zero on failure
9783  **/
9784 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9785 {
9786         struct ipr_ioa_cfg *ioa_cfg;
9787         unsigned long flags;
9788         int rc, i;
9789
9790         rc = ipr_probe_ioa(pdev, dev_id);
9791
9792         if (rc)
9793                 return rc;
9794
9795         ioa_cfg = pci_get_drvdata(pdev);
9796         ipr_probe_ioa_part2(ioa_cfg);
9797
9798         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9799
9800         if (rc) {
9801                 __ipr_remove(pdev);
9802                 return rc;
9803         }
9804
9805         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9806                                    &ipr_trace_attr);
9807
9808         if (rc) {
9809                 scsi_remove_host(ioa_cfg->host);
9810                 __ipr_remove(pdev);
9811                 return rc;
9812         }
9813
9814         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
9815                         &ipr_ioa_async_err_log);
9816
9817         if (rc) {
9818                 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9819                                 &ipr_dump_attr);
9820                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9821                                 &ipr_trace_attr);
9822                 scsi_remove_host(ioa_cfg->host);
9823                 __ipr_remove(pdev);
9824                 return rc;
9825         }

        rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
                                   &ipr_dump_attr);

        if (rc) {
                sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_ioa_async_err_log);
                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_trace_attr);
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }

        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        ioa_cfg->scan_enabled = 1;
        schedule_work(&ioa_cfg->work_q);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

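        /*
         * On SIS-64 adapters with multiple MSI-X vectors, completions on
         * the secondary HRRQs are processed via irq_poll rather than
         * entirely in hard interrupt context.
         */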
        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
                        irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
                                        ioa_cfg->iopoll_weight, ipr_iopoll);
                }
        }

        scsi_scan_host(ioa_cfg->host);

        return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:       pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It issues
 * a shutdown command to the adapter to flush its write cache.
 *
 * Return value:
 *      none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
        unsigned long lock_flags = 0;
        enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
        int i;

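        /* Stop irq_poll processing before taking the adapter down. */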
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                ioa_cfg->iopoll_weight = 0;
                for (i = 1; i < ioa_cfg->hrrq_num; i++)
                        irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
        }

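        /* Wait out any reset/reload that is already in flight. */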
        while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        }

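        /*
         * With fast reboot enabled, SIS-64 adapters are only quiesced on
         * restart rather than fully shut down, which makes the reboot
         * noticeably faster.
         */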
        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
                shutdown_type = IPR_SHUTDOWN_QUIESCE;

        ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
                ipr_free_irqs(ioa_cfg);
                pci_disable_device(ioa_cfg->pdev);
        }
}
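
/*
 * Illustrative usage, not part of the driver: the quiesce fast path above is
 * gated on the ipr_fast_reboot module parameter. Assuming it is exposed
 * under the name "fast_reboot", it could be enabled at runtime with:
 *
 *   echo 1 > /sys/module/ipr/parameters/fast_reboot
 */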
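/*
 * PCI IDs this driver claims. Each entry is
 * { vendor, device, subvendor, subdevice, class, class_mask, driver_data };
 * driver_data carries per-adapter quirk flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET.
 */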
static const struct pci_device_id ipr_pci_table[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
        { }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

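/* PCI error recovery (AER) callbacks, implemented earlier in this file. */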
static const struct pci_error_handlers ipr_err_handler = {
        .error_detected = ipr_pci_error_detected,
        .mmio_enabled = ipr_pci_mmio_enabled,
        .slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
        .name = IPR_NAME,
        .id_table = ipr_pci_table,
        .probe = ipr_probe,
        .remove = ipr_remove,
        .shutdown = ipr_shutdown,
        .err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:   ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
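        /* Nothing waits on this command; just return it to the free queue. */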
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb: Notifier block
 * @event: Notifier event
 * @buf: Notifier data (unused)
 *
 * Return value:
 *      NOTIFY_OK when the event was handled / NOTIFY_DONE otherwise
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioa_cfg *ioa_cfg;
        unsigned long flags = 0, driver_lock_flags;

        if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
                return NOTIFY_DONE;

        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

        list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
                    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
                        continue;
                }

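                /*
                 * Issue a Shutdown Prepare For Normal IOA command so the
                 * adapter can bring its write cache to a safe state before
                 * the system goes down.
                 */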
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

                ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
        }
        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

        return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
        .notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *      0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
        int rc;

        ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
                 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

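        /* Register the reboot notifier first; unwind it if PCI registration fails. */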
        register_reboot_notifier(&ipr_notifier);
        rc = pci_register_driver(&ipr_driver);
        if (rc) {
                unregister_reboot_notifier(&ipr_notifier);
                return rc;
        }

        return 0;
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *      none
 **/
static void __exit ipr_exit(void)
{
        unregister_reboot_notifier(&ipr_notifier);
        pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);