// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for the adapter to become operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSI-X interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
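
/*
 * Illustrative module load (editorial example, not from the original
 * source; the parameter names are the module_param_named() aliases
 * defined above):
 *
 *      modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * Parameters declared with S_IRUGO | S_IWUSR (fastfail, debug,
 * fast_reboot) can also be read and changed at run time through
 * /sys/module/ipr/parameters/.
 */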

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shut down"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4086: SAS Adapter Hardware Configuration Error"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

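/*
 * SES product ID table. Each entry pairs a product ID with a per-byte
 * compare mask and a maximum SCSI bus speed, in MB/s, for the
 * enclosure. (Editorial note, mask semantics assumed from how this
 * table is consulted elsewhere in the driver: 'X' marks a byte that
 * must match; any other character, such as '*', is a don't-care.)
 */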
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function for the command
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
        unsigned long hrrq_flags;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_sata_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = __ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR the
 * appropriate bits into the address.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:               done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct timer_list *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct timer_list *),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

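/**
 * ipr_get_hrrq_index - Select an HRR queue index for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Round-robins across the HRR queues, reserving queue 0 for
 * initialization commands when more than one queue is configured.
 * (Editorial kerneldoc added for this previously undocumented helper.)
 *
 * Return value:
 *      HRRQ index
 **/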
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

1287 /**
1288  * __ipr_format_res_path - Format the resource path for printing.
1289  * @res_path:   resource path
1290  * @buf:        buffer
1291  * @len:        length of buffer provided
1292  *
1293  * Return value:
1294  *      pointer to buffer
1295  **/
1296 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1297 {
1298         int i;
1299         char *p = buffer;
1300
1301         *p = '\0';
1302         p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1303         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1304                 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1305
1306         return buffer;
1307 }
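/*
 * Example use (hypothetical caller, not part of the driver): a resource
 * path is a 0xff-terminated byte array, so
 *
 *	char buf[IPR_MAX_RES_PATH_LENGTH];
 *	u8 path[] = { 0x00, 0x02, 0x04, 0xff };
 *
 *	__ipr_format_res_path(path, buf, sizeof(buf));
 *
 * leaves the string "00-02-04" in buf.
 */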
1308
1309 /**
1310  * ipr_format_res_path - Format the resource path for printing.
1311  * @ioa_cfg:    ioa config struct
1312  * @res_path:   resource path
1313  * @buf:        buffer
1314  * @len:        length of buffer provided
1315  *
1316  * Return value:
1317  *      pointer to buffer
1318  **/
1319 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1320                                  u8 *res_path, char *buffer, int len)
1321 {
1322         char *p = buffer;
1323
1324         *p = '\0';
1325         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1326         __ipr_format_res_path(res_path, p, len - (p - buffer));
1327         return buffer;
1328 }
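/*
 * Same hypothetical path as above, but prefixed with the SCSI host
 * number: for host_no == 2 the formatted result is "2/00-02-04".
 */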
1329
1330 /**
1331  * ipr_update_res_entry - Update the resource entry.
1332  * @res:        resource entry struct
1333  * @cfgtew:     config table entry wrapper struct
1334  *
1335  * Return value:
1336  *      none
1337  **/
1338 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1339                                  struct ipr_config_table_entry_wrapper *cfgtew)
1340 {
1341         char buffer[IPR_MAX_RES_PATH_LENGTH];
1342         unsigned int proto;
1343         int new_path = 0;
1344
1345         if (res->ioa_cfg->sis64) {
1346                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1347                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1348                 res->type = cfgtew->u.cfgte64->res_type;
1349
1350                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1351                         sizeof(struct ipr_std_inq_data));
1352
1353                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1354                 proto = cfgtew->u.cfgte64->proto;
1355                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1356                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1357
1358                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1359                         sizeof(res->dev_lun.scsi_lun));
1360
1361                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1362                                         sizeof(res->res_path))) {
1363                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1364                                 sizeof(res->res_path));
1365                         new_path = 1;
1366                 }
1367
1368                 if (res->sdev && new_path)
1369                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1370                                     ipr_format_res_path(res->ioa_cfg,
1371                                         res->res_path, buffer, sizeof(buffer)));
1372         } else {
1373                 res->flags = cfgtew->u.cfgte->flags;
1374                 if (res->flags & IPR_IS_IOA_RESOURCE)
1375                         res->type = IPR_RES_TYPE_IOAFP;
1376                 else
1377                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1378
1379                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1380                         sizeof(struct ipr_std_inq_data));
1381
1382                 res->qmodel = IPR_QUEUEING_MODEL(res);
1383                 proto = cfgtew->u.cfgte->proto;
1384                 res->res_handle = cfgtew->u.cfgte->res_handle;
1385         }
1386
1387         ipr_update_ata_class(res, proto);
1388 }
1389
1390 /**
1391  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1392  *                        for the resource.
1393  * @res:        resource entry struct
1395  *
1396  * Return value:
1397  *      none
1398  **/
1399 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1400 {
1401         struct ipr_resource_entry *gscsi_res = NULL;
1402         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1403
1404         if (!ioa_cfg->sis64)
1405                 return;
1406
1407         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1408                 clear_bit(res->target, ioa_cfg->array_ids);
1409         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1410                 clear_bit(res->target, ioa_cfg->vset_ids);
1411         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1412                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1413                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1414                                 return;
1415                 clear_bit(res->target, ioa_cfg->target_ids);
1416
1417         } else if (res->bus == 0)
1418                 clear_bit(res->target, ioa_cfg->target_ids);
1419 }
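/*
 * Note on the generic SCSI case above: on SIS-64, all LUNs of a device
 * share one target ID keyed by dev_id, so the list walk bails out while
 * any other resource with the same dev_id is still queued; only the last
 * such resource actually clears the bit and frees the target ID.
 */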
1420
1421 /**
1422  * ipr_handle_config_change - Handle a config change from the adapter
1423  * @ioa_cfg:    ioa config struct
1424  * @hostrcb:    hostrcb struct
1425  *
1426  * Return value:
1427  *      none
1428  **/
1429 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1430                                      struct ipr_hostrcb *hostrcb)
1431 {
1432         struct ipr_resource_entry *res = NULL;
1433         struct ipr_config_table_entry_wrapper cfgtew;
1434         __be32 cc_res_handle;
1436         u32 is_ndn = 1;
1437
1438         if (ioa_cfg->sis64) {
1439                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1440                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1441         } else {
1442                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1443                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1444         }
1445
1446         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1447                 if (res->res_handle == cc_res_handle) {
1448                         is_ndn = 0;
1449                         break;
1450                 }
1451         }
1452
1453         if (is_ndn) {
1454                 if (list_empty(&ioa_cfg->free_res_q)) {
1455                         ipr_send_hcam(ioa_cfg,
1456                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1457                                       hostrcb);
1458                         return;
1459                 }
1460
1461                 res = list_entry(ioa_cfg->free_res_q.next,
1462                                  struct ipr_resource_entry, queue);
1463
1464                 list_del(&res->queue);
1465                 ipr_init_res_entry(res, &cfgtew);
1466                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1467         }
1468
1469         ipr_update_res_entry(res, &cfgtew);
1470
1471         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1472                 if (res->sdev) {
1473                         res->del_from_ml = 1;
1474                         res->res_handle = IPR_INVALID_RES_HANDLE;
1475                         schedule_work(&ioa_cfg->work_q);
1476                 } else {
1477                         ipr_clear_res_target(res);
1478                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1479                 }
1480         } else if (!res->sdev || res->del_from_ml) {
1481                 res->add_to_ml = 1;
1482                 schedule_work(&ioa_cfg->work_q);
1483         }
1484
1485         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1486 }
1487
1488 /**
1489  * ipr_process_ccn - Op done function for a CCN.
1490  * @ipr_cmd:    ipr command struct
1491  *
1492  * This function is the op done function for a configuration change
1493  * notification Host Controlled Async Message (HCAM) from the adapter.
1494  *
1495  * Return value:
1496  *      none
1497  **/
1498 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1499 {
1500         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1501         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1502         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1503
1504         list_del_init(&hostrcb->queue);
1505         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1506
1507         if (ioasc) {
1508                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1509                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1510                         dev_err(&ioa_cfg->pdev->dev,
1511                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1512
1513                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1514         } else {
1515                 ipr_handle_config_change(ioa_cfg, hostrcb);
1516         }
1517 }
1518
1519 /**
1520  * strip_whitespace - Strip trailing whitespace and NUL terminate.
1521  * @i:          length of the string in buf
1522  * @buf:        string to modify
1523  *
1524  * This function will strip all trailing whitespace and
1525  * NUL terminate the string.
1526  *
1527  **/
1528 static void strip_whitespace(int i, char *buf)
1529 {
1530         if (i < 1)
1531                 return;
1532         i--;
1533         while (i && buf[i] == ' ')
1534                 i--;
1535         buf[i+1] = '\0';
1536 }
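/*
 * Example (hypothetical buffer contents): for the fixed-width INQUIRY
 * field { 'I', 'B', 'M', ' ', ' ', ' ', ' ', ' ' } and i == 8, the
 * trailing blanks are dropped and buf becomes the C string "IBM".
 */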
1537
1538 /**
1539  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1540  * @prefix:             string to print at start of printk
1541  * @hostrcb:    hostrcb pointer
1542  * @vpd:                vendor/product id/sn struct
1543  *
1544  * Return value:
1545  *      none
1546  **/
1547 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1548                                 struct ipr_vpd *vpd)
1549 {
1550         char vendor_id[IPR_VENDOR_ID_LEN + 1];
1551         char product_id[IPR_PROD_ID_LEN + 1];
1552         char sn[IPR_SERIAL_NUM_LEN + 1];
1553
1554         memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1555         strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1556
1557         memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1558         strip_whitespace(IPR_PROD_ID_LEN, product_id);
1559
1560         memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1561         strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1562
1563         ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1564                      vendor_id, product_id, sn);
1565 }
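/*
 * With hypothetical VPD contents, a call such as
 *
 *	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
 *
 * emits one log line of the form (ignoring the ipr_hcam_err prefix):
 *
 *	Remote IOA VPID/SN: IBM 2780001 12345678
 */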
1566
1567 /**
1568  * ipr_log_vpd - Log the passed VPD to the error log.
1569  * @vpd:                vendor/product id/sn struct
1570  *
1571  * Return value:
1572  *      none
1573  **/
1574 static void ipr_log_vpd(struct ipr_vpd *vpd)
1575 {
1576         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1577                     + IPR_SERIAL_NUM_LEN];
1578
1579         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1580         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1581                IPR_PROD_ID_LEN);
1582         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1583         ipr_err("Vendor/Product ID: %s\n", buffer);
1584
1585         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1586         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1587         ipr_err("    Serial Number: %s\n", buffer);
1588 }
1589
1590 /**
1591  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1592  * @prefix:             string to print at start of printk
1593  * @hostrcb:    hostrcb pointer
1594  * @vpd:                vendor/product id/sn/wwn struct
1595  *
1596  * Return value:
1597  *      none
1598  **/
1599 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1600                                     struct ipr_ext_vpd *vpd)
1601 {
1602         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1603         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1604                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1605 }
1606
1607 /**
1608  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1609  * @vpd:                vendor/product id/sn/wwn struct
1610  *
1611  * Return value:
1612  *      none
1613  **/
1614 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1615 {
1616         ipr_log_vpd(&vpd->vpd);
1617         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1618                 be32_to_cpu(vpd->wwid[1]));
1619 }
1620
1621 /**
1622  * ipr_log_enhanced_cache_error - Log a cache error.
1623  * @ioa_cfg:    ioa config struct
1624  * @hostrcb:    hostrcb struct
1625  *
1626  * Return value:
1627  *      none
1628  **/
1629 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1630                                          struct ipr_hostrcb *hostrcb)
1631 {
1632         struct ipr_hostrcb_type_12_error *error;
1633
1634         if (ioa_cfg->sis64)
1635                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1636         else
1637                 error = &hostrcb->hcam.u.error.u.type_12_error;
1638
1639         ipr_err("-----Current Configuration-----\n");
1640         ipr_err("Cache Directory Card Information:\n");
1641         ipr_log_ext_vpd(&error->ioa_vpd);
1642         ipr_err("Adapter Card Information:\n");
1643         ipr_log_ext_vpd(&error->cfc_vpd);
1644
1645         ipr_err("-----Expected Configuration-----\n");
1646         ipr_err("Cache Directory Card Information:\n");
1647         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1648         ipr_err("Adapter Card Information:\n");
1649         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1650
1651         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1652                      be32_to_cpu(error->ioa_data[0]),
1653                      be32_to_cpu(error->ioa_data[1]),
1654                      be32_to_cpu(error->ioa_data[2]));
1655 }
1656
1657 /**
1658  * ipr_log_cache_error - Log a cache error.
1659  * @ioa_cfg:    ioa config struct
1660  * @hostrcb:    hostrcb struct
1661  *
1662  * Return value:
1663  *      none
1664  **/
1665 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1666                                 struct ipr_hostrcb *hostrcb)
1667 {
1668         struct ipr_hostrcb_type_02_error *error =
1669                 &hostrcb->hcam.u.error.u.type_02_error;
1670
1671         ipr_err("-----Current Configuration-----\n");
1672         ipr_err("Cache Directory Card Information:\n");
1673         ipr_log_vpd(&error->ioa_vpd);
1674         ipr_err("Adapter Card Information:\n");
1675         ipr_log_vpd(&error->cfc_vpd);
1676
1677         ipr_err("-----Expected Configuration-----\n");
1678         ipr_err("Cache Directory Card Information:\n");
1679         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1680         ipr_err("Adapter Card Information:\n");
1681         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1682
1683         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1684                      be32_to_cpu(error->ioa_data[0]),
1685                      be32_to_cpu(error->ioa_data[1]),
1686                      be32_to_cpu(error->ioa_data[2]));
1687 }
1688
1689 /**
1690  * ipr_log_enhanced_config_error - Log a configuration error.
1691  * @ioa_cfg:    ioa config struct
1692  * @hostrcb:    hostrcb struct
1693  *
1694  * Return value:
1695  *      none
1696  **/
1697 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1698                                           struct ipr_hostrcb *hostrcb)
1699 {
1700         int errors_logged, i;
1701         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1702         struct ipr_hostrcb_type_13_error *error;
1703
1704         error = &hostrcb->hcam.u.error.u.type_13_error;
1705         errors_logged = be32_to_cpu(error->errors_logged);
1706
1707         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1708                 be32_to_cpu(error->errors_detected), errors_logged);
1709
1710         dev_entry = error->dev;
1711
1712         for (i = 0; i < errors_logged; i++, dev_entry++) {
1713                 ipr_err_separator;
1714
1715                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1716                 ipr_log_ext_vpd(&dev_entry->vpd);
1717
1718                 ipr_err("-----New Device Information-----\n");
1719                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1720
1721                 ipr_err("Cache Directory Card Information:\n");
1722                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1723
1724                 ipr_err("Adapter Card Information:\n");
1725                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1726         }
1727 }
1728
1729 /**
1730  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1731  * @ioa_cfg:    ioa config struct
1732  * @hostrcb:    hostrcb struct
1733  *
1734  * Return value:
1735  *      none
1736  **/
1737 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1738                                        struct ipr_hostrcb *hostrcb)
1739 {
1740         int errors_logged, i;
1741         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1742         struct ipr_hostrcb_type_23_error *error;
1743         char buffer[IPR_MAX_RES_PATH_LENGTH];
1744
1745         error = &hostrcb->hcam.u.error64.u.type_23_error;
1746         errors_logged = be32_to_cpu(error->errors_logged);
1747
1748         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1749                 be32_to_cpu(error->errors_detected), errors_logged);
1750
1751         dev_entry = error->dev;
1752
1753         for (i = 0; i < errors_logged; i++, dev_entry++) {
1754                 ipr_err_separator;
1755
1756                 ipr_err("Device %d : %s\n", i + 1,
1757                         __ipr_format_res_path(dev_entry->res_path,
1758                                               buffer, sizeof(buffer)));
1759                 ipr_log_ext_vpd(&dev_entry->vpd);
1760
1761                 ipr_err("-----New Device Information-----\n");
1762                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1763
1764                 ipr_err("Cache Directory Card Information:\n");
1765                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1766
1767                 ipr_err("Adapter Card Information:\n");
1768                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1769         }
1770 }
1771
1772 /**
1773  * ipr_log_config_error - Log a configuration error.
1774  * @ioa_cfg:    ioa config struct
1775  * @hostrcb:    hostrcb struct
1776  *
1777  * Return value:
1778  *      none
1779  **/
1780 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1781                                  struct ipr_hostrcb *hostrcb)
1782 {
1783         int errors_logged, i;
1784         struct ipr_hostrcb_device_data_entry *dev_entry;
1785         struct ipr_hostrcb_type_03_error *error;
1786
1787         error = &hostrcb->hcam.u.error.u.type_03_error;
1788         errors_logged = be32_to_cpu(error->errors_logged);
1789
1790         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1791                 be32_to_cpu(error->errors_detected), errors_logged);
1792
1793         dev_entry = error->dev;
1794
1795         for (i = 0; i < errors_logged; i++, dev_entry++) {
1796                 ipr_err_separator;
1797
1798                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1799                 ipr_log_vpd(&dev_entry->vpd);
1800
1801                 ipr_err("-----New Device Information-----\n");
1802                 ipr_log_vpd(&dev_entry->new_vpd);
1803
1804                 ipr_err("Cache Directory Card Information:\n");
1805                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1806
1807                 ipr_err("Adapter Card Information:\n");
1808                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1809
1810                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1811                         be32_to_cpu(dev_entry->ioa_data[0]),
1812                         be32_to_cpu(dev_entry->ioa_data[1]),
1813                         be32_to_cpu(dev_entry->ioa_data[2]),
1814                         be32_to_cpu(dev_entry->ioa_data[3]),
1815                         be32_to_cpu(dev_entry->ioa_data[4]));
1816         }
1817 }
1818
1819 /**
1820  * ipr_log_enhanced_array_error - Log an array configuration error.
1821  * @ioa_cfg:    ioa config struct
1822  * @hostrcb:    hostrcb struct
1823  *
1824  * Return value:
1825  *      none
1826  **/
1827 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1828                                          struct ipr_hostrcb *hostrcb)
1829 {
1830         int i, num_entries;
1831         struct ipr_hostrcb_type_14_error *error;
1832         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1833         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1834
1835         error = &hostrcb->hcam.u.error.u.type_14_error;
1836
1837         ipr_err_separator;
1838
1839         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1840                 error->protection_level,
1841                 ioa_cfg->host->host_no,
1842                 error->last_func_vset_res_addr.bus,
1843                 error->last_func_vset_res_addr.target,
1844                 error->last_func_vset_res_addr.lun);
1845
1846         ipr_err_separator;
1847
1848         array_entry = error->array_member;
1849         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1850                             ARRAY_SIZE(error->array_member));
1851
1852         for (i = 0; i < num_entries; i++, array_entry++) {
1853                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1854                         continue;
1855
1856                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1857                         ipr_err("Exposed Array Member %d:\n", i);
1858                 else
1859                         ipr_err("Array Member %d:\n", i);
1860
1861                 ipr_log_ext_vpd(&array_entry->vpd);
1862                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1863                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1864                                  "Expected Location");
1865
1866                 ipr_err_separator;
1867         }
1868 }
1869
1870 /**
1871  * ipr_log_array_error - Log an array configuration error.
1872  * @ioa_cfg:    ioa config struct
1873  * @hostrcb:    hostrcb struct
1874  *
1875  * Return value:
1876  *      none
1877  **/
1878 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1879                                 struct ipr_hostrcb *hostrcb)
1880 {
1881         int i;
1882         struct ipr_hostrcb_type_04_error *error;
1883         struct ipr_hostrcb_array_data_entry *array_entry;
1884         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1885
1886         error = &hostrcb->hcam.u.error.u.type_04_error;
1887
1888         ipr_err_separator;
1889
1890         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1891                 error->protection_level,
1892                 ioa_cfg->host->host_no,
1893                 error->last_func_vset_res_addr.bus,
1894                 error->last_func_vset_res_addr.target,
1895                 error->last_func_vset_res_addr.lun);
1896
1897         ipr_err_separator;
1898
1899         array_entry = error->array_member;
1900
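                /*
                 * The 18 possible members are split across two fixed
                 * arrays in the hostrcb: entries 0-9 come from
                 * array_member and entries 10-17 from array_member2,
                 * hence the switch at i == 9 below.
                 */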
1901         for (i = 0; i < 18; i++) {
1902                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1903                         continue;
1904
1905                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1906                         ipr_err("Exposed Array Member %d:\n", i);
1907                 else
1908                         ipr_err("Array Member %d:\n", i);
1909
1910                 ipr_log_vpd(&array_entry->vpd);
1911
1912                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1913                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1914                                  "Expected Location");
1915
1916                 ipr_err_separator;
1917
1918                 if (i == 9)
1919                         array_entry = error->array_member2;
1920                 else
1921                         array_entry++;
1922         }
1923 }
1924
1925 /**
1926  * ipr_log_hex_data - Log additional hex IOA error data.
1927  * @ioa_cfg:    ioa config struct
1928  * @data:               IOA error data
1929  * @len:                data length
1930  *
1931  * Return value:
1932  *      none
1933  **/
1934 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1935 {
1936         int i;
1937
1938         if (len == 0)
1939                 return;
1940
1941         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1942                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1943
1944         for (i = 0; i < len / 4; i += 4) {
1945                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1946                         be32_to_cpu(data[i]),
1947                         be32_to_cpu(data[i+1]),
1948                         be32_to_cpu(data[i+2]),
1949                         be32_to_cpu(data[i+3]));
1950         }
1951 }
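/*
 * The dump produced above is a byte offset followed by four big-endian
 * 32-bit words per line, e.g. with hypothetical data:
 *
 *	00000000: 04448500 00000000 12345678 9ABCDEF0
 *	00000010: ...
 */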
1952
1953 /**
1954  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1955  * @ioa_cfg:    ioa config struct
1956  * @hostrcb:    hostrcb struct
1957  *
1958  * Return value:
1959  *      none
1960  **/
1961 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1962                                             struct ipr_hostrcb *hostrcb)
1963 {
1964         struct ipr_hostrcb_type_17_error *error;
1965
1966         if (ioa_cfg->sis64)
1967                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1968         else
1969                 error = &hostrcb->hcam.u.error.u.type_17_error;
1970
1971         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1972         strim(error->failure_reason);
1973
1974         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1975                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1976         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1977         ipr_log_hex_data(ioa_cfg, error->data,
1978                          be32_to_cpu(hostrcb->hcam.length) -
1979                          (offsetof(struct ipr_hostrcb_error, u) +
1980                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1981 }
1982
1983 /**
1984  * ipr_log_dual_ioa_error - Log a dual adapter error.
1985  * @ioa_cfg:    ioa config struct
1986  * @hostrcb:    hostrcb struct
1987  *
1988  * Return value:
1989  *      none
1990  **/
1991 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1992                                    struct ipr_hostrcb *hostrcb)
1993 {
1994         struct ipr_hostrcb_type_07_error *error;
1995
1996         error = &hostrcb->hcam.u.error.u.type_07_error;
1997         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1998         strim(error->failure_reason);
1999
2000         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2001                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2002         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2003         ipr_log_hex_data(ioa_cfg, error->data,
2004                          be32_to_cpu(hostrcb->hcam.length) -
2005                          (offsetof(struct ipr_hostrcb_error, u) +
2006                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2007 }
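/*
 * In both dual-IOA loggers the size of the trailing hex dump is derived
 * from the HCAM length, which is measured from the start of the error
 * record: subtracting the offset of the overlay union within struct
 * ipr_hostrcb_error plus the offset of data[] within the type-specific
 * overlay leaves exactly the residual bytes the adapter appended.
 */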
2008
2009 static const struct {
2010         u8 active;
2011         char *desc;
2012 } path_active_desc[] = {
2013         { IPR_PATH_NO_INFO, "Path" },
2014         { IPR_PATH_ACTIVE, "Active path" },
2015         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2016 };
2017
2018 static const struct {
2019         u8 state;
2020         char *desc;
2021 } path_state_desc[] = {
2022         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2023         { IPR_PATH_HEALTHY, "is healthy" },
2024         { IPR_PATH_DEGRADED, "is degraded" },
2025         { IPR_PATH_FAILED, "is failed" }
2026 };
2027
2028 /**
2029  * ipr_log_fabric_path - Log a fabric path error
2030  * @hostrcb:    hostrcb struct
2031  * @fabric:             fabric descriptor
2032  *
2033  * Return value:
2034  *      none
2035  **/
2036 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2037                                 struct ipr_hostrcb_fabric_desc *fabric)
2038 {
2039         int i, j;
2040         u8 path_state = fabric->path_state;
2041         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2042         u8 state = path_state & IPR_PATH_STATE_MASK;
2043
2044         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2045                 if (path_active_desc[i].active != active)
2046                         continue;
2047
2048                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2049                         if (path_state_desc[j].state != state)
2050                                 continue;
2051
2052                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2053                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2054                                              path_active_desc[i].desc, path_state_desc[j].desc,
2055                                              fabric->ioa_port);
2056                         } else if (fabric->cascaded_expander == 0xff) {
2057                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2058                                              path_active_desc[i].desc, path_state_desc[j].desc,
2059                                              fabric->ioa_port, fabric->phy);
2060                         } else if (fabric->phy == 0xff) {
2061                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2062                                              path_active_desc[i].desc, path_state_desc[j].desc,
2063                                              fabric->ioa_port, fabric->cascaded_expander);
2064                         } else {
2065                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2066                                              path_active_desc[i].desc, path_state_desc[j].desc,
2067                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2068                         }
2069                         return;
2070                 }
2071         }
2072
2073         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2074                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2075 }
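/*
 * A value of 0xff in cascaded_expander or phy means "no such component",
 * which is why the message degrades field by field; e.g. with only the
 * expander absent (values hypothetical):
 *
 *	Active path is healthy: IOA Port=0, Phy=3
 */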
2076
2077 /**
2078  * ipr_log64_fabric_path - Log a fabric path error
2079  * @hostrcb:    hostrcb struct
2080  * @fabric:             fabric descriptor
2081  *
2082  * Return value:
2083  *      none
2084  **/
2085 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2086                                   struct ipr_hostrcb64_fabric_desc *fabric)
2087 {
2088         int i, j;
2089         u8 path_state = fabric->path_state;
2090         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2091         u8 state = path_state & IPR_PATH_STATE_MASK;
2092         char buffer[IPR_MAX_RES_PATH_LENGTH];
2093
2094         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2095                 if (path_active_desc[i].active != active)
2096                         continue;
2097
2098                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2099                         if (path_state_desc[j].state != state)
2100                                 continue;
2101
2102                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2103                                      path_active_desc[i].desc, path_state_desc[j].desc,
2104                                      ipr_format_res_path(hostrcb->ioa_cfg,
2105                                                 fabric->res_path,
2106                                                 buffer, sizeof(buffer)));
2107                         return;
2108                 }
2109         }
2110
2111         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2112                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2113                                     buffer, sizeof(buffer)));
2114 }
2115
2116 static const struct {
2117         u8 type;
2118         char *desc;
2119 } path_type_desc[] = {
2120         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2121         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2122         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2123         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2124 };
2125
2126 static const struct {
2127         u8 status;
2128         char *desc;
2129 } path_status_desc[] = {
2130         { IPR_PATH_CFG_NO_PROB, "Functional" },
2131         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2132         { IPR_PATH_CFG_FAILED, "Failed" },
2133         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2134         { IPR_PATH_NOT_DETECTED, "Missing" },
2135         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2136 };
2137
2138 static const char *link_rate[] = {
2139         "unknown",
2140         "disabled",
2141         "phy reset problem",
2142         "spinup hold",
2143         "port selector",
2144         "unknown",
2145         "unknown",
2146         "unknown",
2147         "1.5Gbps",
2148         "3.0Gbps",
2149         "unknown",
2150         "unknown",
2151         "unknown",
2152         "unknown",
2153         "unknown",
2154         "unknown"
2155 };
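/*
 * The table is indexed by cfg->link_rate & IPR_PHY_LINK_RATE_MASK and
 * follows the SAS link-rate encoding, where 0x8 is 1.5 Gbps and 0x9 is
 * 3.0 Gbps; codes with no assigned meaning print as "unknown".
 */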
2156
2157 /**
2158  * ipr_log_path_elem - Log a fabric path element.
2159  * @hostrcb:    hostrcb struct
2160  * @cfg:                fabric path element struct
2161  *
2162  * Return value:
2163  *      none
2164  **/
2165 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2166                               struct ipr_hostrcb_config_element *cfg)
2167 {
2168         int i, j;
2169         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2170         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2171
2172         if (type == IPR_PATH_CFG_NOT_EXIST)
2173                 return;
2174
2175         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2176                 if (path_type_desc[i].type != type)
2177                         continue;
2178
2179                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2180                         if (path_status_desc[j].status != status)
2181                                 continue;
2182
2183                         if (type == IPR_PATH_CFG_IOA_PORT) {
2184                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2185                                              path_status_desc[j].desc, path_type_desc[i].desc,
2186                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2187                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2188                         } else {
2189                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2190                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2191                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2192                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2193                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2194                                 } else if (cfg->cascaded_expander == 0xff) {
2195                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2196                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2197                                                      path_type_desc[i].desc, cfg->phy,
2198                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2199                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2200                                 } else if (cfg->phy == 0xff) {
2201                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2202                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2203                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2204                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2205                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2206                                 } else {
2207                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2208                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2209                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2210                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2212                                 }
2213                         }
2214                         return;
2215                 }
2216         }
2217
2218         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2219                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2220                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223
2224 /**
2225  * ipr_log64_path_elem - Log a fabric path element.
2226  * @hostrcb:    hostrcb struct
2227  * @cfg:                fabric path element struct
2228  *
2229  * Return value:
2230  *      none
2231  **/
2232 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2233                                 struct ipr_hostrcb64_config_element *cfg)
2234 {
2235         int i, j;
2236         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2237         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2238         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2239         char buffer[IPR_MAX_RES_PATH_LENGTH];
2240
2241         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2242                 return;
2243
2244         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2245                 if (path_type_desc[i].type != type)
2246                         continue;
2247
2248                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2249                         if (path_status_desc[j].status != status)
2250                                 continue;
2251
2252                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2253                                      path_status_desc[j].desc, path_type_desc[i].desc,
2254                                      ipr_format_res_path(hostrcb->ioa_cfg,
2255                                         cfg->res_path, buffer, sizeof(buffer)),
2256                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2257                                         be32_to_cpu(cfg->wwid[0]),
2258                                         be32_to_cpu(cfg->wwid[1]));
2259                         return;
2260                 }
2261         }
2262         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2263                      "WWN=%08X%08X\n", cfg->type_status,
2264                      ipr_format_res_path(hostrcb->ioa_cfg,
2265                         cfg->res_path, buffer, sizeof(buffer)),
2266                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2267                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2268 }
2269
2270 /**
2271  * ipr_log_fabric_error - Log a fabric error.
2272  * @ioa_cfg:    ioa config struct
2273  * @hostrcb:    hostrcb struct
2274  *
2275  * Return value:
2276  *      none
2277  **/
2278 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2279                                  struct ipr_hostrcb *hostrcb)
2280 {
2281         struct ipr_hostrcb_type_20_error *error;
2282         struct ipr_hostrcb_fabric_desc *fabric;
2283         struct ipr_hostrcb_config_element *cfg;
2284         int i, add_len;
2285
2286         error = &hostrcb->hcam.u.error.u.type_20_error;
2287         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2288         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2289
2290         add_len = be32_to_cpu(hostrcb->hcam.length) -
2291                 (offsetof(struct ipr_hostrcb_error, u) +
2292                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2293
2294         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2295                 ipr_log_fabric_path(hostrcb, fabric);
2296                 for_each_fabric_cfg(fabric, cfg)
2297                         ipr_log_path_elem(hostrcb, cfg);
2298
2299                 add_len -= be16_to_cpu(fabric->length);
2300                 fabric = (struct ipr_hostrcb_fabric_desc *)
2301                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2302         }
2303
2304         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2305 }
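/*
 * Fabric descriptors are variable length and packed back to back, so the
 * loop above advances by each descriptor's own length field; whatever is
 * left of the HCAM after the last descriptor (add_len) is dumped raw.
 */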
2306
2307 /**
2308  * ipr_log_sis64_array_error - Log a sis64 array error.
2309  * @ioa_cfg:    ioa config struct
2310  * @hostrcb:    hostrcb struct
2311  *
2312  * Return value:
2313  *      none
2314  **/
2315 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2316                                       struct ipr_hostrcb *hostrcb)
2317 {
2318         int i, num_entries;
2319         struct ipr_hostrcb_type_24_error *error;
2320         struct ipr_hostrcb64_array_data_entry *array_entry;
2321         char buffer[IPR_MAX_RES_PATH_LENGTH];
2322         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2323
2324         error = &hostrcb->hcam.u.error64.u.type_24_error;
2325
2326         ipr_err_separator;
2327
2328         ipr_err("RAID %s Array Configuration: %s\n",
2329                 error->protection_level,
2330                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2331                         buffer, sizeof(buffer)));
2332
2333         ipr_err_separator;
2334
2335         array_entry = error->array_member;
2336         num_entries = min_t(u32, error->num_entries,
2337                             ARRAY_SIZE(error->array_member));
2338
2339         for (i = 0; i < num_entries; i++, array_entry++) {
2340
2341                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2342                         continue;
2343
2344                 if (error->exposed_mode_adn == i)
2345                         ipr_err("Exposed Array Member %d:\n", i);
2346                 else
2347                         ipr_err("Array Member %d:\n", i);
2350                 ipr_log_ext_vpd(&array_entry->vpd);
2351                 ipr_err("Current Location: %s\n",
2352                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2353                                 buffer, sizeof(buffer)));
2354                 ipr_err("Expected Location: %s\n",
2355                          ipr_format_res_path(ioa_cfg,
2356                                 array_entry->expected_res_path,
2357                                 buffer, sizeof(buffer)));
2358
2359                 ipr_err_separator;
2360         }
2361 }
2362
2363 /**
2364  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2365  * @ioa_cfg:    ioa config struct
2366  * @hostrcb:    hostrcb struct
2367  *
2368  * Return value:
2369  *      none
2370  **/
2371 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2372                                        struct ipr_hostrcb *hostrcb)
2373 {
2374         struct ipr_hostrcb_type_30_error *error;
2375         struct ipr_hostrcb64_fabric_desc *fabric;
2376         struct ipr_hostrcb64_config_element *cfg;
2377         int i, add_len;
2378
2379         error = &hostrcb->hcam.u.error64.u.type_30_error;
2380
2381         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2382         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2383
2384         add_len = be32_to_cpu(hostrcb->hcam.length) -
2385                 (offsetof(struct ipr_hostrcb64_error, u) +
2386                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2387
2388         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2389                 ipr_log64_fabric_path(hostrcb, fabric);
2390                 for_each_fabric_cfg(fabric, cfg)
2391                         ipr_log64_path_elem(hostrcb, cfg);
2392
2393                 add_len -= be16_to_cpu(fabric->length);
2394                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2395                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2396         }
2397
2398         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2399 }
2400
2401 /**
2402  * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2403  * @ioa_cfg:    ioa config struct
2404  * @hostrcb:    hostrcb struct
2405  *
2406  * Return value:
2407  *      none
2408  **/
2409 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2410                                        struct ipr_hostrcb *hostrcb)
2411 {
2412         struct ipr_hostrcb_type_41_error *error;
2413
2414         error = &hostrcb->hcam.u.error64.u.type_41_error;
2415
2416         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2417         ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2418         ipr_log_hex_data(ioa_cfg, error->data,
2419                          be32_to_cpu(hostrcb->hcam.length) -
2420                          (offsetof(struct ipr_hostrcb_error, u) +
2421                           offsetof(struct ipr_hostrcb_type_41_error, data)));
2422 }

2423 /**
2424  * ipr_log_generic_error - Log an adapter error.
2425  * @ioa_cfg:    ioa config struct
2426  * @hostrcb:    hostrcb struct
2427  *
2428  * Return value:
2429  *      none
2430  **/
2431 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2432                                   struct ipr_hostrcb *hostrcb)
2433 {
2434         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2435                          be32_to_cpu(hostrcb->hcam.length));
2436 }
2437
2438 /**
2439  * ipr_log_sis64_device_error - Log a sis64 device error.
2440  * @ioa_cfg:    ioa config struct
2441  * @hostrcb:    hostrcb struct
2442  *
2443  * Return value:
2444  *      none
2445  **/
2446 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2447                                          struct ipr_hostrcb *hostrcb)
2448 {
2449         struct ipr_hostrcb_type_21_error *error;
2450         char buffer[IPR_MAX_RES_PATH_LENGTH];
2451
2452         error = &hostrcb->hcam.u.error64.u.type_21_error;
2453
2454         ipr_err("-----Failing Device Information-----\n");
2455         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2456                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2457                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2458         ipr_err("Device Resource Path: %s\n",
2459                 __ipr_format_res_path(error->res_path,
2460                                       buffer, sizeof(buffer)));
2461         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2462         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2463         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2464         ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2465         ipr_err("SCSI Sense Data:\n");
2466         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2467         ipr_err("SCSI Command Descriptor Block:\n");
2468         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2469
2470         ipr_err("Additional IOA Data:\n");
2471         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2472 }
2473
2474 /**
2475  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2476  * @ioasc:      IOASC
2477  *
2478  * This function will return the index into the ipr_error_table
2479  * for the specified IOASC. If the IOASC is not in the table,
2480  * 0 will be returned, which points to the entry used for unknown errors.
2481  *
2482  * Return value:
2483  *      index into the ipr_error_table
2484  **/
2485 static u32 ipr_get_error(u32 ioasc)
2486 {
2487         int i;
2488
2489         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2490                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2491                         return i;
2492
2493         return 0;
2494 }
2495
2496 /**
2497  * ipr_handle_log_data - Log an adapter error.
2498  * @ioa_cfg:    ioa config struct
2499  * @hostrcb:    hostrcb struct
2500  *
2501  * This function logs an adapter error to the system.
2502  *
2503  * Return value:
2504  *      none
2505  **/
2506 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2507                                 struct ipr_hostrcb *hostrcb)
2508 {
2509         u32 ioasc;
2510         int error_index;
2511         struct ipr_hostrcb_type_21_error *error;
2512
2513         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2514                 return;
2515
2516         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2517                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2518
2519         if (ioa_cfg->sis64)
2520                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2521         else
2522                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2523
2524         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2525             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2526                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2527                 scsi_report_bus_reset(ioa_cfg->host,
2528                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2529         }
2530
2531         error_index = ipr_get_error(ioasc);
2532
2533         if (!ipr_error_table[error_index].log_hcam)
2534                 return;
2535
2536         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2537             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2538                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2539
2540                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2541                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2542                         return;
2543         }
2544
2545         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2546
2547         /* Set indication we have logged an error */
2548         ioa_cfg->errors_logged++;
2549
2550         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2551                 return;
2552         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2553                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2554
2555         switch (hostrcb->hcam.overlay_id) {
2556         case IPR_HOST_RCB_OVERLAY_ID_2:
2557                 ipr_log_cache_error(ioa_cfg, hostrcb);
2558                 break;
2559         case IPR_HOST_RCB_OVERLAY_ID_3:
2560                 ipr_log_config_error(ioa_cfg, hostrcb);
2561                 break;
2562         case IPR_HOST_RCB_OVERLAY_ID_4:
2563         case IPR_HOST_RCB_OVERLAY_ID_6:
2564                 ipr_log_array_error(ioa_cfg, hostrcb);
2565                 break;
2566         case IPR_HOST_RCB_OVERLAY_ID_7:
2567                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2568                 break;
2569         case IPR_HOST_RCB_OVERLAY_ID_12:
2570                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2571                 break;
2572         case IPR_HOST_RCB_OVERLAY_ID_13:
2573                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2574                 break;
2575         case IPR_HOST_RCB_OVERLAY_ID_14:
2576         case IPR_HOST_RCB_OVERLAY_ID_16:
2577                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2578                 break;
2579         case IPR_HOST_RCB_OVERLAY_ID_17:
2580                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2581                 break;
2582         case IPR_HOST_RCB_OVERLAY_ID_20:
2583                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2584                 break;
2585         case IPR_HOST_RCB_OVERLAY_ID_21:
2586                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2587                 break;
2588         case IPR_HOST_RCB_OVERLAY_ID_23:
2589                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2590                 break;
2591         case IPR_HOST_RCB_OVERLAY_ID_24:
2592         case IPR_HOST_RCB_OVERLAY_ID_26:
2593                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2594                 break;
2595         case IPR_HOST_RCB_OVERLAY_ID_30:
2596                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2597                 break;
2598         case IPR_HOST_RCB_OVERLAY_ID_41:
2599                 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2600                 break;
2601         case IPR_HOST_RCB_OVERLAY_ID_1:
2602         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2603         default:
2604                 ipr_log_generic_error(ioa_cfg, hostrcb);
2605                 break;
2606         }
2607 }
2608
2609 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2610 {
2611         struct ipr_hostrcb *hostrcb;
2612
2613         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2614                                         struct ipr_hostrcb, queue);
2615
2616         if (unlikely(!hostrcb)) {
2617                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2618                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2619                                                 struct ipr_hostrcb, queue);
2620         }
2621
2622         list_del_init(&hostrcb->queue);
2623         return hostrcb;
2624 }
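/*
 * Note: this relies on at least one HCAM buffer always sitting on either
 * the free or the report queue; if both were ever empty, the
 * list_del_init() above would dereference a NULL pointer.
 */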
2625
2626 /**
2627  * ipr_process_error - Op done function for an adapter error log.
2628  * @ipr_cmd:    ipr command struct
2629  *
2630  * This function is the op done function for an error log Host
2631  * Controlled Async Message (HCAM) from the adapter. It will log
2632  * the error and send the HCAM back to the adapter.
2633  *
2634  * Return value:
2635  *      none
2636  **/
2637 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2638 {
2639         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2640         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2641         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2642         u32 fd_ioasc;
2643
2644         if (ioa_cfg->sis64)
2645                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2646         else
2647                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2648
2649         list_del_init(&hostrcb->queue);
2650         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2651
2652         if (!ioasc) {
2653                 ipr_handle_log_data(ioa_cfg, hostrcb);
2654                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2655                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2656         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2657                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2658                 dev_err(&ioa_cfg->pdev->dev,
2659                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2660         }
2661
2662         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2663         schedule_work(&ioa_cfg->work_q);
2664         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2665
2666         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2667 }
2668
2669 /**
2670  * ipr_timeout - An internally generated op has timed out.
2671  * @t: Timer context used to fetch ipr command struct
2672  *
2673  * This function blocks host requests and initiates an
2674  * adapter reset.
2675  *
2676  * Return value:
2677  *      none
2678  **/
2679 static void ipr_timeout(struct timer_list *t)
2680 {
2681         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2682         unsigned long lock_flags = 0;
2683         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2684
2685         ENTER;
2686         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687
2688         ioa_cfg->errors_logged++;
2689         dev_err(&ioa_cfg->pdev->dev,
2690                 "Adapter being reset due to command timeout.\n");
2691
2692         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2693                 ioa_cfg->sdt_state = GET_DUMP;
2694
2695         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2696                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2697
2698         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2699         LEAVE;
2700 }
2701
2702 /**
2703  * ipr_oper_timeout - Adapter timed out transitioning to operational
2704  * @t: Timer context used to fetch ipr command struct
2705  *
2706  * This function blocks host requests and initiates an
2707  * adapter reset.
2708  *
2709  * Return value:
2710  *      none
2711  **/
2712 static void ipr_oper_timeout(struct timer_list *t)
2713 {
2714         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2715         unsigned long lock_flags = 0;
2716         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2717
2718         ENTER;
2719         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2720
2721         ioa_cfg->errors_logged++;
2722         dev_err(&ioa_cfg->pdev->dev,
2723                 "Adapter timed out transitioning to operational.\n");
2724
2725         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2726                 ioa_cfg->sdt_state = GET_DUMP;
2727
2728         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2729                 if (ipr_fastfail)
2730                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2731                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2732         }
2733
2734         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2735         LEAVE;
2736 }
2737
2738 /**
2739  * ipr_find_ses_entry - Find matching SES in SES table
2740  * @res:        resource entry struct of SES
2741  *
2742  * Return value:
2743  *      pointer to SES table entry / NULL on failure
2744  **/
2745 static const struct ipr_ses_table_entry *
2746 ipr_find_ses_entry(struct ipr_resource_entry *res)
2747 {
2748         int i, j, matches;
2749         struct ipr_std_inq_vpids *vpids;
2750         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2751
2752         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2753                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
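                        /* Bytes flagged 'X' in the SES table mask must match
                         * the device's inquiry product ID exactly; all other
                         * positions are treated as don't-care.
                         */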
2754                         if (ste->compare_product_id_byte[j] == 'X') {
2755                                 vpids = &res->std_inq_data.vpids;
2756                                 if (vpids->product_id[j] == ste->product_id[j])
2757                                         matches++;
2758                                 else
2759                                         break;
2760                         } else
2761                                 matches++;
2762                 }
2763
2764                 if (matches == IPR_PROD_ID_LEN)
2765                         return ste;
2766         }
2767
2768         return NULL;
2769 }
2770
2771 /**
2772  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2773  * @ioa_cfg:    ioa config struct
2774  * @bus:                SCSI bus
2775  * @bus_width:  bus width
2776  *
2777  * Return value:
2778  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2779  *      For a 2-byte (wide) SCSI bus, which moves two bytes per cycle,
2780  *      the maximum transfer speed is twice the maximum transfer rate
2781  *      (e.g. for a wide-enabled bus, max 160 MHz = max 320 MB/sec).
2782  **/
2783 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2784 {
2785         struct ipr_resource_entry *res;
2786         const struct ipr_ses_table_entry *ste;
2787         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2788
2789         /* Loop through each config table entry in the config table buffer */
2790         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2791                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2792                         continue;
2793
2794                 if (bus != res->bus)
2795                         continue;
2796
2797                 if (!(ste = ipr_find_ses_entry(res)))
2798                         continue;
2799
2800                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2801         }
2802
2803         return max_xfer_rate;
2804 }
2805
2806 /**
2807  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2808  * @ioa_cfg:            ioa config struct
2809  * @max_delay:          max delay in micro-seconds to wait
2810  *
2811  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2812  *
2813  * Return value:
2814  *      0 on success / other on failure
2815  **/
2816 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2817 {
2818         volatile u32 pcii_reg;
2819         int delay = 1;
2820
2821         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2822         while (delay < max_delay) {
2823                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2824
2825                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2826                         return 0;
2827
2828                 /* udelay cannot be used if delay is more than a few milliseconds */
2829                 if ((delay / 1000) > MAX_UDELAY_MS)
2830                         mdelay(delay / 1000);
2831                 else
2832                         udelay(delay);
2833
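                /* Double the wait each iteration: exponential backoff
                 * bounded by max_delay.
                 */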
2834                 delay += delay;
2835         }
2836         return -EIO;
2837 }
2838
2839 /**
2840  * ipr_get_sis64_dump_data_section - Dump IOA memory
2841  * @ioa_cfg:                    ioa config struct
2842  * @start_addr:                 adapter address to dump
2843  * @dest:                       destination kernel buffer
2844  * @length_in_words:            length to dump in 4 byte words
2845  *
2846  * Return value:
2847  *      0 on success
2848  **/
2849 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2850                                            u32 start_addr,
2851                                            __be32 *dest, u32 length_in_words)
2852 {
2853         int i;
2854
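        /* Each word is fetched through the adapter's indirect dump window:
         * write the IOA address, then read the corresponding data register.
         */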
2855         for (i = 0; i < length_in_words; i++) {
2856                 writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
2857                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2858                 dest++;
2859         }
2860
2861         return 0;
2862 }
2863
2864 /**
2865  * ipr_get_ldump_data_section - Dump IOA memory
2866  * @ioa_cfg:                    ioa config struct
2867  * @start_addr:                 adapter address to dump
2868  * @dest:                               destination kernel buffer
2869  * @length_in_words:    length to dump in 4 byte words
2870  *
2871  * Return value:
2872  *      0 on success / -EIO on failure
2873  **/
2874 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2875                                       u32 start_addr,
2876                                       __be32 *dest, u32 length_in_words)
2877 {
2878         volatile u32 temp_pcii_reg;
2879         int i, delay = 0;
2880
2881         if (ioa_cfg->sis64)
2882                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2883                                                        dest, length_in_words);
2884
2885         /* Write IOA interrupt reg starting LDUMP state  */
2886         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2887                ioa_cfg->regs.set_uproc_interrupt_reg32);
2888
2889         /* Wait for IO debug acknowledge */
2890         if (ipr_wait_iodbg_ack(ioa_cfg,
2891                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2892                 dev_err(&ioa_cfg->pdev->dev,
2893                         "IOA dump long data transfer timeout\n");
2894                 return -EIO;
2895         }
2896
2897         /* Signal LDUMP interlocked - clear IO debug ack */
2898         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2899                ioa_cfg->regs.clr_interrupt_reg);
2900
2901         /* Write Mailbox with starting address */
2902         writel(start_addr, ioa_cfg->ioa_mailbox);
2903
2904         /* Signal address valid - clear IOA Reset alert */
2905         writel(IPR_UPROCI_RESET_ALERT,
2906                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2907
2908         for (i = 0; i < length_in_words; i++) {
2909                 /* Wait for IO debug acknowledge */
2910                 if (ipr_wait_iodbg_ack(ioa_cfg,
2911                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2912                         dev_err(&ioa_cfg->pdev->dev,
2913                                 "IOA dump short data transfer timeout\n");
2914                         return -EIO;
2915                 }
2916
2917                 /* Read data from mailbox and increment destination pointer */
2918                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2919                 dest++;
2920
2921                 /* For all but the last word of data, signal data received */
2922                 if (i < (length_in_words - 1)) {
2923                         /* Signal dump data received - Clear IO debug Ack */
2924                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2925                                ioa_cfg->regs.clr_interrupt_reg);
2926                 }
2927         }
2928
2929         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2930         writel(IPR_UPROCI_RESET_ALERT,
2931                ioa_cfg->regs.set_uproc_interrupt_reg32);
2932
2933         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2934                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2935
2936         /* Signal dump data received - Clear IO debug Ack */
2937         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2938                ioa_cfg->regs.clr_interrupt_reg);
2939
2940         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2941         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2942                 temp_pcii_reg =
2943                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2944
2945                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2946                         return 0;
2947
2948                 udelay(10);
2949                 delay += 10;
2950         }
2951
2952         return 0;
2953 }
2954
2955 #ifdef CONFIG_SCSI_IPR_DUMP
2956 /**
2957  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2958  * @ioa_cfg:            ioa config struct
2959  * @pci_address:        adapter address
2960  * @length:                     length of data to copy
2961  *
2962  * Copy data from PCI adapter to kernel buffer.
2963  * Note: length MUST be a 4 byte multiple
2964  * Return value:
2965  *      0 on success / other on failure
2966  **/
2967 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2968                         unsigned long pci_address, u32 length)
2969 {
2970         int bytes_copied = 0;
2971         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2972         __be32 *page;
2973         unsigned long lock_flags = 0;
2974         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2975
2976         if (ioa_cfg->sis64)
2977                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2978         else
2979                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2980
2981         while (bytes_copied < length &&
2982                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2983                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2984                     ioa_dump->page_offset == 0) {
2985                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2986
2987                         if (!page) {
2988                                 ipr_trace;
2989                                 return bytes_copied;
2990                         }
2991
2992                         ioa_dump->page_offset = 0;
2993                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2994                         ioa_dump->next_page_index++;
2995                 } else
2996                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2997
2998                 rem_len = length - bytes_copied;
2999                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
3000                 cur_len = min(rem_len, rem_page_len);
3001
3002                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3003                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3004                         rc = -EIO;
3005                 } else {
3006                         rc = ipr_get_ldump_data_section(ioa_cfg,
3007                                                         pci_address + bytes_copied,
3008                                                         &page[ioa_dump->page_offset / 4],
3009                                                         (cur_len / sizeof(u32)));
3010                 }
3011                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3012
3013                 if (!rc) {
3014                         ioa_dump->page_offset += cur_len;
3015                         bytes_copied += cur_len;
3016                 } else {
3017                         ipr_trace;
3018                         break;
3019                 }
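                /* This runs in process context from the worker thread;
                 * yield between sections so large dumps don't hog the CPU.
                 */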
3020                 schedule();
3021         }
3022
3023         return bytes_copied;
3024 }
3025
3026 /**
3027  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3028  * @hdr:        dump entry header struct
3029  *
3030  * Return value:
3031  *      nothing
3032  **/
3033 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3034 {
3035         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3036         hdr->num_elems = 1;
3037         hdr->offset = sizeof(*hdr);
3038         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3039 }
3040
3041 /**
3042  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3043  * @ioa_cfg:    ioa config struct
3044  * @driver_dump:        driver dump struct
3045  *
3046  * Return value:
3047  *      nothing
3048  **/
3049 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3050                                    struct ipr_driver_dump *driver_dump)
3051 {
3052         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3053
3054         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3055         driver_dump->ioa_type_entry.hdr.len =
3056                 sizeof(struct ipr_dump_ioa_type_entry) -
3057                 sizeof(struct ipr_dump_entry_header);
3058         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3059         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3060         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3061         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3062                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3063                 ucode_vpd->minor_release[1];
3064         driver_dump->hdr.num_entries++;
3065 }
3066
3067 /**
3068  * ipr_dump_version_data - Fill in the driver version in the dump.
3069  * @ioa_cfg:    ioa config struct
3070  * @driver_dump:        driver dump struct
3071  *
3072  * Return value:
3073  *      nothing
3074  **/
3075 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3076                                   struct ipr_driver_dump *driver_dump)
3077 {
3078         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3079         driver_dump->version_entry.hdr.len =
3080                 sizeof(struct ipr_dump_version_entry) -
3081                 sizeof(struct ipr_dump_entry_header);
3082         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3083         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3084         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3085         driver_dump->hdr.num_entries++;
3086 }
3087
3088 /**
3089  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3090  * @ioa_cfg:    ioa config struct
3091  * @driver_dump:        driver dump struct
3092  *
3093  * Return value:
3094  *      nothing
3095  **/
3096 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3097                                    struct ipr_driver_dump *driver_dump)
3098 {
3099         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3100         driver_dump->trace_entry.hdr.len =
3101                 sizeof(struct ipr_dump_trace_entry) -
3102                 sizeof(struct ipr_dump_entry_header);
3103         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3104         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3105         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3106         driver_dump->hdr.num_entries++;
3107 }
3108
3109 /**
3110  * ipr_dump_location_data - Fill in the IOA location in the dump.
3111  * @ioa_cfg:    ioa config struct
3112  * @driver_dump:        driver dump struct
3113  *
3114  * Return value:
3115  *      nothing
3116  **/
3117 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3118                                    struct ipr_driver_dump *driver_dump)
3119 {
3120         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3121         driver_dump->location_entry.hdr.len =
3122                 sizeof(struct ipr_dump_location_entry) -
3123                 sizeof(struct ipr_dump_entry_header);
3124         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3125         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3126         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3127         driver_dump->hdr.num_entries++;
3128 }
3129
3130 /**
3131  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3132  * @ioa_cfg:    ioa config struct
3133  * @dump:               dump struct
3134  *
3135  * Return value:
3136  *      nothing
3137  **/
3138 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3139 {
3140         unsigned long start_addr, sdt_word;
3141         unsigned long lock_flags = 0;
3142         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3143         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3144         u32 num_entries, max_num_entries, start_off, end_off;
3145         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3146         struct ipr_sdt *sdt;
3147         int valid = 1;
3148         int i;
3149
3150         ENTER;
3151
3152         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3153
3154         if (ioa_cfg->sdt_state != READ_DUMP) {
3155                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3156                 return;
3157         }
3158
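        /* On SIS64 adapters, drop the lock and sleep, presumably to give
         * the IOA time to finish populating its smart dump table before
         * the mailbox is read.
         */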
3159         if (ioa_cfg->sis64) {
3160                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3161                 ssleep(IPR_DUMP_DELAY_SECONDS);
3162                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3163         }
3164
3165         start_addr = readl(ioa_cfg->ioa_mailbox);
3166
3167         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3168                 dev_err(&ioa_cfg->pdev->dev,
3169                         "Invalid dump table format: %lx\n", start_addr);
3170                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3171                 return;
3172         }
3173
3174         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3175
3176         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3177
3178         /* Initialize the overall dump header */
3179         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3180         driver_dump->hdr.num_entries = 1;
3181         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3182         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3183         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3184         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3185
3186         ipr_dump_version_data(ioa_cfg, driver_dump);
3187         ipr_dump_location_data(ioa_cfg, driver_dump);
3188         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3189         ipr_dump_trace_data(ioa_cfg, driver_dump);
3190
3191         /* Update dump_header */
3192         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3193
3194         /* IOA Dump entry */
3195         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3196         ioa_dump->hdr.len = 0;
3197         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3198         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3199
3200         /* First entries in sdt are actually a list of dump addresses and
3201          * lengths to gather the real dump data.  sdt represents the pointer
3202          * to the ioa generated dump table.  Dump data will be extracted based
3203          * on entries in this table */
3204         sdt = &ioa_dump->sdt;
3205
3206         if (ioa_cfg->sis64) {
3207                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3208                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3209         } else {
3210                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3211                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3212         }
3213
3214         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3215                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3216         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3217                                         bytes_to_copy / sizeof(__be32));
3218
3219         /* Smart Dump table is ready to use and the first entry is valid */
3220         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3221             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3222                 dev_err(&ioa_cfg->pdev->dev,
3223                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3224                         rc, be32_to_cpu(sdt->hdr.state));
3225                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3226                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3227                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3228                 return;
3229         }
3230
3231         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3232
3233         if (num_entries > max_num_entries)
3234                 num_entries = max_num_entries;
3235
3236         /* Update dump length to the actual data to be copied */
3237         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3238         if (ioa_cfg->sis64)
3239                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3240         else
3241                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3242
3243         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3244
3245         for (i = 0; i < num_entries; i++) {
3246                 if (ioa_dump->hdr.len > max_dump_size) {
3247                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3248                         break;
3249                 }
3250
3251                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3252                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3253                         if (ioa_cfg->sis64)
3254                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3255                         else {
3256                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3257                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3258
3259                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3260                                         bytes_to_copy = end_off - start_off;
3261                                 else
3262                                         valid = 0;
3263                         }
3264                         if (valid) {
3265                                 if (bytes_to_copy > max_dump_size) {
3266                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3267                                         continue;
3268                                 }
3269
3270                                 /* Copy data from adapter to driver buffers */
3271                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3272                                                             bytes_to_copy);
3273
3274                                 ioa_dump->hdr.len += bytes_copied;
3275
3276                                 if (bytes_copied != bytes_to_copy) {
3277                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3278                                         break;
3279                                 }
3280                         }
3281                 }
3282         }
3283
3284         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3285
3286         /* Update dump_header */
3287         driver_dump->hdr.len += ioa_dump->hdr.len;
3288         wmb();
3289         ioa_cfg->sdt_state = DUMP_OBTAINED;
3290         LEAVE;
3291 }
3292
3293 #else
3294 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3295 #endif
3296
3297 /**
3298  * ipr_release_dump - Free adapter dump memory
3299  * @kref:       kref struct
3300  *
3301  * Return value:
3302  *      nothing
3303  **/
3304 static void ipr_release_dump(struct kref *kref)
3305 {
3306         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3307         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3308         unsigned long lock_flags = 0;
3309         int i;
3310
3311         ENTER;
3312         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3313         ioa_cfg->dump = NULL;
3314         ioa_cfg->sdt_state = INACTIVE;
3315         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3316
3317         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3318                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3319
3320         vfree(dump->ioa_dump.ioa_data);
3321         kfree(dump);
3322         LEAVE;
3323 }
3324
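/**
 * ipr_add_remove_thread - Add and remove devices based on resource changes
 * @work:	work struct
 *
 * Removes any resources flagged for deletion from the mid-layer, then
 * adds newly discovered resources, restarting the scan whenever the
 * host lock had to be dropped.
 *
 * Return value:
 *	nothing
 **/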
3325 static void ipr_add_remove_thread(struct work_struct *work)
3326 {
3327         unsigned long lock_flags;
3328         struct ipr_resource_entry *res;
3329         struct scsi_device *sdev;
3330         struct ipr_ioa_cfg *ioa_cfg =
3331                 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3332         u8 bus, target, lun;
3333         int did_work;
3334
3335         ENTER;
3336         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3337
3338 restart:
3339         do {
3340                 did_work = 0;
3341                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3342                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343                         return;
3344                 }
3345
3346                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3347                         if (res->del_from_ml && res->sdev) {
3348                                 did_work = 1;
3349                                 sdev = res->sdev;
3350                                 if (!scsi_device_get(sdev)) {
3351                                         if (!res->add_to_ml)
3352                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3353                                         else
3354                                                 res->del_from_ml = 0;
3355                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356                                         scsi_remove_device(sdev);
3357                                         scsi_device_put(sdev);
3358                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3359                                 }
3360                                 break;
3361                         }
3362                 }
3363         } while (did_work);
3364
3365         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3366                 if (res->add_to_ml) {
3367                         bus = res->bus;
3368                         target = res->target;
3369                         lun = res->lun;
3370                         res->add_to_ml = 0;
3371                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3372                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3373                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
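                        /* The resource list may have changed while the lock
                         * was dropped; rescan it from the beginning.
                         */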
3374                         goto restart;
3375                 }
3376         }
3377
3378         ioa_cfg->scan_done = 1;
3379         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3380         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3381         LEAVE;
3382 }
3383
3384 /**
3385  * ipr_worker_thread - Worker thread
3386  * @work:               work struct
3387  *
3388  * Called at task level from a work thread. This function takes care
3389  * of adding and removing devices from the mid-layer as configuration
3390  * changes are detected by the adapter.
3391  *
3392  * Return value:
3393  *      nothing
3394  **/
3395 static void ipr_worker_thread(struct work_struct *work)
3396 {
3397         unsigned long lock_flags;
3398         struct ipr_dump *dump;
3399         struct ipr_ioa_cfg *ioa_cfg =
3400                 container_of(work, struct ipr_ioa_cfg, work_q);
3401
3402         ENTER;
3403         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404
3405         if (ioa_cfg->sdt_state == READ_DUMP) {
3406                 dump = ioa_cfg->dump;
3407                 if (!dump) {
3408                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3409                         return;
3410                 }
3411                 kref_get(&dump->kref);
3412                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413                 ipr_get_ioa_dump(ioa_cfg, dump);
3414                 kref_put(&dump->kref, ipr_release_dump);
3415
3416                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3418                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3419                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3420                 return;
3421         }
3422
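        /* scsi_unblock_requests() is called without the host lock held;
         * re-check scsi_blocked afterwards in case a reset re-blocked
         * the host in that window.
         */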
3423         if (ioa_cfg->scsi_unblock) {
3424                 ioa_cfg->scsi_unblock = 0;
3425                 ioa_cfg->scsi_blocked = 0;
3426                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3427                 scsi_unblock_requests(ioa_cfg->host);
3428                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3429                 if (ioa_cfg->scsi_blocked)
3430                         scsi_block_requests(ioa_cfg->host);
3431         }
3432
3433         if (!ioa_cfg->scan_enabled) {
3434                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3435                 return;
3436         }
3437
3438         schedule_work(&ioa_cfg->scsi_add_work_q);
3439
3440         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3441         LEAVE;
3442 }
3443
3444 #ifdef CONFIG_SCSI_IPR_TRACE
3445 /**
3446  * ipr_read_trace - Dump the adapter trace
3447  * @filp:               open sysfs file
3448  * @kobj:               kobject struct
3449  * @bin_attr:           bin_attribute struct
3450  * @buf:                buffer
3451  * @off:                offset
3452  * @count:              buffer size
3453  *
3454  * Return value:
3455  *      number of bytes printed to buffer
3456  **/
3457 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3458                               struct bin_attribute *bin_attr,
3459                               char *buf, loff_t off, size_t count)
3460 {
3461         struct device *dev = container_of(kobj, struct device, kobj);
3462         struct Scsi_Host *shost = class_to_shost(dev);
3463         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3464         unsigned long lock_flags = 0;
3465         ssize_t ret;
3466
3467         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3468         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3469                                 IPR_TRACE_SIZE);
3470         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3471
3472         return ret;
3473 }
3474
3475 static struct bin_attribute ipr_trace_attr = {
3476         .attr = {
3477                 .name = "trace",
3478                 .mode = S_IRUGO,
3479         },
3480         .size = 0,
3481         .read = ipr_read_trace,
3482 };
3483 #endif
3484
3485 /**
3486  * ipr_show_fw_version - Show the firmware version
3487  * @dev:        class device struct
3488  * @buf:        buffer
3489  *
3490  * Return value:
3491  *      number of bytes printed to buffer
3492  **/
3493 static ssize_t ipr_show_fw_version(struct device *dev,
3494                                    struct device_attribute *attr, char *buf)
3495 {
3496         struct Scsi_Host *shost = class_to_shost(dev);
3497         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3498         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3499         unsigned long lock_flags = 0;
3500         int len;
3501
3502         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3503         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3504                        ucode_vpd->major_release, ucode_vpd->card_type,
3505                        ucode_vpd->minor_release[0],
3506                        ucode_vpd->minor_release[1]);
3507         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3508         return len;
3509 }
3510
3511 static struct device_attribute ipr_fw_version_attr = {
3512         .attr = {
3513                 .name =         "fw_version",
3514                 .mode =         S_IRUGO,
3515         },
3516         .show = ipr_show_fw_version,
3517 };
3518
3519 /**
3520  * ipr_show_log_level - Show the adapter's error logging level
3521  * @dev:        class device struct
3522  * @buf:        buffer
3523  *
3524  * Return value:
3525  *      number of bytes printed to buffer
3526  **/
3527 static ssize_t ipr_show_log_level(struct device *dev,
3528                                    struct device_attribute *attr, char *buf)
3529 {
3530         struct Scsi_Host *shost = class_to_shost(dev);
3531         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3532         unsigned long lock_flags = 0;
3533         int len;
3534
3535         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3536         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3537         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3538         return len;
3539 }
3540
3541 /**
3542  * ipr_store_log_level - Change the adapter's error logging level
3543  * @dev:        class device struct
3544  * @buf:        buffer
3545  *
3546  * Return value:
3547  *      number of bytes consumed from the buffer
3548  **/
3549 static ssize_t ipr_store_log_level(struct device *dev,
3550                                    struct device_attribute *attr,
3551                                    const char *buf, size_t count)
3552 {
3553         struct Scsi_Host *shost = class_to_shost(dev);
3554         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3555         unsigned long lock_flags = 0;
3556
3557         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3558         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3559         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3560         return strlen(buf);
3561 }
3562
3563 static struct device_attribute ipr_log_level_attr = {
3564         .attr = {
3565                 .name =         "log_level",
3566                 .mode =         S_IRUGO | S_IWUSR,
3567         },
3568         .show = ipr_show_log_level,
3569         .store = ipr_store_log_level
3570 };
3571
3572 /**
3573  * ipr_store_diagnostics - IOA Diagnostics interface
3574  * @dev:        device struct
3575  * @buf:        buffer
3576  * @count:      buffer size
3577  *
3578  * This function will reset the adapter and wait a reasonable
3579  * amount of time for any errors that the adapter might log.
3580  *
3581  * Return value:
3582  *      count on success / other on failure
3583  **/
3584 static ssize_t ipr_store_diagnostics(struct device *dev,
3585                                      struct device_attribute *attr,
3586                                      const char *buf, size_t count)
3587 {
3588         struct Scsi_Host *shost = class_to_shost(dev);
3589         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3590         unsigned long lock_flags = 0;
3591         int rc = count;
3592
3593         if (!capable(CAP_SYS_ADMIN))
3594                 return -EACCES;
3595
3596         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3597         while (ioa_cfg->in_reset_reload) {
3598                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3599                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3600                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3601         }
3602
3603         ioa_cfg->errors_logged = 0;
3604         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3605
3606         if (ioa_cfg->in_reset_reload) {
3607                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3608                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3609
3610                 /* Wait for a second for any errors to be logged */
3611                 msleep(1000);
3612         } else {
3613                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3614                 return -EIO;
3615         }
3616
3617         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3618         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3619                 rc = -EIO;
3620         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621
3622         return rc;
3623 }
3624
3625 static struct device_attribute ipr_diagnostics_attr = {
3626         .attr = {
3627                 .name =         "run_diagnostics",
3628                 .mode =         S_IWUSR,
3629         },
3630         .store = ipr_store_diagnostics
3631 };
3632
3633 /**
3634  * ipr_show_adapter_state - Show the adapter's state
3635  * @dev:        device struct
3636  * @buf:        buffer
3637  *
3638  * Return value:
3639  *      number of bytes printed to buffer
3640  **/
3641 static ssize_t ipr_show_adapter_state(struct device *dev,
3642                                       struct device_attribute *attr, char *buf)
3643 {
3644         struct Scsi_Host *shost = class_to_shost(dev);
3645         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3646         unsigned long lock_flags = 0;
3647         int len;
3648
3649         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3650         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3651                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3652         else
3653                 len = snprintf(buf, PAGE_SIZE, "online\n");
3654         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3655         return len;
3656 }
3657
3658 /**
3659  * ipr_store_adapter_state - Change adapter state
3660  * @dev:        device struct
3661  * @buf:        buffer
3662  * @count:      buffer size
3663  *
3664  * This function will change the adapter's state.
3665  *
3666  * Return value:
3667  *      count on success / other on failure
3668  **/
3669 static ssize_t ipr_store_adapter_state(struct device *dev,
3670                                        struct device_attribute *attr,
3671                                        const char *buf, size_t count)
3672 {
3673         struct Scsi_Host *shost = class_to_shost(dev);
3674         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3675         unsigned long lock_flags;
3676         int result = count, i;
3677
3678         if (!capable(CAP_SYS_ADMIN))
3679                 return -EACCES;
3680
3681         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3682         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3683             !strncmp(buf, "online", 6)) {
3684                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3685                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3686                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3687                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3688                 }
3689                 wmb();
3690                 ioa_cfg->reset_retries = 0;
3691                 ioa_cfg->in_ioa_bringdown = 0;
3692                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3693         }
3694         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3695         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3696
3697         return result;
3698 }
3699
3700 static struct device_attribute ipr_ioa_state_attr = {
3701         .attr = {
3702                 .name =         "online_state",
3703                 .mode =         S_IRUGO | S_IWUSR,
3704         },
3705         .show = ipr_show_adapter_state,
3706         .store = ipr_store_adapter_state
3707 };
3708
3709 /**
3710  * ipr_store_reset_adapter - Reset the adapter
3711  * @dev:        device struct
3712  * @buf:        buffer
3713  * @count:      buffer size
3714  *
3715  * This function will reset the adapter.
3716  *
3717  * Return value:
3718  *      count on success / other on failure
3719  **/
3720 static ssize_t ipr_store_reset_adapter(struct device *dev,
3721                                        struct device_attribute *attr,
3722                                        const char *buf, size_t count)
3723 {
3724         struct Scsi_Host *shost = class_to_shost(dev);
3725         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3726         unsigned long lock_flags;
3727         int result = count;
3728
3729         if (!capable(CAP_SYS_ADMIN))
3730                 return -EACCES;
3731
3732         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3733         if (!ioa_cfg->in_reset_reload)
3734                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3735         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3736         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3737
3738         return result;
3739 }
3740
3741 static struct device_attribute ipr_ioa_reset_attr = {
3742         .attr = {
3743                 .name =         "reset_host",
3744                 .mode =         S_IWUSR,
3745         },
3746         .store = ipr_store_reset_adapter
3747 };
3748
3749 static int ipr_iopoll(struct irq_poll *iop, int budget);
3750 /**
3751  * ipr_show_iopoll_weight - Show ipr polling mode
3752  * @dev:        class device struct
3753  * @buf:        buffer
3754  *
3755  * Return value:
3756  *      number of bytes printed to buffer
3757  **/
3758 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3759                                    struct device_attribute *attr, char *buf)
3760 {
3761         struct Scsi_Host *shost = class_to_shost(dev);
3762         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3763         unsigned long lock_flags = 0;
3764         int len;
3765
3766         spin_lock_irqsave(shost->host_lock, lock_flags);
3767         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3768         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3769
3770         return len;
3771 }
3772
3773 /**
3774  * ipr_store_iopoll_weight - Change the adapter's polling mode
3775  * @dev:        class device struct
3776  * @buf:        buffer
3777  *
3778  * Return value:
3779  *      number of bytes consumed from the buffer
3780  **/
3781 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3782                                         struct device_attribute *attr,
3783                                         const char *buf, size_t count)
3784 {
3785         struct Scsi_Host *shost = class_to_shost(dev);
3786         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3787         unsigned long user_iopoll_weight;
3788         unsigned long lock_flags = 0;
3789         int i;
3790
3791         if (!ioa_cfg->sis64) {
3792                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3793                 return -EINVAL;
3794         }
3795         if (kstrtoul(buf, 10, &user_iopoll_weight))
3796                 return -EINVAL;
3797
3798         if (user_iopoll_weight > 256) {
3799                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It may not exceed 256\n");
3800                 return -EINVAL;
3801         }
3802
3803         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3804                 dev_info(&ioa_cfg->pdev->dev, "irq_poll weight is already set to that value\n");
3805                 return strlen(buf);
3806         }
3807
3808         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
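        /* Quiesce the existing pollers before changing the weight; they
         * are re-initialized with the new value below.
         */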
3809                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3810                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3811         }
3812
3813         spin_lock_irqsave(shost->host_lock, lock_flags);
3814         ioa_cfg->iopoll_weight = user_iopoll_weight;
3815         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3816                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3817                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3818                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3819                 }
3820         }
3821         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3822
3823         return strlen(buf);
3824 }
3825
3826 static struct device_attribute ipr_iopoll_weight_attr = {
3827         .attr = {
3828                 .name =         "iopoll_weight",
3829                 .mode =         S_IRUGO | S_IWUSR,
3830         },
3831         .show = ipr_show_iopoll_weight,
3832         .store = ipr_store_iopoll_weight
3833 };
3834
3835 /**
3836  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3837  * @buf_len:            buffer length
3838  *
3839  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3840  * list to use for microcode download
3841  *
3842  * Return value:
3843  *      pointer to sglist / NULL on failure
3844  **/
3845 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3846 {
3847         int sg_size, order;
3848         struct ipr_sglist *sglist;
3849
3850         /* Get the minimum size per scatter/gather element */
3851         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3852
3853         /* Get the actual size per element */
3854         order = get_order(sg_size);
3855
3856         /* Allocate a scatter/gather list for the DMA */
3857         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3858         if (sglist == NULL) {
3859                 ipr_trace;
3860                 return NULL;
3861         }
3862         sglist->order = order;
3863         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3864                                               &sglist->num_sg);
3865         if (!sglist->scatterlist) {
3866                 kfree(sglist);
3867                 return NULL;
3868         }
3869
3870         return sglist;
3871 }
3872
3873 /**
3874  * ipr_free_ucode_buffer - Frees a microcode download buffer
3875  * @p_dnld:             scatter/gather list pointer
3876  *
3877  * Free a DMA'able ucode download buffer previously allocated with
3878  * ipr_alloc_ucode_buffer
3879  *
3880  * Return value:
3881  *      nothing
3882  **/
3883 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3884 {
3885         sgl_free_order(sglist->scatterlist, sglist->order);
3886         kfree(sglist);
3887 }
3888
3889 /**
3890  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3891  * @sglist:             scatter/gather list pointer
3892  * @buffer:             buffer pointer
3893  * @len:                buffer length
3894  *
3895  * Copy a microcode image from a user buffer into a buffer allocated by
3896  * ipr_alloc_ucode_buffer
3897  *
3898  * Return value:
3899  *      0 on success / other on failure
3900  **/
3901 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3902                                  u8 *buffer, u32 len)
3903 {
3904         int bsize_elem, i;
3905         struct scatterlist *sg;
3906         void *kaddr;
3907
3908         /* Determine the actual number of bytes per element */
3909         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3910
3911         sg = sglist->scatterlist;
3912
3913         for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3914                         buffer += bsize_elem) {
3915                 struct page *page = sg_page(sg);
3916
3917                 kaddr = kmap(page);
3918                 memcpy(kaddr, buffer, bsize_elem);
3919                 kunmap(page);
3920
3921                 sg->length = bsize_elem;
3927         }
3928
3929         if (len % bsize_elem) {
3930                 struct page *page = sg_page(sg);
3931
3932                 kaddr = kmap(page);
3933                 memcpy(kaddr, buffer, len % bsize_elem);
3934                 kunmap(page);
3935
3936                 sg->length = len % bsize_elem;
3937         }
3938
3939         sglist->buffer_len = len;
3940         return 0;
3941 }
3942
3943 /**
3944  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3945  * @ipr_cmd:            ipr command struct
3946  * @sglist:             scatter/gather list
3947  *
3948  * Builds a microcode download IOA data list (IOADL).
3949  *
3950  **/
3951 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3952                                     struct ipr_sglist *sglist)
3953 {
3954         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3955         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3956         struct scatterlist *scatterlist = sglist->scatterlist;
3957         struct scatterlist *sg;
3958         int i;
3959
3960         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3961         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3962         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3963
3964         ioarcb->ioadl_len =
3965                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3966         for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3967                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3968                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3969                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3970         }
3971
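        /* Flag the final descriptor so the IOA knows where the list ends. */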
3972         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3973 }
3974
3975 /**
3976  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3977  * @ipr_cmd:    ipr command struct
3978  * @sglist:             scatter/gather list
3979  *
3980  * Builds a microcode download IOA data list (IOADL).
3981  *
3982  **/
3983 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3984                                   struct ipr_sglist *sglist)
3985 {
3986         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3987         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3988         struct scatterlist *scatterlist = sglist->scatterlist;
3989         struct scatterlist *sg;
3990         int i;
3991
3992         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3993         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3994         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3995
3996         ioarcb->ioadl_len =
3997                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3998
3999         for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
4000                 ioadl[i].flags_and_data_len =
4001                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
4002                 ioadl[i].address =
4003                         cpu_to_be32(sg_dma_address(sg));
4004         }
4005
4006         ioadl[i-1].flags_and_data_len |=
4007                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4008 }
4009
4010 /**
4011  * ipr_update_ioa_ucode - Update IOA's microcode
4012  * @ioa_cfg:    ioa config struct
4013  * @sglist:             scatter/gather list
4014  *
4015  * Initiate an adapter reset to update the IOA's microcode
4016  *
4017  * Return value:
4018  *      0 on success / -EIO on failure
4019  **/
4020 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4021                                 struct ipr_sglist *sglist)
4022 {
4023         unsigned long lock_flags;
4024
4025         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4026         while (ioa_cfg->in_reset_reload) {
4027                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4028                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4029                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4030         }
4031
4032         if (ioa_cfg->ucode_sglist) {
4033                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4034                 dev_err(&ioa_cfg->pdev->dev,
4035                         "Microcode download already in progress\n");
4036                 return -EIO;
4037         }
4038
4039         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4040                                         sglist->scatterlist, sglist->num_sg,
4041                                         DMA_TO_DEVICE);
4042
4043         if (!sglist->num_dma_sg) {
4044                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4045                 dev_err(&ioa_cfg->pdev->dev,
4046                         "Failed to map microcode download buffer!\n");
4047                 return -EIO;
4048         }
4049
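        /* The reset job consumes ucode_sglist and performs the actual
         * download during adapter bring-up; wait for it to complete.
         */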
4050         ioa_cfg->ucode_sglist = sglist;
4051         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4052         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4053         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4054
4055         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4056         ioa_cfg->ucode_sglist = NULL;
4057         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4058         return 0;
4059 }
4060
4061 /**
4062  * ipr_store_update_fw - Update the firmware on the adapter
4063  * @dev:        device struct
4064  * @buf:        buffer
4065  * @count:      buffer size
4066  *
4067  * This function will update the firmware on the adapter.
4068  *
4069  * Return value:
4070  *      count on success / other on failure
4071  **/
4072 static ssize_t ipr_store_update_fw(struct device *dev,
4073                                    struct device_attribute *attr,
4074                                    const char *buf, size_t count)
4075 {
4076         struct Scsi_Host *shost = class_to_shost(dev);
4077         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4078         struct ipr_ucode_image_header *image_hdr;
4079         const struct firmware *fw_entry;
4080         struct ipr_sglist *sglist;
4081         char fname[100];
4082         char *src;
4083         char *endline;
4084         int result, dnld_size;
4085
4086         if (!capable(CAP_SYS_ADMIN))
4087                 return -EACCES;
4088
4089         snprintf(fname, sizeof(fname), "%s", buf);
4090
4091         endline = strchr(fname, '\n');
4092         if (endline)
4093                 *endline = '\0';
4094
4095         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4096                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4097                 return -EIO;
4098         }
4099
4100         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4101
	src = (char *)image_hdr + be32_to_cpu(image_hdr->header_length);
4103         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4104         sglist = ipr_alloc_ucode_buffer(dnld_size);
4105
4106         if (!sglist) {
4107                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4108                 release_firmware(fw_entry);
4109                 return -ENOMEM;
4110         }
4111
4112         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4113
4114         if (result) {
4115                 dev_err(&ioa_cfg->pdev->dev,
4116                         "Microcode buffer copy to DMA buffer failed\n");
4117                 goto out;
4118         }
4119
4120         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4121
4122         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4123
4124         if (!result)
4125                 result = count;
4126 out:
4127         ipr_free_ucode_buffer(sglist);
4128         release_firmware(fw_entry);
4129         return result;
4130 }
4131
4132 static struct device_attribute ipr_update_fw_attr = {
4133         .attr = {
4134                 .name =         "update_fw",
4135                 .mode =         S_IWUSR,
4136         },
4137         .store = ipr_store_update_fw
4138 };
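
/*
 * Illustrative usage from userspace (the host number and firmware file
 * name below are assumptions, not fixed values). The string written is
 * handed to request_firmware(), so the image must live in the firmware
 * search path, typically /lib/firmware:
 *
 *   # echo my_ipr_ucode.bin > /sys/class/scsi_host/host0/update_fw
 */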
4139
4140 /**
4141  * ipr_show_fw_type - Show the adapter's firmware type.
4142  * @dev:        class device struct
 * @attr:       device attribute (unused)
 * @buf:        buffer
4144  *
4145  * Return value:
4146  *      number of bytes printed to buffer
4147  **/
4148 static ssize_t ipr_show_fw_type(struct device *dev,
4149                                 struct device_attribute *attr, char *buf)
4150 {
4151         struct Scsi_Host *shost = class_to_shost(dev);
4152         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4153         unsigned long lock_flags = 0;
4154         int len;
4155
4156         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4157         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4158         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4159         return len;
4160 }
4161
4162 static struct device_attribute ipr_ioa_fw_type_attr = {
4163         .attr = {
4164                 .name =         "fw_type",
4165                 .mode =         S_IRUGO,
4166         },
4167         .show = ipr_show_fw_type
4168 };
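
/*
 * Reading fw_type simply reports ioa_cfg->sis64, so a SIS-64 adapter
 * returns "1" and a SIS-32 adapter returns "0". For example (the host
 * number is an assumption):
 *
 *   # cat /sys/class/scsi_host/host0/fw_type
 *   1
 */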
4169
4170 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4171                                 struct bin_attribute *bin_attr, char *buf,
4172                                 loff_t off, size_t count)
4173 {
4174         struct device *cdev = container_of(kobj, struct device, kobj);
4175         struct Scsi_Host *shost = class_to_shost(cdev);
4176         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4177         struct ipr_hostrcb *hostrcb;
4178         unsigned long lock_flags = 0;
4179         int ret;
4180
4181         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4182         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4183                                         struct ipr_hostrcb, queue);
4184         if (!hostrcb) {
4185                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4186                 return 0;
4187         }
4188         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4189                                 sizeof(hostrcb->hcam));
4190         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4191         return ret;
4192 }
4193
4194 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4195                                 struct bin_attribute *bin_attr, char *buf,
4196                                 loff_t off, size_t count)
4197 {
4198         struct device *cdev = container_of(kobj, struct device, kobj);
4199         struct Scsi_Host *shost = class_to_shost(cdev);
4200         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4201         struct ipr_hostrcb *hostrcb;
4202         unsigned long lock_flags = 0;
4203
4204         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4205         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4206                                         struct ipr_hostrcb, queue);
4207         if (!hostrcb) {
4208                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4209                 return count;
4210         }
4211
4212         /* Reclaim hostrcb before exit */
4213         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4214         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4215         return count;
4216 }
4217
4218 static struct bin_attribute ipr_ioa_async_err_log = {
4219         .attr = {
4220                 .name =         "async_err_log",
4221                 .mode =         S_IRUGO | S_IWUSR,
4222         },
4223         .size = 0,
4224         .read = ipr_read_async_err_log,
4225         .write = ipr_next_async_err_log
4226 };
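
/*
 * Reading async_err_log returns the HCAM data of the oldest host RCB
 * on the report queue; writing any value moves that entry back to the
 * free queue so that the next read returns the following record. A
 * consumer would typically alternate reads and writes to drain the
 * queue.
 */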
4227
4228 static struct device_attribute *ipr_ioa_attrs[] = {
4229         &ipr_fw_version_attr,
4230         &ipr_log_level_attr,
4231         &ipr_diagnostics_attr,
4232         &ipr_ioa_state_attr,
4233         &ipr_ioa_reset_attr,
4234         &ipr_update_fw_attr,
4235         &ipr_ioa_fw_type_attr,
4236         &ipr_iopoll_weight_attr,
4237         NULL,
4238 };
4239
4240 #ifdef CONFIG_SCSI_IPR_DUMP
4241 /**
4242  * ipr_read_dump - Dump the adapter
4243  * @filp:               open sysfs file
4244  * @kobj:               kobject struct
4245  * @bin_attr:           bin_attribute struct
4246  * @buf:                buffer
4247  * @off:                offset
4248  * @count:              buffer size
4249  *
4250  * Return value:
 *      number of bytes copied to buffer
4252  **/
4253 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4254                              struct bin_attribute *bin_attr,
4255                              char *buf, loff_t off, size_t count)
4256 {
4257         struct device *cdev = container_of(kobj, struct device, kobj);
4258         struct Scsi_Host *shost = class_to_shost(cdev);
4259         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4260         struct ipr_dump *dump;
4261         unsigned long lock_flags = 0;
4262         char *src;
4263         int len, sdt_end;
4264         size_t rc = count;
4265
4266         if (!capable(CAP_SYS_ADMIN))
4267                 return -EACCES;
4268
4269         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4270         dump = ioa_cfg->dump;
4271
4272         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4273                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4274                 return 0;
4275         }
4276         kref_get(&dump->kref);
4277         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4278
4279         if (off > dump->driver_dump.hdr.len) {
4280                 kref_put(&dump->kref, ipr_release_dump);
4281                 return 0;
4282         }
4283
4284         if (off + count > dump->driver_dump.hdr.len) {
4285                 count = dump->driver_dump.hdr.len - off;
4286                 rc = count;
4287         }
4288
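	/*
	 * The dump is laid out as three consecutive regions: the driver
	 * dump header, the smart dump table (sdt), and finally the IOA
	 * data pages collected during the dump. Copy from each region
	 * in turn until the request is satisfied.
	 */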
4289         if (count && off < sizeof(dump->driver_dump)) {
4290                 if (off + count > sizeof(dump->driver_dump))
4291                         len = sizeof(dump->driver_dump) - off;
4292                 else
4293                         len = count;
4294                 src = (u8 *)&dump->driver_dump + off;
4295                 memcpy(buf, src, len);
4296                 buf += len;
4297                 off += len;
4298                 count -= len;
4299         }
4300
4301         off -= sizeof(dump->driver_dump);
4302
4303         if (ioa_cfg->sis64)
4304                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4305                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4306                            sizeof(struct ipr_sdt_entry));
4307         else
4308                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4309                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4310
4311         if (count && off < sdt_end) {
4312                 if (off + count > sdt_end)
4313                         len = sdt_end - off;
4314                 else
4315                         len = count;
4316                 src = (u8 *)&dump->ioa_dump + off;
4317                 memcpy(buf, src, len);
4318                 buf += len;
4319                 off += len;
4320                 count -= len;
4321         }
4322
4323         off -= sdt_end;
4324
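	/*
	 * The remaining data lives in individually allocated dump pages,
	 * so copy at most up to the next page boundary per iteration.
	 */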
4325         while (count) {
4326                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4327                         len = PAGE_ALIGN(off) - off;
4328                 else
4329                         len = count;
4330                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4331                 src += off & ~PAGE_MASK;
4332                 memcpy(buf, src, len);
4333                 buf += len;
4334                 off += len;
4335                 count -= len;
4336         }
4337
4338         kref_put(&dump->kref, ipr_release_dump);
4339         return rc;
4340 }
4341
4342 /**
4343  * ipr_alloc_dump - Prepare for adapter dump
4344  * @ioa_cfg:    ioa config struct
4345  *
4346  * Return value:
4347  *      0 on success / other on failure
4348  **/
4349 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4350 {
4351         struct ipr_dump *dump;
4352         __be32 **ioa_data;
4353         unsigned long lock_flags = 0;
4354
4355         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4356
4357         if (!dump) {
4358                 ipr_err("Dump memory allocation failed\n");
4359                 return -ENOMEM;
4360         }
4361
4362         if (ioa_cfg->sis64)
4363                 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4364                                               sizeof(__be32 *)));
4365         else
4366                 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4367                                               sizeof(__be32 *)));
4368
4369         if (!ioa_data) {
4370                 ipr_err("Dump memory allocation failed\n");
4371                 kfree(dump);
4372                 return -ENOMEM;
4373         }
4374
4375         dump->ioa_dump.ioa_data = ioa_data;
4376
4377         kref_init(&dump->kref);
4378         dump->ioa_cfg = ioa_cfg;
4379
4380         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4381
4382         if (INACTIVE != ioa_cfg->sdt_state) {
4383                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4384                 vfree(dump->ioa_dump.ioa_data);
4385                 kfree(dump);
4386                 return 0;
4387         }
4388
4389         ioa_cfg->dump = dump;
4390         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4391         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4392                 ioa_cfg->dump_taken = 1;
4393                 schedule_work(&ioa_cfg->work_q);
4394         }
4395         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4396
4397         return 0;
4398 }
4399
4400 /**
4401  * ipr_free_dump - Free adapter dump memory
4402  * @ioa_cfg:    ioa config struct
4403  *
4404  * Return value:
4405  *      0 on success / other on failure
4406  **/
4407 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4408 {
4409         struct ipr_dump *dump;
4410         unsigned long lock_flags = 0;
4411
4412         ENTER;
4413
4414         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4415         dump = ioa_cfg->dump;
4416         if (!dump) {
4417                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4418                 return 0;
4419         }
4420
4421         ioa_cfg->dump = NULL;
4422         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4423
4424         kref_put(&dump->kref, ipr_release_dump);
4425
4426         LEAVE;
4427         return 0;
4428 }
4429
4430 /**
4431  * ipr_write_dump - Setup dump state of adapter
4432  * @filp:               open sysfs file
4433  * @kobj:               kobject struct
4434  * @bin_attr:           bin_attribute struct
4435  * @buf:                buffer
4436  * @off:                offset
4437  * @count:              buffer size
4438  *
4439  * Return value:
 *      count on success / negative error code on failure
4441  **/
4442 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4443                               struct bin_attribute *bin_attr,
4444                               char *buf, loff_t off, size_t count)
4445 {
4446         struct device *cdev = container_of(kobj, struct device, kobj);
4447         struct Scsi_Host *shost = class_to_shost(cdev);
4448         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4449         int rc;
4450
4451         if (!capable(CAP_SYS_ADMIN))
4452                 return -EACCES;
4453
4454         if (buf[0] == '1')
4455                 rc = ipr_alloc_dump(ioa_cfg);
4456         else if (buf[0] == '0')
4457                 rc = ipr_free_dump(ioa_cfg);
4458         else
4459                 return -EINVAL;
4460
4461         if (rc)
4462                 return rc;
4463         else
4464                 return count;
4465 }
4466
4467 static struct bin_attribute ipr_dump_attr = {
4468         .attr = {
4469                 .name = "dump",
4470                 .mode = S_IRUSR | S_IWUSR,
4471         },
4472         .size = 0,
4473         .read = ipr_read_dump,
4474         .write = ipr_write_dump
4475 };
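
/*
 * Illustrative dump sequence (the host number is an assumption):
 *
 *   # echo 1 > /sys/class/scsi_host/host0/dump
 *   # cat /sys/class/scsi_host/host0/dump > ipr.dump
 *   # echo 0 > /sys/class/scsi_host/host0/dump
 *
 * The first write prepares dump memory, the read returns data once the
 * dump state reaches DUMP_OBTAINED, and the final write frees the dump
 * memory again.
 */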
4476 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4478 #endif
4479
4480 /**
4481  * ipr_change_queue_depth - Change the device's queue depth
4482  * @sdev:       scsi device struct
4483  * @qdepth:     depth to set
4485  *
4486  * Return value:
4487  *      actual depth set
4488  **/
4489 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4490 {
4491         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4492         struct ipr_resource_entry *res;
4493         unsigned long lock_flags = 0;
4494
4495         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4496         res = (struct ipr_resource_entry *)sdev->hostdata;
4497
4498         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4499                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4500         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4501
4502         scsi_change_queue_depth(sdev, qdepth);
4503         return sdev->queue_depth;
4504 }
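
/*
 * Example: a request to set a queue depth of 64 on a SATA (GATA)
 * resource is clamped to IPR_MAX_CMD_PER_ATA_LUN before being passed
 * to scsi_change_queue_depth(); the depth actually set is returned.
 */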
4505
4506 /**
4507  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4508  * @dev:        device struct
4509  * @attr:       device attribute structure
4510  * @buf:        buffer
4511  *
4512  * Return value:
4513  *      number of bytes printed to buffer
4514  **/
4515 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4516 {
4517         struct scsi_device *sdev = to_scsi_device(dev);
4518         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4519         struct ipr_resource_entry *res;
4520         unsigned long lock_flags = 0;
4521         ssize_t len = -ENXIO;
4522
4523         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4524         res = (struct ipr_resource_entry *)sdev->hostdata;
4525         if (res)
4526                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4527         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4528         return len;
4529 }
4530
4531 static struct device_attribute ipr_adapter_handle_attr = {
4532         .attr = {
4533                 .name =         "adapter_handle",
4534                 .mode =         S_IRUSR,
4535         },
4536         .show = ipr_show_adapter_handle
4537 };
4538
4539 /**
4540  * ipr_show_resource_path - Show the resource path or the resource address for
4541  *                          this device.
4542  * @dev:        device struct
4543  * @attr:       device attribute structure
4544  * @buf:        buffer
4545  *
4546  * Return value:
4547  *      number of bytes printed to buffer
4548  **/
4549 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4550 {
4551         struct scsi_device *sdev = to_scsi_device(dev);
4552         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4553         struct ipr_resource_entry *res;
4554         unsigned long lock_flags = 0;
4555         ssize_t len = -ENXIO;
4556         char buffer[IPR_MAX_RES_PATH_LENGTH];
4557
4558         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4559         res = (struct ipr_resource_entry *)sdev->hostdata;
4560         if (res && ioa_cfg->sis64)
4561                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4562                                __ipr_format_res_path(res->res_path, buffer,
4563                                                      sizeof(buffer)));
4564         else if (res)
4565                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4566                                res->bus, res->target, res->lun);
4567
4568         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4569         return len;
4570 }
4571
4572 static struct device_attribute ipr_resource_path_attr = {
4573         .attr = {
4574                 .name =         "resource_path",
4575                 .mode =         S_IRUGO,
4576         },
4577         .show = ipr_show_resource_path
4578 };
4579
4580 /**
4581  * ipr_show_device_id - Show the device_id for this device.
4582  * @dev:        device struct
4583  * @attr:       device attribute structure
4584  * @buf:        buffer
4585  *
4586  * Return value:
4587  *      number of bytes printed to buffer
4588  **/
4589 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4590 {
4591         struct scsi_device *sdev = to_scsi_device(dev);
4592         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4593         struct ipr_resource_entry *res;
4594         unsigned long lock_flags = 0;
4595         ssize_t len = -ENXIO;
4596
4597         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4598         res = (struct ipr_resource_entry *)sdev->hostdata;
4599         if (res && ioa_cfg->sis64)
4600                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4601         else if (res)
4602                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4603
4604         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4605         return len;
4606 }
4607
4608 static struct device_attribute ipr_device_id_attr = {
4609         .attr = {
4610                 .name =         "device_id",
4611                 .mode =         S_IRUGO,
4612         },
4613         .show = ipr_show_device_id
4614 };
4615
4616 /**
4617  * ipr_show_resource_type - Show the resource type for this device.
4618  * @dev:        device struct
4619  * @attr:       device attribute structure
4620  * @buf:        buffer
4621  *
4622  * Return value:
4623  *      number of bytes printed to buffer
4624  **/
4625 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4626 {
4627         struct scsi_device *sdev = to_scsi_device(dev);
4628         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4629         struct ipr_resource_entry *res;
4630         unsigned long lock_flags = 0;
4631         ssize_t len = -ENXIO;
4632
4633         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4634         res = (struct ipr_resource_entry *)sdev->hostdata;
4635
4636         if (res)
4637                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4638
4639         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4640         return len;
4641 }
4642
4643 static struct device_attribute ipr_resource_type_attr = {
4644         .attr = {
4645                 .name =         "resource_type",
4646                 .mode =         S_IRUGO,
4647         },
4648         .show = ipr_show_resource_type
4649 };
4650
4651 /**
4652  * ipr_show_raw_mode - Show the adapter's raw mode
4653  * @dev:        class device struct
 * @attr:       device attribute (unused)
 * @buf:        buffer
4655  *
4656  * Return value:
4657  *      number of bytes printed to buffer
4658  **/
4659 static ssize_t ipr_show_raw_mode(struct device *dev,
4660                                  struct device_attribute *attr, char *buf)
4661 {
4662         struct scsi_device *sdev = to_scsi_device(dev);
4663         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4664         struct ipr_resource_entry *res;
4665         unsigned long lock_flags = 0;
4666         ssize_t len;
4667
4668         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4669         res = (struct ipr_resource_entry *)sdev->hostdata;
4670         if (res)
4671                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4672         else
4673                 len = -ENXIO;
4674         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4675         return len;
4676 }
4677
4678 /**
4679  * ipr_store_raw_mode - Change the adapter's raw mode
4680  * @dev:        class device struct
 * @attr:       device attribute (unused)
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes used on success / -EINVAL or -ENXIO on failure
4685  **/
4686 static ssize_t ipr_store_raw_mode(struct device *dev,
4687                                   struct device_attribute *attr,
4688                                   const char *buf, size_t count)
4689 {
4690         struct scsi_device *sdev = to_scsi_device(dev);
4691         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4692         struct ipr_resource_entry *res;
4693         unsigned long lock_flags = 0;
4694         ssize_t len;
4695
4696         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4697         res = (struct ipr_resource_entry *)sdev->hostdata;
4698         if (res) {
4699                 if (ipr_is_af_dasd_device(res)) {
4700                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4701                         len = strlen(buf);
4702                         if (res->sdev)
4703                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4704                                         res->raw_mode ? "enabled" : "disabled");
4705                 } else
4706                         len = -EINVAL;
4707         } else
4708                 len = -ENXIO;
4709         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4710         return len;
4711 }
4712
4713 static struct device_attribute ipr_raw_mode_attr = {
4714         .attr = {
4715                 .name =         "raw_mode",
4716                 .mode =         S_IRUGO | S_IWUSR,
4717         },
4718         .show = ipr_show_raw_mode,
4719         .store = ipr_store_raw_mode
4720 };
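
/*
 * raw_mode can only be toggled on AF DASD devices; for example (the
 * device address is an assumption):
 *
 *   # echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
 *
 * Writes for any other resource type fail with -EINVAL.
 */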
4721
4722 static struct device_attribute *ipr_dev_attrs[] = {
4723         &ipr_adapter_handle_attr,
4724         &ipr_resource_path_attr,
4725         &ipr_device_id_attr,
4726         &ipr_resource_type_attr,
4727         &ipr_raw_mode_attr,
4728         NULL,
4729 };
4730
4731 /**
4732  * ipr_biosparam - Return the HSC mapping
4733  * @sdev:                       scsi device struct
4734  * @block_device:       block device pointer
4735  * @capacity:           capacity of the device
4736  * @parm:                       Array containing returned HSC values.
4737  *
4738  * This function generates the HSC parms that fdisk uses.
4739  * We want to make sure we return something that places partitions
4740  * on 4k boundaries for best performance with the IOA.
4741  *
4742  * Return value:
4743  *      0 on success
4744  **/
4745 static int ipr_biosparam(struct scsi_device *sdev,
4746                          struct block_device *block_device,
4747                          sector_t capacity, int *parm)
4748 {
4749         int heads, sectors;
4750         sector_t cylinders;
4751
4752         heads = 128;
4753         sectors = 32;
4754
4755         cylinders = capacity;
4756         sector_div(cylinders, (128 * 32));
4757
4758         /* return result */
4759         parm[0] = heads;
4760         parm[1] = sectors;
4761         parm[2] = cylinders;
4762
4763         return 0;
4764 }
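
/*
 * Worked example: with 128 heads and 32 sectors/track, one cylinder is
 * 128 * 32 = 4096 sectors, i.e. 2 MiB with 512-byte sectors. Any
 * partition that starts on a cylinder boundary therefore starts on a
 * multiple of 4096 sectors, which is always 4k aligned.
 */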
4765
4766 /**
4767  * ipr_find_starget - Find target based on bus/target.
4768  * @starget:    scsi target struct
4769  *
4770  * Return value:
4771  *      resource entry pointer if found / NULL if not found
4772  **/
4773 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4774 {
4775         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4776         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4777         struct ipr_resource_entry *res;
4778
4779         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4780                 if ((res->bus == starget->channel) &&
4781                     (res->target == starget->id)) {
4782                         return res;
4783                 }
4784         }
4785
4786         return NULL;
4787 }
4788
4789 static struct ata_port_info sata_port_info;
4790
4791 /**
4792  * ipr_target_alloc - Prepare for commands to a SCSI target
4793  * @starget:    scsi target struct
4794  *
4795  * If the device is a SATA device, this function allocates an
4796  * ATA port with libata, else it does nothing.
4797  *
4798  * Return value:
4799  *      0 on success / non-0 on failure
4800  **/
4801 static int ipr_target_alloc(struct scsi_target *starget)
4802 {
4803         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4804         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4805         struct ipr_sata_port *sata_port;
4806         struct ata_port *ap;
4807         struct ipr_resource_entry *res;
4808         unsigned long lock_flags;
4809
4810         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4811         res = ipr_find_starget(starget);
4812         starget->hostdata = NULL;
4813
4814         if (res && ipr_is_gata(res)) {
4815                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4816                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4817                 if (!sata_port)
4818                         return -ENOMEM;
4819
4820                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4821                 if (ap) {
4822                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4823                         sata_port->ioa_cfg = ioa_cfg;
4824                         sata_port->ap = ap;
4825                         sata_port->res = res;
4826
4827                         res->sata_port = sata_port;
4828                         ap->private_data = sata_port;
4829                         starget->hostdata = sata_port;
4830                 } else {
4831                         kfree(sata_port);
4832                         return -ENOMEM;
4833                 }
4834         }
4835         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4836
4837         return 0;
4838 }
4839
4840 /**
4841  * ipr_target_destroy - Destroy a SCSI target
4842  * @starget:    scsi target struct
4843  *
4844  * If the device was a SATA device, this function frees the libata
4845  * ATA port, else it does nothing.
4846  *
4847  **/
4848 static void ipr_target_destroy(struct scsi_target *starget)
4849 {
4850         struct ipr_sata_port *sata_port = starget->hostdata;
4851         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4852         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4853
4854         if (ioa_cfg->sis64) {
4855                 if (!ipr_find_starget(starget)) {
4856                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4857                                 clear_bit(starget->id, ioa_cfg->array_ids);
4858                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4859                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4860                         else if (starget->channel == 0)
4861                                 clear_bit(starget->id, ioa_cfg->target_ids);
4862                 }
4863         }
4864
4865         if (sata_port) {
4866                 starget->hostdata = NULL;
4867                 ata_sas_port_destroy(sata_port->ap);
4868                 kfree(sata_port);
4869         }
4870 }
4871
4872 /**
4873  * ipr_find_sdev - Find device based on bus/target/lun.
4874  * @sdev:       scsi device struct
4875  *
4876  * Return value:
4877  *      resource entry pointer if found / NULL if not found
4878  **/
4879 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4880 {
4881         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4882         struct ipr_resource_entry *res;
4883
4884         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4885                 if ((res->bus == sdev->channel) &&
4886                     (res->target == sdev->id) &&
4887                     (res->lun == sdev->lun))
4888                         return res;
4889         }
4890
4891         return NULL;
4892 }
4893
4894 /**
4895  * ipr_slave_destroy - Unconfigure a SCSI device
4896  * @sdev:       scsi device struct
4897  *
4898  * Return value:
4899  *      nothing
4900  **/
4901 static void ipr_slave_destroy(struct scsi_device *sdev)
4902 {
4903         struct ipr_resource_entry *res;
4904         struct ipr_ioa_cfg *ioa_cfg;
4905         unsigned long lock_flags = 0;
4906
4907         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4908
4909         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4910         res = (struct ipr_resource_entry *) sdev->hostdata;
4911         if (res) {
4912                 if (res->sata_port)
4913                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4914                 sdev->hostdata = NULL;
4915                 res->sdev = NULL;
4916                 res->sata_port = NULL;
4917         }
4918         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4919 }
4920
4921 /**
4922  * ipr_slave_configure - Configure a SCSI device
4923  * @sdev:       scsi device struct
4924  *
4925  * This function configures the specified scsi device.
4926  *
4927  * Return value:
4928  *      0 on success
4929  **/
4930 static int ipr_slave_configure(struct scsi_device *sdev)
4931 {
4932         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4933         struct ipr_resource_entry *res;
4934         struct ata_port *ap = NULL;
4935         unsigned long lock_flags = 0;
4936         char buffer[IPR_MAX_RES_PATH_LENGTH];
4937
4938         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4939         res = sdev->hostdata;
4940         if (res) {
4941                 if (ipr_is_af_dasd_device(res))
4942                         sdev->type = TYPE_RAID;
4943                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4944                         sdev->scsi_level = 4;
4945                         sdev->no_uld_attach = 1;
4946                 }
4947                 if (ipr_is_vset_device(res)) {
4948                         sdev->scsi_level = SCSI_SPC_3;
4949                         sdev->no_report_opcodes = 1;
4950                         blk_queue_rq_timeout(sdev->request_queue,
4951                                              IPR_VSET_RW_TIMEOUT);
4952                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4953                 }
4954                 if (ipr_is_gata(res) && res->sata_port)
4955                         ap = res->sata_port->ap;
4956                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4957
4958                 if (ap) {
4959                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4960                         ata_sas_slave_configure(sdev, ap);
4961                 }
4962
4963                 if (ioa_cfg->sis64)
4964                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4965                                     ipr_format_res_path(ioa_cfg,
4966                                 res->res_path, buffer, sizeof(buffer)));
4967                 return 0;
4968         }
4969         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4970         return 0;
4971 }
4972
4973 /**
4974  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4975  * @sdev:       scsi device struct
4976  *
4977  * This function initializes an ATA port so that future commands
4978  * sent through queuecommand will work.
4979  *
4980  * Return value:
4981  *      0 on success
4982  **/
4983 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4984 {
4985         struct ipr_sata_port *sata_port = NULL;
4986         int rc = -ENXIO;
4987
4988         ENTER;
4989         if (sdev->sdev_target)
4990                 sata_port = sdev->sdev_target->hostdata;
4991         if (sata_port) {
4992                 rc = ata_sas_port_init(sata_port->ap);
4993                 if (rc == 0)
4994                         rc = ata_sas_sync_probe(sata_port->ap);
4995         }
4996
4997         if (rc)
4998                 ipr_slave_destroy(sdev);
4999
5000         LEAVE;
5001         return rc;
5002 }
5003
5004 /**
5005  * ipr_slave_alloc - Prepare for commands to a device.
5006  * @sdev:       scsi device struct
5007  *
5008  * This function saves a pointer to the resource entry
5009  * in the scsi device struct if the device exists. We
5010  * can then use this pointer in ipr_queuecommand when
5011  * handling new commands.
5012  *
5013  * Return value:
5014  *      0 on success / -ENXIO if device does not exist
5015  **/
5016 static int ipr_slave_alloc(struct scsi_device *sdev)
5017 {
5018         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5019         struct ipr_resource_entry *res;
5020         unsigned long lock_flags;
5021         int rc = -ENXIO;
5022
5023         sdev->hostdata = NULL;
5024
5025         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5026
5027         res = ipr_find_sdev(sdev);
5028         if (res) {
5029                 res->sdev = sdev;
5030                 res->add_to_ml = 0;
5031                 res->in_erp = 0;
5032                 sdev->hostdata = res;
5033                 if (!ipr_is_naca_model(res))
5034                         res->needs_sync_complete = 1;
5035                 rc = 0;
5036                 if (ipr_is_gata(res)) {
5037                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5038                         return ipr_ata_slave_alloc(sdev);
5039                 }
5040         }
5041
5042         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5043
5044         return rc;
5045 }
5046
5047 /**
5048  * ipr_match_lun - Match function for specified LUN
5049  * @ipr_cmd:    ipr command struct
5050  * @device:             device to match (sdev)
5051  *
5052  * Returns:
5053  *      1 if command matches sdev / 0 if command does not match sdev
5054  **/
5055 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5056 {
5057         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5058                 return 1;
5059         return 0;
5060 }
5061
5062 /**
5063  * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:    ipr command struct
5065  *
5066  * Returns:
5067  *      true / false
5068  **/
5069 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5070 {
5071         struct ipr_cmnd *loop_cmd;
5072
5073         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5074                 if (loop_cmd == ipr_cmd)
5075                         return true;
5076         }
5077
5078         return false;
5079 }
5080
5081 /**
5082  * ipr_match_res - Match function for specified resource entry
5083  * @ipr_cmd:    ipr command struct
5084  * @resource:   resource entry to match
5085  *
5086  * Returns:
 *      1 if command matches the resource entry / 0 if it does not
5088  **/
5089 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5090 {
5091         struct ipr_resource_entry *res = resource;
5092
5093         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5094                 return 1;
5095         return 0;
5096 }
5097
5098 /**
5099  * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:    ioa config struct
5101  * @device:             device to match (sdev)
5102  * @match:              match function to use
5103  *
5104  * Returns:
5105  *      SUCCESS / FAILED
5106  **/
5107 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5108                             int (*match)(struct ipr_cmnd *, void *))
5109 {
5110         struct ipr_cmnd *ipr_cmd;
5111         int wait, i;
5112         unsigned long flags;
5113         struct ipr_hrr_queue *hrrq;
5114         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5115         DECLARE_COMPLETION_ONSTACK(comp);
5116
5117         ENTER;
5118         do {
5119                 wait = 0;
5120
5121                 for_each_hrrq(hrrq, ioa_cfg) {
5122                         spin_lock_irqsave(hrrq->lock, flags);
5123                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5124                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5125                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5126                                         if (match(ipr_cmd, device)) {
5127                                                 ipr_cmd->eh_comp = &comp;
5128                                                 wait++;
5129                                         }
5130                                 }
5131                         }
5132                         spin_unlock_irqrestore(hrrq->lock, flags);
5133                 }
5134
5135                 if (wait) {
5136                         timeout = wait_for_completion_timeout(&comp, timeout);
5137
5138                         if (!timeout) {
5139                                 wait = 0;
5140
5141                                 for_each_hrrq(hrrq, ioa_cfg) {
5142                                         spin_lock_irqsave(hrrq->lock, flags);
5143                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5144                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5145                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5146                                                         if (match(ipr_cmd, device)) {
5147                                                                 ipr_cmd->eh_comp = NULL;
5148                                                                 wait++;
5149                                                         }
5150                                                 }
5151                                         }
5152                                         spin_unlock_irqrestore(hrrq->lock, flags);
5153                                 }
5154
5155                                 if (wait)
5156                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5157                                 LEAVE;
5158                                 return wait ? FAILED : SUCCESS;
5159                         }
5160                 }
5161         } while (wait);
5162
5163         LEAVE;
5164         return SUCCESS;
5165 }
5166
5167 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5168 {
5169         struct ipr_ioa_cfg *ioa_cfg;
5170         unsigned long lock_flags = 0;
5171         int rc = SUCCESS;
5172
5173         ENTER;
5174         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5175         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5176
5177         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5178                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5179                 dev_err(&ioa_cfg->pdev->dev,
5180                         "Adapter being reset as a result of error recovery.\n");
5181
5182                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5183                         ioa_cfg->sdt_state = GET_DUMP;
5184         }
5185
5186         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5187         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5188         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5189
	/*
	 * If we got hit with a host reset while we were already resetting
	 * the adapter for some reason, and that reset failed, the adapter
	 * is now marked dead, so fail this host reset as well.
	 */
5192         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5193                 ipr_trace;
5194                 rc = FAILED;
5195         }
5196
5197         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5198         LEAVE;
5199         return rc;
5200 }
5201
5202 /**
5203  * ipr_device_reset - Reset the device
5204  * @ioa_cfg:    ioa config struct
5205  * @res:                resource entry struct
5206  *
5207  * This function issues a device reset to the affected device.
5208  * If the device is a SCSI device, a LUN reset will be sent
5209  * to the device first. If that does not work, a target reset
5210  * will be sent. If the device is a SATA device, a PHY reset will
5211  * be sent.
5212  *
5213  * Return value:
5214  *      0 on success / non-zero on failure
5215  **/
5216 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5217                             struct ipr_resource_entry *res)
5218 {
5219         struct ipr_cmnd *ipr_cmd;
5220         struct ipr_ioarcb *ioarcb;
5221         struct ipr_cmd_pkt *cmd_pkt;
5222         struct ipr_ioarcb_ata_regs *regs;
5223         u32 ioasc;
5224
5225         ENTER;
5226         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5227         ioarcb = &ipr_cmd->ioarcb;
5228         cmd_pkt = &ioarcb->cmd_pkt;
5229
5230         if (ipr_cmd->ioa_cfg->sis64) {
5231                 regs = &ipr_cmd->i.ata_ioadl.regs;
5232                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5233         } else
5234                 regs = &ioarcb->u.add_data.u.regs;
5235
5236         ioarcb->res_handle = res->res_handle;
5237         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5238         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5239         if (ipr_is_gata(res)) {
5240                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5241                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5242                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5243         }
5244
5245         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5246         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5247         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5248         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5249                 if (ipr_cmd->ioa_cfg->sis64)
5250                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5251                                sizeof(struct ipr_ioasa_gata));
5252                 else
5253                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5254                                sizeof(struct ipr_ioasa_gata));
5255         }
5256
5257         LEAVE;
5258         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5259 }
5260
5261 /**
5262  * ipr_sata_reset - Reset the SATA port
5263  * @link:       SATA link to reset
 * @classes:    class of the attached device
 * @deadline:   unused
5265  *
5266  * This function issues a SATA phy reset to the affected ATA link.
5267  *
5268  * Return value:
5269  *      0 on success / non-zero on failure
5270  **/
5271 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5272                                 unsigned long deadline)
5273 {
5274         struct ipr_sata_port *sata_port = link->ap->private_data;
5275         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5276         struct ipr_resource_entry *res;
5277         unsigned long lock_flags = 0;
5278         int rc = -ENXIO, ret;
5279
5280         ENTER;
5281         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5282         while (ioa_cfg->in_reset_reload) {
5283                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5284                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5285                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5286         }
5287
5288         res = sata_port->res;
5289         if (res) {
5290                 rc = ipr_device_reset(ioa_cfg, res);
5291                 *classes = res->ata_class;
5292                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5293
5294                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5295                 if (ret != SUCCESS) {
5296                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5297                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5298                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5299
5300                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5301                 }
5302         } else
5303                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5304
5305         LEAVE;
5306         return rc;
5307 }
5308
5309 /**
 * __ipr_eh_dev_reset - Reset the device
5311  * @scsi_cmd:   scsi command struct
5312  *
5313  * This function issues a device reset to the affected device.
5314  * A LUN reset will be sent to the device first. If that does
5315  * not work, a target reset will be sent.
5316  *
5317  * Return value:
5318  *      SUCCESS / FAILED
5319  **/
5320 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5321 {
5322         struct ipr_cmnd *ipr_cmd;
5323         struct ipr_ioa_cfg *ioa_cfg;
5324         struct ipr_resource_entry *res;
5325         struct ata_port *ap;
5326         int rc = 0, i;
5327         struct ipr_hrr_queue *hrrq;
5328
5329         ENTER;
5330         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5331         res = scsi_cmd->device->hostdata;
5332
5333         /*
5334          * If we are currently going through reset/reload, return failed. This will force the
5335          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5336          * reset to complete
5337          */
5338         if (ioa_cfg->in_reset_reload)
5339                 return FAILED;
5340         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5341                 return FAILED;
5342
5343         for_each_hrrq(hrrq, ioa_cfg) {
5344                 spin_lock(&hrrq->_lock);
5345                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5346                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5347
5348                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5349                                 if (!ipr_cmd->qc)
5350                                         continue;
5351                                 if (ipr_cmnd_is_free(ipr_cmd))
5352                                         continue;
5353
5354                                 ipr_cmd->done = ipr_sata_eh_done;
5355                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5356                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5357                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5358                                 }
5359                         }
5360                 }
5361                 spin_unlock(&hrrq->_lock);
5362         }
5363         res->resetting_device = 1;
5364         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5365
5366         if (ipr_is_gata(res) && res->sata_port) {
5367                 ap = res->sata_port->ap;
5368                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5369                 ata_std_error_handler(ap);
5370                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5371         } else
5372                 rc = ipr_device_reset(ioa_cfg, res);
5373         res->resetting_device = 0;
5374         res->reset_occurred = 1;
5375
5376         LEAVE;
5377         return rc ? FAILED : SUCCESS;
5378 }
5379
5380 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5381 {
5382         int rc;
5383         struct ipr_ioa_cfg *ioa_cfg;
5384         struct ipr_resource_entry *res;
5385
5386         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5387         res = cmd->device->hostdata;
5388
5389         if (!res)
5390                 return FAILED;
5391
5392         spin_lock_irq(cmd->device->host->host_lock);
5393         rc = __ipr_eh_dev_reset(cmd);
5394         spin_unlock_irq(cmd->device->host->host_lock);
5395
5396         if (rc == SUCCESS) {
5397                 if (ipr_is_gata(res) && res->sata_port)
5398                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5399                 else
5400                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5401         }
5402
5403         return rc;
5404 }
5405
5406 /**
5407  * ipr_bus_reset_done - Op done function for bus reset.
5408  * @ipr_cmd:    ipr command struct
5409  *
5410  * This function is the op done function for a bus reset
5411  *
5412  * Return value:
5413  *      none
5414  **/
5415 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5416 {
5417         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5418         struct ipr_resource_entry *res;
5419
5420         ENTER;
5421         if (!ioa_cfg->sis64)
5422                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5423                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5424                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5425                                 break;
5426                         }
5427                 }
5428
5429         /*
5430          * If abort has not completed, indicate the reset has, else call the
5431          * abort's done function to wake the sleeping eh thread
5432          */
5433         if (ipr_cmd->sibling->sibling)
5434                 ipr_cmd->sibling->sibling = NULL;
5435         else
5436                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5437
5438         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5439         LEAVE;
5440 }
5441
5442 /**
5443  * ipr_abort_timeout - An abort task has timed out
 * @t: Timer context used to fetch ipr command struct
5445  *
5446  * This function handles when an abort task times out. If this
5447  * happens we issue a bus reset since we have resources tied
5448  * up that must be freed before returning to the midlayer.
5449  *
5450  * Return value:
5451  *      none
5452  **/
5453 static void ipr_abort_timeout(struct timer_list *t)
5454 {
5455         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5456         struct ipr_cmnd *reset_cmd;
5457         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5458         struct ipr_cmd_pkt *cmd_pkt;
5459         unsigned long lock_flags = 0;
5460
5461         ENTER;
5462         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5463         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5464                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5465                 return;
5466         }
5467
5468         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5469         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5470         ipr_cmd->sibling = reset_cmd;
5471         reset_cmd->sibling = ipr_cmd;
5472         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5473         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5474         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5475         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5476         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5477
5478         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5479         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5480         LEAVE;
5481 }
5482
5483 /**
5484  * ipr_cancel_op - Cancel specified op
5485  * @scsi_cmd:   scsi command struct
5486  *
5487  * This function cancels specified op.
5488  *
5489  * Return value:
5490  *      SUCCESS / FAILED
5491  **/
5492 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5493 {
5494         struct ipr_cmnd *ipr_cmd;
5495         struct ipr_ioa_cfg *ioa_cfg;
5496         struct ipr_resource_entry *res;
5497         struct ipr_cmd_pkt *cmd_pkt;
5498         u32 ioasc, int_reg;
5499         int i, op_found = 0;
5500         struct ipr_hrr_queue *hrrq;
5501
5502         ENTER;
5503         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5504         res = scsi_cmd->device->hostdata;
5505
5506         /* If we are currently going through reset/reload, return failed.
5507          * This will force the mid-layer to call ipr_eh_host_reset,
5508          * which will then go to sleep and wait for the reset to complete
5509          */
5510         if (ioa_cfg->in_reset_reload ||
5511             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5512                 return FAILED;
5513         if (!res)
5514                 return FAILED;
5515
5516         /*
5517          * If we are aborting a timed out op, chances are that the timeout was caused
5518          * by a still not detected EEH error. In such cases, reading a register will
5519          * trigger the EEH recovery infrastructure.
5520          */
5521         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5522
5523         if (!ipr_is_gscsi(res))
5524                 return FAILED;
5525
5526         for_each_hrrq(hrrq, ioa_cfg) {
5527                 spin_lock(&hrrq->_lock);
5528                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5529                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5530                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5531                                         op_found = 1;
5532                                         break;
5533                                 }
5534                         }
5535                 }
5536                 spin_unlock(&hrrq->_lock);
5537         }
5538
5539         if (!op_found)
5540                 return SUCCESS;
5541
5542         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5543         ipr_cmd->ioarcb.res_handle = res->res_handle;
5544         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5545         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5546         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5547         ipr_cmd->u.sdev = scsi_cmd->device;
5548
5549         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5550                     scsi_cmd->cmnd[0]);
5551         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5552         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5553
5554         /*
5555          * If the abort task timed out and we sent a bus reset, we will get
5556          * one the following responses to the abort
5557          */
5558         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5559                 ioasc = 0;
5560                 ipr_trace;
5561         }
5562
5563         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5564         if (!ipr_is_naca_model(res))
5565                 res->needs_sync_complete = 1;
5566
5567         LEAVE;
5568         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5569 }
5570
5571 /**
5572  * ipr_scan_finished - Check if the device scan is complete
5573  * @shost:          scsi host struct
5574  * @elapsed_time:   elapsed scan time in jiffies
5575  * Return value:
5576  *      0 if scan in progress / 1 if scan is complete
5577  **/
5578 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5579 {
5580         unsigned long lock_flags;
5581         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5582         int rc = 0;
5583
5584         spin_lock_irqsave(shost->host_lock, lock_flags);
5585         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5586                 rc = 1;
5587         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5588                 rc = 1;
5589         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5590         return rc;
5591 }
5592
5593 /**
5594  * ipr_eh_abort - Abort a single op
5595  * @scsi_cmd:   scsi command struct
5596  *
5597  * Return value:
5598  *      SUCCESS / FAILED
5599  **/
5600 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5601 {
5602         unsigned long flags;
5603         int rc;
5604         struct ipr_ioa_cfg *ioa_cfg;
5605
5606         ENTER;
5607
5608         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5609
5610         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5611         rc = ipr_cancel_op(scsi_cmd);
5612         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5613
5614         if (rc == SUCCESS)
5615                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5616         LEAVE;
5617         return rc;
5618 }
5619
5620 /**
5621  * ipr_handle_other_interrupt - Handle "other" interrupts
5622  * @ioa_cfg:    ioa config struct
5623  * @int_reg:    interrupt register
5624  *
5625  * Return value:
5626  *      IRQ_NONE / IRQ_HANDLED
5627  **/
5628 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5629                                               u32 int_reg)
5630 {
5631         irqreturn_t rc = IRQ_HANDLED;
5632         u32 int_mask_reg;
5633
5634         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5635         int_reg &= ~int_mask_reg;
5636
5637         /* If no adapter interrupt actually occurred, ignore it, unless this
5638          * is SIS 64 hardware, in which case check for a stage change interrupt.
5639          */
5640         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5641                 if (ioa_cfg->sis64) {
5642                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5643                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5644                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5645
5646                                 /* clear stage change */
5647                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5648                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5649                                 list_del(&ioa_cfg->reset_cmd->queue);
5650                                 del_timer(&ioa_cfg->reset_cmd->timer);
5651                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5652                                 return IRQ_HANDLED;
5653                         }
5654                 }
5655
5656                 return IRQ_NONE;
5657         }
5658
5659         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5660                 /* Mask the interrupt */
5661                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5662                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5663
5664                 list_del(&ioa_cfg->reset_cmd->queue);
5665                 del_timer(&ioa_cfg->reset_cmd->timer);
5666                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5667         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5668                 if (ioa_cfg->clear_isr) {
5669                         if (ipr_debug && printk_ratelimit())
5670                                 dev_err(&ioa_cfg->pdev->dev,
5671                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5672                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5673                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5674                         return IRQ_NONE;
5675                 }
5676         } else {
5677                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5678                         ioa_cfg->ioa_unit_checked = 1;
5679                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5680                         dev_err(&ioa_cfg->pdev->dev,
5681                                 "No Host RRQ. 0x%08X\n", int_reg);
5682                 else
5683                         dev_err(&ioa_cfg->pdev->dev,
5684                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5685
5686                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5687                         ioa_cfg->sdt_state = GET_DUMP;
5688
5689                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5690                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5691         }
5692
5693         return rc;
5694 }
5695
5696 /**
5697  * ipr_isr_eh - Interrupt service routine error handler
5698  * @ioa_cfg:    ioa config struct
5699  * @msg:        message to log
5700  * @number:     value to log along with the message
5701  * Return value:
5702  *      none
5703  **/
5704 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5705 {
5706         ioa_cfg->errors_logged++;
5707         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5708
5709         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5710                 ioa_cfg->sdt_state = GET_DUMP;
5711
5712         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5713 }
5714
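/**
 * ipr_process_hrrq - Process responses from an HRR queue
 * @hrr_queue:  host request response queue to drain
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list that completed commands are moved onto
 *
 * Called with the HRR queue's lock held.
 *
 * Return value:
 *      number of responses processed
 **/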
5715 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5716                                                 struct list_head *doneq)
5717 {
5718         u32 ioasc;
5719         u16 cmd_index;
5720         struct ipr_cmnd *ipr_cmd;
5721         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5722         int num_hrrq = 0;
5723
5724         /* If interrupts are disabled, ignore the interrupt */
5725         if (!hrr_queue->allow_interrupts)
5726                 return 0;
5727
5728         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5729                hrr_queue->toggle_bit) {
5730
5731                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5732                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5733                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5734
5735                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5736                              cmd_index < hrr_queue->min_cmd_id)) {
5737                         ipr_isr_eh(ioa_cfg,
5738                                 "Invalid response handle from IOA: ",
5739                                 cmd_index);
5740                         break;
5741                 }
5742
5743                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5744                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5745
5746                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5747
5748                 list_move_tail(&ipr_cmd->queue, doneq);
5749
5750                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5751                         hrr_queue->hrrq_curr++;
5752                 } else {
5753                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5754                         hrr_queue->toggle_bit ^= 1u;
5755                 }
5756                 num_hrrq++;
5757                 if (budget > 0 && num_hrrq >= budget)
5758                         break;
5759         }
5760
5761         return num_hrrq;
5762 }
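/*
 * A note on the toggle bit used above: the HRRQ is a circular buffer that
 * the adapter produces into and the driver consumes from.  Each time the
 * producer wraps, it flips the toggle bit it writes, so an entry is valid
 * only while its IPR_HRRQ_TOGGLE_BIT matches the value the driver expects.
 * A minimal consumer sketch, with the ipr-specific details elided and the
 * names hypothetical:
 *
 *      while ((be32_to_cpu(*curr) & TOGGLE_BIT) == expected) {
 *              consume(be32_to_cpu(*curr));
 *              if (curr < end)
 *                      curr++;
 *              else {
 *                      curr = start;    // wrap around...
 *                      expected ^= 1u;  // ...and expect the flipped bit
 *              }
 *      }
 */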
5763
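/**
 * ipr_iopoll - irq_poll callback for polled completion processing
 * @iop:        irq_poll structure embedded in the hrr queue
 * @budget:     maximum number of ops to complete in this pass
 *
 * Completes up to @budget ops outside of hard interrupt context and
 * calls irq_poll_complete() to end the polling cycle once fewer than
 * @budget responses were ready.
 *
 * Return value:
 *      number of ops completed
 **/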
5764 static int ipr_iopoll(struct irq_poll *iop, int budget)
5765 {
5766         struct ipr_ioa_cfg *ioa_cfg;
5767         struct ipr_hrr_queue *hrrq;
5768         struct ipr_cmnd *ipr_cmd, *temp;
5769         unsigned long hrrq_flags;
5770         int completed_ops;
5771         LIST_HEAD(doneq);
5772
5773         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5774         ioa_cfg = hrrq->ioa_cfg;
5775
5776         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5777         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5778
5779         if (completed_ops < budget)
5780                 irq_poll_complete(iop);
5781         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5782
5783         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5784                 list_del(&ipr_cmd->queue);
5785                 del_timer(&ipr_cmd->timer);
5786                 ipr_cmd->fast_done(ipr_cmd);
5787         }
5788
5789         return completed_ops;
5790 }
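/*
 * The iopoll path only runs when the adapter was configured for polled
 * completions (iopoll_weight set, SIS64, multiple vectors).  Setup is
 * expected to use the standard irq_poll API from the driver's init path;
 * a sketch, assuming a weight has already been chosen:
 *
 *      irq_poll_init(&hrrq->iopoll, ioa_cfg->iopoll_weight, ipr_iopoll);
 *
 * With that in place, ipr_isr_mhrrq() below only calls irq_poll_sched()
 * from hard interrupt context and lets ipr_iopoll() drain the queue in
 * softirq context.
 */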
5791
5792 /**
5793  * ipr_isr - Interrupt service routine
5794  * @irq:        irq number
5795  * @devp:       pointer to the hrr queue being serviced
5796  *
5797  * Return value:
5798  *      IRQ_NONE / IRQ_HANDLED
5799  **/
5800 static irqreturn_t ipr_isr(int irq, void *devp)
5801 {
5802         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5803         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5804         unsigned long hrrq_flags = 0;
5805         u32 int_reg = 0;
5806         int num_hrrq = 0;
5807         int irq_none = 0;
5808         struct ipr_cmnd *ipr_cmd, *temp;
5809         irqreturn_t rc = IRQ_NONE;
5810         LIST_HEAD(doneq);
5811
5812         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5813         /* If interrupts are disabled, ignore the interrupt */
5814         if (!hrrq->allow_interrupts) {
5815                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5816                 return IRQ_NONE;
5817         }
5818
5819         while (1) {
5820                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5821                         rc = IRQ_HANDLED;
5822
5823                         if (!ioa_cfg->clear_isr)
5824                                 break;
5825
5826                         /* Clear the PCI interrupt */
5827                         num_hrrq = 0;
5828                         do {
5829                                 writel(IPR_PCII_HRRQ_UPDATED,
5830                                      ioa_cfg->regs.clr_interrupt_reg32);
5831                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5832                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5833                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5834
5835                 } else if (rc == IRQ_NONE && irq_none == 0) {
5836                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5837                         irq_none++;
5838                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5839                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5840                         ipr_isr_eh(ioa_cfg,
5841                                 "Error clearing HRRQ: ", num_hrrq);
5842                         rc = IRQ_HANDLED;
5843                         break;
5844                 } else
5845                         break;
5846         }
5847
5848         if (unlikely(rc == IRQ_NONE))
5849                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5850
5851         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5852         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5853                 list_del(&ipr_cmd->queue);
5854                 del_timer(&ipr_cmd->timer);
5855                 ipr_cmd->fast_done(ipr_cmd);
5856         }
5857         return rc;
5858 }
5859
5860 /**
5861  * ipr_isr_mhrrq - Interrupt service routine for additional HRR queues
5862  * @irq:        irq number
5863  * @devp:       pointer to the hrr queue being serviced
5864  *
5865  * Return value:
5866  *      IRQ_NONE / IRQ_HANDLED
5867  **/
5868 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5869 {
5870         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5871         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5872         unsigned long hrrq_flags = 0;
5873         struct ipr_cmnd *ipr_cmd, *temp;
5874         irqreturn_t rc = IRQ_NONE;
5875         LIST_HEAD(doneq);
5876
5877         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5878
5879         /* If interrupts are disabled, ignore the interrupt */
5880         if (!hrrq->allow_interrupts) {
5881                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5882                 return IRQ_NONE;
5883         }
5884
5885         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5886                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5887                        hrrq->toggle_bit) {
5888                         irq_poll_sched(&hrrq->iopoll);
5889                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5890                         return IRQ_HANDLED;
5891                 }
5892         } else {
5893                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5894                         hrrq->toggle_bit)
5895
5896                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5897                                 rc = IRQ_HANDLED;
5898         }
5899
5900         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5901
5902         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5903                 list_del(&ipr_cmd->queue);
5904                 del_timer(&ipr_cmd->timer);
5905                 ipr_cmd->fast_done(ipr_cmd);
5906         }
5907         return rc;
5908 }
5909
5910 /**
5911  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5912  * @ioa_cfg:    ioa config struct
5913  * @ipr_cmd:    ipr command struct
5914  *
5915  * Return value:
5916  *      0 on success / -1 on failure
5917  **/
5918 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5919                              struct ipr_cmnd *ipr_cmd)
5920 {
5921         int i, nseg;
5922         struct scatterlist *sg;
5923         u32 length;
5924         u32 ioadl_flags = 0;
5925         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5926         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5927         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5928
5929         length = scsi_bufflen(scsi_cmd);
5930         if (!length)
5931                 return 0;
5932
5933         nseg = scsi_dma_map(scsi_cmd);
5934         if (nseg < 0) {
5935                 if (printk_ratelimit())
5936                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5937                 return -1;
5938         }
5939
5940         ipr_cmd->dma_use_sg = nseg;
5941
5942         ioarcb->data_transfer_length = cpu_to_be32(length);
5943         ioarcb->ioadl_len =
5944                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5945
5946         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5947                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5948                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5949         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5950                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5951
5952         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5953                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5954                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5955                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5956         }
5957
5958         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5959         return 0;
5960 }
5961
5962 /**
5963  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5964  * @ioa_cfg:    ioa config struct
5965  * @ipr_cmd:    ipr command struct
5966  *
5967  * Return value:
5968  *      0 on success / -1 on failure
5969  **/
5970 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5971                            struct ipr_cmnd *ipr_cmd)
5972 {
5973         int i, nseg;
5974         struct scatterlist *sg;
5975         u32 length;
5976         u32 ioadl_flags = 0;
5977         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5978         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5979         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5980
5981         length = scsi_bufflen(scsi_cmd);
5982         if (!length)
5983                 return 0;
5984
5985         nseg = scsi_dma_map(scsi_cmd);
5986         if (nseg < 0) {
5987                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5988                 return -1;
5989         }
5990
5991         ipr_cmd->dma_use_sg = nseg;
5992
5993         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5994                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5995                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5996                 ioarcb->data_transfer_length = cpu_to_be32(length);
5997                 ioarcb->ioadl_len =
5998                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5999         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6000                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6001                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6002                 ioarcb->read_ioadl_len =
6003                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6004         }
6005
6006         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6007                 ioadl = ioarcb->u.add_data.u.ioadl;
6008                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6009                                     offsetof(struct ipr_ioarcb, u.add_data));
6010                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6011         }
6012
6013         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6014                 ioadl[i].flags_and_data_len =
6015                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6016                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6017         }
6018
6019         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6020         return 0;
6021 }
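/*
 * Design note: in the 32-bit path above, a scatter/gather list short
 * enough to fit in the IOARCB's add_data area is stored inline in the
 * IOARCB itself (write_ioadl_addr is pointed back into the command's
 * own DMA buffer), which should spare the adapter a separate fetch of
 * an external descriptor list for small I/Os.
 */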
6022
6023 /**
6024  * __ipr_erp_done - Process completion of ERP for a device
6025  * @ipr_cmd:            ipr command struct
6026  *
6027  * This function copies the sense buffer into the scsi_cmd
6028  * struct and invokes the scsi_done function.
6029  *
6030  * Return value:
6031  *      nothing
6032  **/
6033 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6034 {
6035         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6036         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6037         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6038
6039         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6040                 scsi_cmd->result |= (DID_ERROR << 16);
6041                 scmd_printk(KERN_ERR, scsi_cmd,
6042                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6043         } else {
6044                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6045                        SCSI_SENSE_BUFFERSIZE);
6046         }
6047
6048         if (res) {
6049                 if (!ipr_is_naca_model(res))
6050                         res->needs_sync_complete = 1;
6051                 res->in_erp = 0;
6052         }
6053         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6054         scsi_cmd->scsi_done(scsi_cmd);
6055         if (ipr_cmd->eh_comp)
6056                 complete(ipr_cmd->eh_comp);
6057         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6058 }
6059
6060 /**
6061  * ipr_erp_done - Process completion of ERP for a device
6062  * @ipr_cmd:            ipr command struct
6063  *
6064  * This function copies the sense buffer into the scsi_cmd
6065  * struct and invokes the scsi_done function.
6066  *
6067  * Return value:
6068  *      nothing
6069  **/
6070 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6071 {
6072         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6073         unsigned long hrrq_flags;
6074
6075         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6076         __ipr_erp_done(ipr_cmd);
6077         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6078 }
6079
6080 /**
6081  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6082  * @ipr_cmd:    ipr command struct
6083  *
6084  * Return value:
6085  *      none
6086  **/
6087 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6088 {
6089         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6090         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6091         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6092
6093         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6094         ioarcb->data_transfer_length = 0;
6095         ioarcb->read_data_transfer_length = 0;
6096         ioarcb->ioadl_len = 0;
6097         ioarcb->read_ioadl_len = 0;
6098         ioasa->hdr.ioasc = 0;
6099         ioasa->hdr.residual_data_len = 0;
6100
6101         if (ipr_cmd->ioa_cfg->sis64)
6102                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6103                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6104         else {
6105                 ioarcb->write_ioadl_addr =
6106                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6107                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6108         }
6109 }
6110
6111 /**
6112  * __ipr_erp_request_sense - Send request sense to a device
6113  * @ipr_cmd:    ipr command struct
6114  *
6115  * This function sends a request sense to a device as a result
6116  * of a check condition.
6117  *
6118  * Return value:
6119  *      nothing
6120  **/
6121 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6122 {
6123         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6124         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6125
6126         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6127                 __ipr_erp_done(ipr_cmd);
6128                 return;
6129         }
6130
6131         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6132
6133         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6134         cmd_pkt->cdb[0] = REQUEST_SENSE;
6135         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6136         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6137         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6138         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6139
6140         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6141                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6142
6143         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6144                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6145 }
6146
6147 /**
6148  * ipr_erp_request_sense - Send request sense to a device
6149  * @ipr_cmd:    ipr command struct
6150  *
6151  * This function sends a request sense to a device as a result
6152  * of a check condition.
6153  *
6154  * Return value:
6155  *      nothing
6156  **/
6157 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6158 {
6159         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6160         unsigned long hrrq_flags;
6161
6162         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6163         __ipr_erp_request_sense(ipr_cmd);
6164         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6165 }
6166
6167 /**
6168  * ipr_erp_cancel_all - Send cancel all to a device
6169  * @ipr_cmd:    ipr command struct
6170  *
6171  * This function sends a cancel all to a device to clear the
6172  * queue. If we are running TCQ on the device, QERR is set to 1,
6173  * which means all outstanding ops have been dropped on the floor.
6174  * Cancel all will return them to us.
6175  *
6176  * Return value:
6177  *      nothing
6178  **/
6179 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6180 {
6181         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6182         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6183         struct ipr_cmd_pkt *cmd_pkt;
6184
6185         res->in_erp = 1;
6186
6187         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6188
6189         if (!scsi_cmd->device->simple_tags) {
6190                 __ipr_erp_request_sense(ipr_cmd);
6191                 return;
6192         }
6193
6194         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6195         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6196         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6197
6198         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6199                    IPR_CANCEL_ALL_TIMEOUT);
6200 }
6201
6202 /**
6203  * ipr_dump_ioasa - Dump contents of IOASA
6204  * @ioa_cfg:    ioa config struct
6205  * @ipr_cmd:    ipr command struct
6206  * @res:                resource entry struct
6207  *
6208  * This function is invoked by the interrupt handler when ops
6209  * fail. It will log the IOASA if appropriate. Only called
6210  * for GPDD ops.
6211  *
6212  * Return value:
6213  *      none
6214  **/
6215 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6216                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6217 {
6218         int i;
6219         u16 data_len;
6220         u32 ioasc, fd_ioasc;
6221         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6222         __be32 *ioasa_data = (__be32 *)ioasa;
6223         int error_index;
6224
6225         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6226         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6227
6228         if (ioasc == 0)
6229                 return;
6230
6231         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6232                 return;
6233
6234         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6235                 error_index = ipr_get_error(fd_ioasc);
6236         else
6237                 error_index = ipr_get_error(ioasc);
6238
6239         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6240                 /* Don't log an error if the IOA already logged one */
6241                 if (ioasa->hdr.ilid != 0)
6242                         return;
6243
6244                 if (!ipr_is_gscsi(res))
6245                         return;
6246
6247                 if (ipr_error_table[error_index].log_ioasa == 0)
6248                         return;
6249         }
6250
6251         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6252
6253         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6254         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6255                 data_len = sizeof(struct ipr_ioasa64);
6256         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6257                 data_len = sizeof(struct ipr_ioasa);
6258
6259         ipr_err("IOASA Dump:\n");
6260
6261         for (i = 0; i < data_len / 4; i += 4) {
6262                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6263                         be32_to_cpu(ioasa_data[i]),
6264                         be32_to_cpu(ioasa_data[i+1]),
6265                         be32_to_cpu(ioasa_data[i+2]),
6266                         be32_to_cpu(ioasa_data[i+3]));
6267         }
6268 }
6269
6270 /**
6271  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6272  * @ipr_cmd:    ipr command struct; sense data is generated from its
6273  *              IOASA and written to the scsi_cmd's sense buffer
6274  *
6275  * Return value:
6276  *      none
6277  **/
6278 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6279 {
6280         u32 failing_lba;
6281         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6282         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6283         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6284         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6285
6286         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6287
6288         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6289                 return;
6290
6291         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6292
6293         if (ipr_is_vset_device(res) &&
6294             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6295             ioasa->u.vset.failing_lba_hi != 0) {
6296                 sense_buf[0] = 0x72;
6297                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6298                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6299                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6300
6301                 sense_buf[7] = 12;
6302                 sense_buf[8] = 0;
6303                 sense_buf[9] = 0x0A;
6304                 sense_buf[10] = 0x80;
6305
6306                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6307
6308                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6309                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6310                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6311                 sense_buf[15] = failing_lba & 0x000000ff;
6312
6313                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6314
6315                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6316                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6317                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6318                 sense_buf[19] = failing_lba & 0x000000ff;
6319         } else {
6320                 sense_buf[0] = 0x70;
6321                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6322                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6323                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6324
6325                 /* Illegal request */
6326                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6327                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6328                         sense_buf[7] = 10;      /* additional length */
6329
6330                         /* IOARCB was in error */
6331                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6332                                 sense_buf[15] = 0xC0;
6333                         else    /* Parameter data was invalid */
6334                                 sense_buf[15] = 0x80;
6335
6336                         sense_buf[16] =
6337                             ((IPR_FIELD_POINTER_MASK &
6338                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6339                         sense_buf[17] =
6340                             (IPR_FIELD_POINTER_MASK &
6341                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6342                 } else {
6343                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6344                                 if (ipr_is_vset_device(res))
6345                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6346                                 else
6347                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6348
6349                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6350                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6351                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6352                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6353                                 sense_buf[6] = failing_lba & 0x000000ff;
6354                         }
6355
6356                         sense_buf[7] = 6;       /* additional length */
6357                 }
6358         }
6359 }
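/*
 * Format note for ipr_gen_sense(): descriptor-format sense data
 * (response code 0x72) is generated only for vset devices whose failing
 * LBA does not fit in 32 bits, since the fixed format's 4-byte
 * information field cannot hold it.  Everything else gets fixed-format
 * sense data (response code 0x70), with the failing LBA, when present,
 * placed in bytes 3-6 and the Valid bit set in byte 0.
 */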
6360
6361 /**
6362  * ipr_get_autosense - Copy autosense data to sense buffer
6363  * @ipr_cmd:    ipr command struct
6364  *
6365  * This function copies the autosense buffer to the buffer
6366  * in the scsi_cmd, if there is autosense available.
6367  *
6368  * Return value:
6369  *      1 if autosense was available / 0 if not
6370  **/
6371 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6372 {
6373         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6374         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6375
6376         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6377                 return 0;
6378
6379         if (ipr_cmd->ioa_cfg->sis64)
6380                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6381                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6382                            SCSI_SENSE_BUFFERSIZE));
6383         else
6384                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6385                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6386                            SCSI_SENSE_BUFFERSIZE));
6387         return 1;
6388 }
6389
6390 /**
6391  * ipr_erp_start - Process an error response for a SCSI op
6392  * @ioa_cfg:    ioa config struct
6393  * @ipr_cmd:    ipr command struct
6394  *
6395  * This function determines whether or not to initiate ERP
6396  * on the affected device.
6397  *
6398  * Return value:
6399  *      nothing
6400  **/
6401 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6402                               struct ipr_cmnd *ipr_cmd)
6403 {
6404         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6405         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6406         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6407         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6408
6409         if (!res) {
6410                 __ipr_scsi_eh_done(ipr_cmd);
6411                 return;
6412         }
6413
6414         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6415                 ipr_gen_sense(ipr_cmd);
6416
6417         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6418
6419         switch (masked_ioasc) {
6420         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6421                 if (ipr_is_naca_model(res))
6422                         scsi_cmd->result |= (DID_ABORT << 16);
6423                 else
6424                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6425                 break;
6426         case IPR_IOASC_IR_RESOURCE_HANDLE:
6427         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6428                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6429                 break;
6430         case IPR_IOASC_HW_SEL_TIMEOUT:
6431                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6432                 if (!ipr_is_naca_model(res))
6433                         res->needs_sync_complete = 1;
6434                 break;
6435         case IPR_IOASC_SYNC_REQUIRED:
6436                 if (!res->in_erp)
6437                         res->needs_sync_complete = 1;
6438                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6439                 break;
6440         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6441         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6442                 /*
6443                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6444                  * so the SCSI mid-layer and upper layers can handle it accordingly.
6445                  */
6446                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6447                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6448                 break;
6449         case IPR_IOASC_BUS_WAS_RESET:
6450         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6451                 /*
6452                  * Report the bus reset and ask for a retry. The device
6453                  * will return a CHECK CONDITION/UNIT ATTENTION on the next command.
6454                  */
6455                 if (!res->resetting_device)
6456                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6457                 scsi_cmd->result |= (DID_ERROR << 16);
6458                 if (!ipr_is_naca_model(res))
6459                         res->needs_sync_complete = 1;
6460                 break;
6461         case IPR_IOASC_HW_DEV_BUS_STATUS:
6462                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6463                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6464                         if (!ipr_get_autosense(ipr_cmd)) {
6465                                 if (!ipr_is_naca_model(res)) {
6466                                         ipr_erp_cancel_all(ipr_cmd);
6467                                         return;
6468                                 }
6469                         }
6470                 }
6471                 if (!ipr_is_naca_model(res))
6472                         res->needs_sync_complete = 1;
6473                 break;
6474         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6475                 break;
6476         case IPR_IOASC_IR_NON_OPTIMIZED:
6477                 if (res->raw_mode) {
6478                         res->raw_mode = 0;
6479                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6480                 } else
6481                         scsi_cmd->result |= (DID_ERROR << 16);
6482                 break;
6483         default:
6484                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6485                         scsi_cmd->result |= (DID_ERROR << 16);
6486                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6487                         res->needs_sync_complete = 1;
6488                 break;
6489         }
6490
6491         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6492         scsi_cmd->scsi_done(scsi_cmd);
6493         if (ipr_cmd->eh_comp)
6494                 complete(ipr_cmd->eh_comp);
6495         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6496 }
6497
6498 /**
6499  * ipr_scsi_done - mid-layer done function
6500  * @ipr_cmd:    ipr command struct
6501  *
6502  * This function is invoked by the interrupt handler for
6503  * ops generated by the SCSI mid-layer
6504  *
6505  * Return value:
6506  *      none
6507  **/
6508 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6509 {
6510         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6511         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6512         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6513         unsigned long lock_flags;
6514
6515         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6516
6517         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6518                 scsi_dma_unmap(scsi_cmd);
6519
6520                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6521                 scsi_cmd->scsi_done(scsi_cmd);
6522                 if (ipr_cmd->eh_comp)
6523                         complete(ipr_cmd->eh_comp);
6524                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6525                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6526         } else {
6527                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6528                 spin_lock(&ipr_cmd->hrrq->_lock);
6529                 ipr_erp_start(ioa_cfg, ipr_cmd);
6530                 spin_unlock(&ipr_cmd->hrrq->_lock);
6531                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6532         }
6533 }
6534
6535 /**
6536  * ipr_queuecommand - Queue a mid-layer request
6537  * @shost:              scsi host struct
6538  * @scsi_cmd:   scsi command struct
6539  *
6540  * This function queues a request generated by the mid-layer.
6541  *
6542  * Return value:
6543  *      0 on success
6544  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6545  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6546  **/
6547 static int ipr_queuecommand(struct Scsi_Host *shost,
6548                             struct scsi_cmnd *scsi_cmd)
6549 {
6550         struct ipr_ioa_cfg *ioa_cfg;
6551         struct ipr_resource_entry *res;
6552         struct ipr_ioarcb *ioarcb;
6553         struct ipr_cmnd *ipr_cmd;
6554         unsigned long hrrq_flags, lock_flags;
6555         int rc;
6556         struct ipr_hrr_queue *hrrq;
6557         int hrrq_id;
6558
6559         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6560
6561         scsi_cmd->result = (DID_OK << 16);
6562         res = scsi_cmd->device->hostdata;
6563
6564         if (ipr_is_gata(res) && res->sata_port) {
6565                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6566                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6567                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6568                 return rc;
6569         }
6570
6571         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6572         hrrq = &ioa_cfg->hrrq[hrrq_id];
6573
6574         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6575         /*
6576          * We are currently blocking all devices due to a host reset.
6577          * We have told the host to stop giving us new requests, but
6578          * ERP ops don't count. FIXME
6579          */
6580         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6581                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6582                 return SCSI_MLQUEUE_HOST_BUSY;
6583         }
6584
6585         /*
6586          * FIXME - Create a scsi_set_host_offline interface
6587          *  so that the ioa_is_dead check can be removed
6588          */
6589         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6590                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6591                 goto err_nodev;
6592         }
6593
6594         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6595         if (ipr_cmd == NULL) {
6596                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6597                 return SCSI_MLQUEUE_HOST_BUSY;
6598         }
6599         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6600
6601         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6602         ioarcb = &ipr_cmd->ioarcb;
6603
6604         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6605         ipr_cmd->scsi_cmd = scsi_cmd;
6606         ipr_cmd->done = ipr_scsi_eh_done;
6607
6608         if (ipr_is_gscsi(res)) {
6609                 if (scsi_cmd->underflow == 0)
6610                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6611
6612                 if (res->reset_occurred) {
6613                         res->reset_occurred = 0;
6614                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6615                 }
6616         }
6617
6618         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6619                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6620
6621                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6622                 if (scsi_cmd->flags & SCMD_TAGGED)
6623                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6624                 else
6625                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6626         }
6627
6628         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6629             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6630                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6631         }
6632         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6633                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6634
6635                 if (scsi_cmd->underflow == 0)
6636                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6637         }
6638
6639         if (ioa_cfg->sis64)
6640                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6641         else
6642                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6643
6644         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6645         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6646                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6647                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6648                 if (!rc)
6649                         scsi_dma_unmap(scsi_cmd);
6650                 return SCSI_MLQUEUE_HOST_BUSY;
6651         }
6652
6653         if (unlikely(hrrq->ioa_is_dead)) {
6654                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6655                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6656                 scsi_dma_unmap(scsi_cmd);
6657                 goto err_nodev;
6658         }
6659
6660         ioarcb->res_handle = res->res_handle;
6661         if (res->needs_sync_complete) {
6662                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6663                 res->needs_sync_complete = 0;
6664         }
6665         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6666         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6667         ipr_send_command(ipr_cmd);
6668         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6669         return 0;
6670
6671 err_nodev:
6672         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6673         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6674         scsi_cmd->result = (DID_NO_CONNECT << 16);
6675         scsi_cmd->scsi_done(scsi_cmd);
6676         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6677         return 0;
6678 }
6679
6680 /**
6681  * ipr_ioctl - IOCTL handler
6682  * @sdev:       scsi device struct
6683  * @cmd:        IOCTL cmd
6684  * @arg:        IOCTL arg
6685  *
6686  * Return value:
6687  *      0 on success / other on failure
6688  **/
6689 static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6690                      void __user *arg)
6691 {
6692         struct ipr_resource_entry *res;
6693
6694         res = (struct ipr_resource_entry *)sdev->hostdata;
6695         if (res && ipr_is_gata(res)) {
6696                 if (cmd == HDIO_GET_IDENTITY)
6697                         return -ENOTTY;
6698                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6699         }
6700
6701         return -EINVAL;
6702 }
6703
6704 /**
6705  * ipr_ioa_info - Get information about the card/driver
6706  * @host:       scsi host struct
6707  *
6708  * Return value:
6709  *      pointer to buffer with description string
6710  **/
6711 static const char *ipr_ioa_info(struct Scsi_Host *host)
6712 {
6713         static char buffer[512];
6714         struct ipr_ioa_cfg *ioa_cfg;
6715         unsigned long lock_flags = 0;
6716
6717         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6718
6719         spin_lock_irqsave(host->host_lock, lock_flags);
6720         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6721         spin_unlock_irqrestore(host->host_lock, lock_flags);
6722
6723         return buffer;
6724 }
6725
6726 static struct scsi_host_template driver_template = {
6727         .module = THIS_MODULE,
6728         .name = "IPR",
6729         .info = ipr_ioa_info,
6730         .ioctl = ipr_ioctl,
6731         .queuecommand = ipr_queuecommand,
6732         .eh_abort_handler = ipr_eh_abort,
6733         .eh_device_reset_handler = ipr_eh_dev_reset,
6734         .eh_host_reset_handler = ipr_eh_host_reset,
6735         .slave_alloc = ipr_slave_alloc,
6736         .slave_configure = ipr_slave_configure,
6737         .slave_destroy = ipr_slave_destroy,
6738         .scan_finished = ipr_scan_finished,
6739         .target_alloc = ipr_target_alloc,
6740         .target_destroy = ipr_target_destroy,
6741         .change_queue_depth = ipr_change_queue_depth,
6742         .bios_param = ipr_biosparam,
6743         .can_queue = IPR_MAX_COMMANDS,
6744         .this_id = -1,
6745         .sg_tablesize = IPR_MAX_SGLIST,
6746         .max_sectors = IPR_IOA_MAX_SECTORS,
6747         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6748         .shost_attrs = ipr_ioa_attrs,
6749         .sdev_attrs = ipr_dev_attrs,
6750         .proc_name = IPR_NAME,
6751 };
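/*
 * The template above is bound to a Scsi_Host instance from the PCI probe
 * path elsewhere in this file.  A minimal sketch of the usual SCSI host
 * registration sequence (names illustrative; error handling omitted):
 *
 *      struct Scsi_Host *host;
 *
 *      host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
 *      ...
 *      rc = scsi_add_host(host, &pdev->dev);
 *      scsi_scan_host(host);
 */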
6752
6753 /**
6754  * ipr_ata_phy_reset - libata phy_reset handler
6755  * @ap:         ata port to reset
6756  *
6757  **/
6758 static void ipr_ata_phy_reset(struct ata_port *ap)
6759 {
6760         unsigned long flags;
6761         struct ipr_sata_port *sata_port = ap->private_data;
6762         struct ipr_resource_entry *res = sata_port->res;
6763         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6764         int rc;
6765
6766         ENTER;
6767         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6768         while (ioa_cfg->in_reset_reload) {
6769                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6770                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6771                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6772         }
6773
6774         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6775                 goto out_unlock;
6776
6777         rc = ipr_device_reset(ioa_cfg, res);
6778
6779         if (rc) {
6780                 ap->link.device[0].class = ATA_DEV_NONE;
6781                 goto out_unlock;
6782         }
6783
6784         ap->link.device[0].class = res->ata_class;
6785         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6786                 ap->link.device[0].class = ATA_DEV_NONE;
6787
6788 out_unlock:
6789         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6790         LEAVE;
6791 }
6792
6793 /**
6794  * ipr_ata_post_internal - Cleanup after an internal command
6795  * @qc: ATA queued command
6796  *
6797  * Return value:
6798  *      none
6799  **/
6800 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6801 {
6802         struct ipr_sata_port *sata_port = qc->ap->private_data;
6803         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6804         struct ipr_cmnd *ipr_cmd;
6805         struct ipr_hrr_queue *hrrq;
6806         unsigned long flags;
6807
6808         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6809         while (ioa_cfg->in_reset_reload) {
6810                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6811                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6812                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6813         }
6814
6815         for_each_hrrq(hrrq, ioa_cfg) {
6816                 spin_lock(&hrrq->_lock);
6817                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6818                         if (ipr_cmd->qc == qc) {
6819                                 ipr_device_reset(ioa_cfg, sata_port->res);
6820                                 break;
6821                         }
6822                 }
6823                 spin_unlock(&hrrq->_lock);
6824         }
6825         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6826 }
6827
6828 /**
6829  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6830  * @regs:       destination
6831  * @tf: source ATA taskfile
6832  *
6833  * Return value:
6834  *      none
6835  **/
6836 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6837                              struct ata_taskfile *tf)
6838 {
6839         regs->feature = tf->feature;
6840         regs->nsect = tf->nsect;
6841         regs->lbal = tf->lbal;
6842         regs->lbam = tf->lbam;
6843         regs->lbah = tf->lbah;
6844         regs->device = tf->device;
6845         regs->command = tf->command;
6846         regs->hob_feature = tf->hob_feature;
6847         regs->hob_nsect = tf->hob_nsect;
6848         regs->hob_lbal = tf->hob_lbal;
6849         regs->hob_lbam = tf->hob_lbam;
6850         regs->hob_lbah = tf->hob_lbah;
6851         regs->ctl = tf->ctl;
6852 }
6853
6854 /**
6855  * ipr_sata_done - done function for SATA commands
6856  * @ipr_cmd:    ipr command struct
6857  *
6858  * This function is invoked by the interrupt handler for
6859  * ops generated by the SCSI mid-layer to SATA devices
6860  *
6861  * Return value:
6862  *      none
6863  **/
6864 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6865 {
6866         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6867         struct ata_queued_cmd *qc = ipr_cmd->qc;
6868         struct ipr_sata_port *sata_port = qc->ap->private_data;
6869         struct ipr_resource_entry *res = sata_port->res;
6870         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6871
6872         spin_lock(&ipr_cmd->hrrq->_lock);
6873         if (ipr_cmd->ioa_cfg->sis64)
6874                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6875                        sizeof(struct ipr_ioasa_gata));
6876         else
6877                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6878                        sizeof(struct ipr_ioasa_gata));
6879         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6880
6881         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6882                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6883
6884         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6885                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6886         else
6887                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6888         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6889         spin_unlock(&ipr_cmd->hrrq->_lock);
6890         ata_qc_complete(qc);
6891 }
6892
6893 /**
6894  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6895  * @ipr_cmd:    ipr command struct
6896  * @qc:         ATA queued command
6897  *
6898  **/
6899 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6900                                   struct ata_queued_cmd *qc)
6901 {
6902         u32 ioadl_flags = 0;
6903         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6904         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6905         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6906         int len = qc->nbytes;
6907         struct scatterlist *sg;
6908         unsigned int si;
6909         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6910
6911         if (len == 0)
6912                 return;
6913
6914         if (qc->dma_dir == DMA_TO_DEVICE) {
6915                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6916                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6917         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6918                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6919
6920         ioarcb->data_transfer_length = cpu_to_be32(len);
6921         ioarcb->ioadl_len =
6922                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6923         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6924                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6925
6926         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6927                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6928                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6929                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6930
6931                 last_ioadl64 = ioadl64;
6932                 ioadl64++;
6933         }
6934
6935         if (likely(last_ioadl64))
6936                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6937 }
6938
6939 /**
6940  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6941  * @ipr_cmd:    ipr command struct
6942  * @qc:         ATA queued command
6943  *
6944  **/
6945 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6946                                 struct ata_queued_cmd *qc)
6947 {
6948         u32 ioadl_flags = 0;
6949         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6950         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6951         struct ipr_ioadl_desc *last_ioadl = NULL;
6952         int len = qc->nbytes;
6953         struct scatterlist *sg;
6954         unsigned int si;
6955
6956         if (len == 0)
6957                 return;
6958
6959         if (qc->dma_dir == DMA_TO_DEVICE) {
6960                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6961                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6962                 ioarcb->data_transfer_length = cpu_to_be32(len);
6963                 ioarcb->ioadl_len =
6964                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6965         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6966                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6967                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6968                 ioarcb->read_ioadl_len =
6969                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6970         }
6971
6972         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6973                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6974                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6975
6976                 last_ioadl = ioadl;
6977                 ioadl++;
6978         }
6979
6980         if (likely(last_ioadl))
6981                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6982 }
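
/*
 * Note on the 32-bit descriptor format used above: unlike the 64-bit
 * variant, each descriptor packs the IPR_IOADL_FLAGS_* bits and the
 * segment length into one big-endian word. A sketch of a single entry
 * for a 4 KB read segment (illustrative only):
 *
 *	ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | 0x1000);
 *	ioadl->address = cpu_to_be32(sg_dma_address(sg));
 */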
6983
6984 /**
6985  * ipr_qc_defer - Get a free ipr_cmd
6986  * @qc: queued command
6987  *
6988  * Return value:
6989  *      0 if success / ATA_DEFER_LINK if the command must be deferred
6990  **/
6991 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6992 {
6993         struct ata_port *ap = qc->ap;
6994         struct ipr_sata_port *sata_port = ap->private_data;
6995         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6996         struct ipr_cmnd *ipr_cmd;
6997         struct ipr_hrr_queue *hrrq;
6998         int hrrq_id;
6999
7000         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7001         hrrq = &ioa_cfg->hrrq[hrrq_id];
7002
7003         qc->lldd_task = NULL;
7004         spin_lock(&hrrq->_lock);
7005         if (unlikely(hrrq->ioa_is_dead)) {
7006                 spin_unlock(&hrrq->_lock);
7007                 return 0;
7008         }
7009
7010         if (unlikely(!hrrq->allow_cmds)) {
7011                 spin_unlock(&hrrq->_lock);
7012                 return ATA_DEFER_LINK;
7013         }
7014
7015         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7016         if (ipr_cmd == NULL) {
7017                 spin_unlock(&hrrq->_lock);
7018                 return ATA_DEFER_LINK;
7019         }
7020
7021         qc->lldd_task = ipr_cmd;
7022         spin_unlock(&hrrq->_lock);
7023         return 0;
7024 }
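
/*
 * The defer/issue handshake runs through qc->lldd_task: libata calls
 * ->qc_defer first, which either reserves a command block here or asks
 * libata to retry the command later. Rough sketch (illustrative):
 *
 *	if (ipr_qc_defer(qc) == ATA_DEFER_LINK)
 *		;	// libata re-queues the qc
 *	else
 *		ipr_qc_issue(qc);	// consumes qc->lldd_task
 */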
7025
7026 /**
7027  * ipr_qc_issue - Issue a SATA qc to a device
7028  * @qc: queued command
7029  *
7030  * Return value:
7031  *      0 if success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
7032  **/
7033 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7034 {
7035         struct ata_port *ap = qc->ap;
7036         struct ipr_sata_port *sata_port = ap->private_data;
7037         struct ipr_resource_entry *res = sata_port->res;
7038         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7039         struct ipr_cmnd *ipr_cmd;
7040         struct ipr_ioarcb *ioarcb;
7041         struct ipr_ioarcb_ata_regs *regs;
7042
7043         if (qc->lldd_task == NULL)
7044                 ipr_qc_defer(qc);
7045
7046         ipr_cmd = qc->lldd_task;
7047         if (ipr_cmd == NULL)
7048                 return AC_ERR_SYSTEM;
7049
7050         qc->lldd_task = NULL;
7051         spin_lock(&ipr_cmd->hrrq->_lock);
7052         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7053                         ipr_cmd->hrrq->ioa_is_dead)) {
7054                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7055                 spin_unlock(&ipr_cmd->hrrq->_lock);
7056                 return AC_ERR_SYSTEM;
7057         }
7058
7059         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7060         ioarcb = &ipr_cmd->ioarcb;
7061
7062         if (ioa_cfg->sis64) {
7063                 regs = &ipr_cmd->i.ata_ioadl.regs;
7064                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7065         } else
7066                 regs = &ioarcb->u.add_data.u.regs;
7067
7068         memset(regs, 0, sizeof(*regs));
7069         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7070
7071         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7072         ipr_cmd->qc = qc;
7073         ipr_cmd->done = ipr_sata_done;
7074         ipr_cmd->ioarcb.res_handle = res->res_handle;
7075         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7076         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7077         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7078         ipr_cmd->dma_use_sg = qc->n_elem;
7079
7080         if (ioa_cfg->sis64)
7081                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7082         else
7083                 ipr_build_ata_ioadl(ipr_cmd, qc);
7084
7085         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7086         ipr_copy_sata_tf(regs, &qc->tf);
7087         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7088         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7089
7090         switch (qc->tf.protocol) {
7091         case ATA_PROT_NODATA:
7092         case ATA_PROT_PIO:
7093                 break;
7094
7095         case ATA_PROT_DMA:
7096                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7097                 break;
7098
7099         case ATAPI_PROT_PIO:
7100         case ATAPI_PROT_NODATA:
7101                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7102                 break;
7103
7104         case ATAPI_PROT_DMA:
7105                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7106                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7107                 break;
7108
7109         default:
7110                 WARN_ON(1);
7111                 spin_unlock(&ipr_cmd->hrrq->_lock);
7112                 return AC_ERR_INVALID;
7113         }
7114
7115         ipr_send_command(ipr_cmd);
7116         spin_unlock(&ipr_cmd->hrrq->_lock);
7117
7118         return 0;
7119 }
7120
7121 /**
7122  * ipr_qc_fill_rtf - Read result TF
7123  * @qc: ATA queued command
7124  *
7125  * Return value:
7126  *      true
7127  **/
7128 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7129 {
7130         struct ipr_sata_port *sata_port = qc->ap->private_data;
7131         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7132         struct ata_taskfile *tf = &qc->result_tf;
7133
7134         tf->feature = g->error;
7135         tf->nsect = g->nsect;
7136         tf->lbal = g->lbal;
7137         tf->lbam = g->lbam;
7138         tf->lbah = g->lbah;
7139         tf->device = g->device;
7140         tf->command = g->status;
7141         tf->hob_nsect = g->hob_nsect;
7142         tf->hob_lbal = g->hob_lbal;
7143         tf->hob_lbam = g->hob_lbam;
7144         tf->hob_lbah = g->hob_lbah;
7145
7146         return true;
7147 }
7148
7149 static struct ata_port_operations ipr_sata_ops = {
7150         .phy_reset = ipr_ata_phy_reset,
7151         .hardreset = ipr_sata_reset,
7152         .post_internal_cmd = ipr_ata_post_internal,
7153         .qc_prep = ata_noop_qc_prep,
7154         .qc_defer = ipr_qc_defer,
7155         .qc_issue = ipr_qc_issue,
7156         .qc_fill_rtf = ipr_qc_fill_rtf,
7157         .port_start = ata_sas_port_start,
7158         .port_stop = ata_sas_port_stop
7159 };
7160
7161 static struct ata_port_info sata_port_info = {
7162         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7163                           ATA_FLAG_SAS_HOST,
7164         .pio_mask       = ATA_PIO4_ONLY,
7165         .mwdma_mask     = ATA_MWDMA2,
7166         .udma_mask      = ATA_UDMA6,
7167         .port_ops       = &ipr_sata_ops
7168 };
7169
7170 #ifdef CONFIG_PPC_PSERIES
7171 static const u16 ipr_blocked_processors[] = {
7172         PVR_NORTHSTAR,
7173         PVR_PULSAR,
7174         PVR_POWER4,
7175         PVR_ICESTAR,
7176         PVR_SSTAR,
7177         PVR_POWER4p,
7178         PVR_630,
7179         PVR_630p
7180 };
7181
7182 /**
7183  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7184  * @ioa_cfg:    ioa cfg struct
7185  *
7186  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7187  * certain pSeries hardware. This function determines if the given
7188  * adapter is in one of these configurations or not.
7189  *
7190  * Return value:
7191  *      1 if adapter is not supported / 0 if adapter is supported
7192  **/
7193 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7194 {
7195         int i;
7196
7197         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7198                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7199                         if (pvr_version_is(ipr_blocked_processors[i]))
7200                                 return 1;
7201                 }
7202         }
7203         return 0;
7204 }
7205 #else
7206 #define ipr_invalid_adapter(ioa_cfg) 0
7207 #endif
7208
7209 /**
7210  * ipr_ioa_bringdown_done - IOA bring down completion.
7211  * @ipr_cmd:    ipr command struct
7212  *
7213  * This function processes the completion of an adapter bring down.
7214  * It wakes any reset sleepers.
7215  *
7216  * Return value:
7217  *      IPR_RC_JOB_RETURN
7218  **/
7219 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7220 {
7221         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7222         int i;
7223
7224         ENTER;
7225         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7226                 ipr_trace;
7227                 ioa_cfg->scsi_unblock = 1;
7228                 schedule_work(&ioa_cfg->work_q);
7229         }
7230
7231         ioa_cfg->in_reset_reload = 0;
7232         ioa_cfg->reset_retries = 0;
7233         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7234                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7235                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7236                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7237         }
7238         wmb();
7239
7240         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7241         wake_up_all(&ioa_cfg->reset_wait_q);
7242         LEAVE;
7243
7244         return IPR_RC_JOB_RETURN;
7245 }
7246
7247 /**
7248  * ipr_ioa_reset_done - IOA reset completion.
7249  * @ipr_cmd:    ipr command struct
7250  *
7251  * This function processes the completion of an adapter reset.
7252  * It schedules any necessary mid-layer add/removes and
7253  * wakes any reset sleepers.
7254  *
7255  * Return value:
7256  *      IPR_RC_JOB_RETURN
7257  **/
7258 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7259 {
7260         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7261         struct ipr_resource_entry *res;
7262         int j;
7263
7264         ENTER;
7265         ioa_cfg->in_reset_reload = 0;
7266         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7267                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7268                 ioa_cfg->hrrq[j].allow_cmds = 1;
7269                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7270         }
7271         wmb();
7272         ioa_cfg->reset_cmd = NULL;
7273         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7274
7275         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7276                 if (res->add_to_ml || res->del_from_ml) {
7277                         ipr_trace;
7278                         break;
7279                 }
7280         }
7281         schedule_work(&ioa_cfg->work_q);
7282
7283         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7284                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7285                 if (j < IPR_NUM_LOG_HCAMS)
7286                         ipr_send_hcam(ioa_cfg,
7287                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7288                                 ioa_cfg->hostrcb[j]);
7289                 else
7290                         ipr_send_hcam(ioa_cfg,
7291                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7292                                 ioa_cfg->hostrcb[j]);
7293         }
7294
7295         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7296         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7297
7298         ioa_cfg->reset_retries = 0;
7299         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7300         wake_up_all(&ioa_cfg->reset_wait_q);
7301
7302         ioa_cfg->scsi_unblock = 1;
7303         schedule_work(&ioa_cfg->work_q);
7304         LEAVE;
7305         return IPR_RC_JOB_RETURN;
7306 }
7307
7308 /**
7309  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7310  * @supported_dev:      supported device struct
7311  * @vpids:              vendor product id struct
7312  *
7313  * Return value:
7314  *      none
7315  **/
7316 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7317                                  struct ipr_std_inq_vpids *vpids)
7318 {
7319         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7320         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7321         supported_dev->num_records = 1;
7322         supported_dev->data_length =
7323                 cpu_to_be16(sizeof(struct ipr_supported_device));
7324         supported_dev->reserved = 0;
7325 }
7326
7327 /**
7328  * ipr_set_supported_devs - Send Set Supported Devices for a device
7329  * @ipr_cmd:    ipr command struct
7330  *
7331  * This function sends a Set Supported Devices to the adapter
7332  *
7333  * Return value:
7334  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7335  **/
7336 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7337 {
7338         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7339         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7340         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7341         struct ipr_resource_entry *res = ipr_cmd->u.res;
7342
7343         ipr_cmd->job_step = ipr_ioa_reset_done;
7344
7345         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7346                 if (!ipr_is_scsi_disk(res))
7347                         continue;
7348
7349                 ipr_cmd->u.res = res;
7350                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7351
7352                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7353                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7354                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7355
7356                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7357                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7358                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7359                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7360
7361                 ipr_init_ioadl(ipr_cmd,
7362                                ioa_cfg->vpd_cbs_dma +
7363                                  offsetof(struct ipr_misc_cbs, supp_dev),
7364                                sizeof(struct ipr_supported_device),
7365                                IPR_IOADL_FLAGS_WRITE_LAST);
7366
7367                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7368                            IPR_SET_SUP_DEVICE_TIMEOUT);
7369
7370                 if (!ioa_cfg->sis64)
7371                         ipr_cmd->job_step = ipr_set_supported_devs;
7372                 LEAVE;
7373                 return IPR_RC_JOB_RETURN;
7374         }
7375
7376         LEAVE;
7377         return IPR_RC_JOB_CONTINUE;
7378 }
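
/*
 * Sketch of the iteration above on non-SIS64 adapters (illustrative):
 * ipr_cmd->u.res is the cursor, so each pass issues one Set Supported
 * Devices command and re-enters this job step for the next disk:
 *
 *	pass 1: u.res = cursor -> issue for disk A, return IPR_RC_JOB_RETURN
 *	pass 2: resume after A -> issue for disk B, return IPR_RC_JOB_RETURN
 *	pass N: no disks left  -> return IPR_RC_JOB_CONTINUE
 *	        (job_step already points at ipr_ioa_reset_done)
 *
 * SIS64 adapters leave job_step at ipr_ioa_reset_done after the first
 * request, so only one pass is made.
 */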
7379
7380 /**
7381  * ipr_get_mode_page - Locate specified mode page
7382  * @mode_pages: mode page buffer
7383  * @page_code:  page code to find
7384  * @len:                minimum required length for mode page
7385  *
7386  * Return value:
7387  *      pointer to mode page / NULL on failure
7388  **/
7389 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7390                                u32 page_code, u32 len)
7391 {
7392         struct ipr_mode_page_hdr *mode_hdr;
7393         u32 page_length;
7394         u32 length;
7395
7396         if (!mode_pages || (mode_pages->hdr.length == 0))
7397                 return NULL;
7398
7399         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7400         mode_hdr = (struct ipr_mode_page_hdr *)
7401                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7402
7403         while (length) {
7404                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7405                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7406                                 return mode_hdr;
7407                         break;
7408                 } else {
7409                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7410                                        mode_hdr->page_length);
7411                         length -= page_length;
7412                         mode_hdr = (struct ipr_mode_page_hdr *)
7413                                 ((unsigned long)mode_hdr + page_length);
7414                 }
7415         }
7416         return NULL;
7417 }
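
/*
 * Typical call pattern (sketch): callers pass the page code and the
 * minimum length they intend to dereference, and must handle a NULL
 * return when the page is absent or shorter than required:
 *
 *	struct ipr_mode_page28 *page;
 *
 *	page = ipr_get_mode_page(mode_pages, 0x28, sizeof(*page));
 *	if (!page)
 *		return;		// page not reported by this adapter
 */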
7418
7419 /**
7420  * ipr_check_term_power - Check for term power errors
7421  * @ioa_cfg:    ioa config struct
7422  * @mode_pages: IOAFP mode pages buffer
7423  *
7424  * Check the IOAFP's mode page 28 for term power errors
7425  *
7426  * Return value:
7427  *      nothing
7428  **/
7429 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7430                                  struct ipr_mode_pages *mode_pages)
7431 {
7432         int i;
7433         int entry_length;
7434         struct ipr_dev_bus_entry *bus;
7435         struct ipr_mode_page28 *mode_page;
7436
7437         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7438                                       sizeof(struct ipr_mode_page28));
7439
	/* Mode page 28 may be absent; guard against a NULL dereference */
	if (!mode_page)
		return;

7440         entry_length = mode_page->entry_length;
7441
7442         bus = mode_page->bus;
7443
7444         for (i = 0; i < mode_page->num_entries; i++) {
7445                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7446                         dev_err(&ioa_cfg->pdev->dev,
7447                                 "Term power is absent on scsi bus %d\n",
7448                                 bus->res_addr.bus);
7449                 }
7450
7451                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7452         }
7453 }
7454
7455 /**
7456  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7457  * @ioa_cfg:    ioa config struct
7458  *
7459  * Looks through the config table checking for SES devices. If
7460  * an SES device has an entry in the SES table specifying a maximum
7461  * SCSI bus speed, the bus speed is limited accordingly.
7462  *
7463  * Return value:
7464  *      none
7465  **/
7466 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7467 {
7468         u32 max_xfer_rate;
7469         int i;
7470
7471         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7472                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7473                                                        ioa_cfg->bus_attr[i].bus_width);
7474
7475                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7476                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7477         }
7478 }
7479
7480 /**
7481  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7482  * @ioa_cfg:    ioa config struct
7483  * @mode_pages: mode page 28 buffer
7484  *
7485  * Updates mode page 28 based on driver configuration
7486  *
7487  * Return value:
7488  *      none
7489  **/
7490 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7491                                           struct ipr_mode_pages *mode_pages)
7492 {
7493         int i, entry_length;
7494         struct ipr_dev_bus_entry *bus;
7495         struct ipr_bus_attributes *bus_attr;
7496         struct ipr_mode_page28 *mode_page;
7497
7498         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7499                                       sizeof(struct ipr_mode_page28));
7500
	/* Guard against a missing page 28 before touching the header */
	if (!mode_page)
		return;

7501         entry_length = mode_page->entry_length;
7502
7503         /* Loop for each device bus entry */
7504         for (i = 0, bus = mode_page->bus;
7505              i < mode_page->num_entries;
7506              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7507                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7508                         dev_err(&ioa_cfg->pdev->dev,
7509                                 "Invalid resource address reported: 0x%08X\n",
7510                                 IPR_GET_PHYS_LOC(bus->res_addr));
7511                         continue;
7512                 }
7513
7514                 bus_attr = &ioa_cfg->bus_attr[i];
7515                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7516                 bus->bus_width = bus_attr->bus_width;
7517                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7518                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7519                 if (bus_attr->qas_enabled)
7520                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7521                 else
7522                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7523         }
7524 }
7525
7526 /**
7527  * ipr_build_mode_select - Build a mode select command
7528  * @ipr_cmd:    ipr command struct
7529  * @res_handle: resource handle to send command to
7530  * @parm:               Byte 1 of the Mode Select CDB
7531  * @dma_addr:   DMA buffer address
7532  * @xfer_len:   data transfer length
7533  *
7534  * Return value:
7535  *      none
7536  **/
7537 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7538                                   __be32 res_handle, u8 parm,
7539                                   dma_addr_t dma_addr, u8 xfer_len)
7540 {
7541         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7542
7543         ioarcb->res_handle = res_handle;
7544         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7545         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7546         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7547         ioarcb->cmd_pkt.cdb[1] = parm;
7548         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7549
7550         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7551 }
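
/*
 * The resulting 6-byte MODE SELECT CDB (illustrative; the IOAFP callers
 * below pass parm = 0x11, i.e. PF | SP):
 *
 *	cdb[0] = MODE_SELECT (0x15)
 *	cdb[1] = parm
 *	cdb[2] = cdb[3] = 0
 *	cdb[4] = xfer_len
 *	cdb[5] = 0
 */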
7552
7553 /**
7554  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7555  * @ipr_cmd:    ipr command struct
7556  *
7557  * This function sets up the SCSI bus attributes and sends
7558  * a Mode Select for Page 28 to activate them.
7559  *
7560  * Return value:
7561  *      IPR_RC_JOB_RETURN
7562  **/
7563 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7564 {
7565         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7566         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7567         int length;
7568
7569         ENTER;
7570         ipr_scsi_bus_speed_limit(ioa_cfg);
7571         ipr_check_term_power(ioa_cfg, mode_pages);
7572         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7573         length = mode_pages->hdr.length + 1;
7574         mode_pages->hdr.length = 0;
7575
7576         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7577                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7578                               length);
7579
7580         ipr_cmd->job_step = ipr_set_supported_devs;
7581         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7582                                     struct ipr_resource_entry, queue);
7583         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7584
7585         LEAVE;
7586         return IPR_RC_JOB_RETURN;
7587 }
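
/*
 * Why hdr.length is zeroed above: the mode data length field in the
 * parameter header is only meaningful on MODE SENSE and is reserved on
 * MODE SELECT, so the driver saves (length + 1) as the total transfer
 * size and clears the field before sending the buffer back.
 */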
7588
7589 /**
7590  * ipr_build_mode_sense - Builds a mode sense command
7591  * @ipr_cmd:    ipr command struct
7592  * @res_handle:         resource handle to send command to
7593  * @parm:               Byte 2 of mode sense command
7594  * @dma_addr:   DMA address of mode sense buffer
7595  * @xfer_len:   Size of DMA buffer
7596  *
7597  * Return value:
7598  *      none
7599  **/
7600 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7601                                  __be32 res_handle,
7602                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7603 {
7604         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7605
7606         ioarcb->res_handle = res_handle;
7607         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7608         ioarcb->cmd_pkt.cdb[2] = parm;
7609         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7610         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7611
7612         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7613 }
7614
7615 /**
7616  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7617  * @ipr_cmd:    ipr command struct
7618  *
7619  * This function handles the failure of an IOA bringup command.
7620  *
7621  * Return value:
7622  *      IPR_RC_JOB_RETURN
7623  **/
7624 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7625 {
7626         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7627         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7628
7629         dev_err(&ioa_cfg->pdev->dev,
7630                 "0x%02X failed with IOASC: 0x%08X\n",
7631                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7632
7633         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7634         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7635         return IPR_RC_JOB_RETURN;
7636 }
7637
7638 /**
7639  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7640  * @ipr_cmd:    ipr command struct
7641  *
7642  * This function handles the failure of a Mode Sense to the IOAFP.
7643  * Some adapters do not handle all mode pages.
7644  *
7645  * Return value:
7646  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7647  **/
7648 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7649 {
7650         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7651         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7652
7653         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7654                 ipr_cmd->job_step = ipr_set_supported_devs;
7655                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7656                                             struct ipr_resource_entry, queue);
7657                 return IPR_RC_JOB_CONTINUE;
7658         }
7659
7660         return ipr_reset_cmd_failed(ipr_cmd);
7661 }
7662
7663 /**
7664  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7665  * @ipr_cmd:    ipr command struct
7666  *
7667  * This function sends a Page 28 mode sense to the IOA to
7668  * retrieve SCSI bus attributes.
7669  *
7670  * Return value:
7671  *      IPR_RC_JOB_RETURN
7672  **/
7673 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7674 {
7675         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7676
7677         ENTER;
7678         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7679                              0x28, ioa_cfg->vpd_cbs_dma +
7680                              offsetof(struct ipr_misc_cbs, mode_pages),
7681                              sizeof(struct ipr_mode_pages));
7682
7683         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7684         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7685
7686         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7687
7688         LEAVE;
7689         return IPR_RC_JOB_RETURN;
7690 }
7691
7692 /**
7693  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7694  * @ipr_cmd:    ipr command struct
7695  *
7696  * This function enables dual IOA RAID support if possible.
7697  *
7698  * Return value:
7699  *      IPR_RC_JOB_RETURN
7700  **/
7701 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7702 {
7703         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7704         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7705         struct ipr_mode_page24 *mode_page;
7706         int length;
7707
7708         ENTER;
7709         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7710                                       sizeof(struct ipr_mode_page24));
7711
7712         if (mode_page)
7713                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7714
7715         length = mode_pages->hdr.length + 1;
7716         mode_pages->hdr.length = 0;
7717
7718         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7719                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7720                               length);
7721
7722         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7723         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7724
7725         LEAVE;
7726         return IPR_RC_JOB_RETURN;
7727 }
7728
7729 /**
7730  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7731  * @ipr_cmd:    ipr command struct
7732  *
7733  * This function handles the failure of a Mode Sense to the IOAFP.
7734  * Some adapters do not handle all mode pages.
7735  *
7736  * Return value:
7737  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7738  **/
7739 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7740 {
7741         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7742
7743         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7744                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7745                 return IPR_RC_JOB_CONTINUE;
7746         }
7747
7748         return ipr_reset_cmd_failed(ipr_cmd);
7749 }
7750
7751 /**
7752  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7753  * @ipr_cmd:    ipr command struct
7754  *
7755  * This function sends a mode sense to the IOA to retrieve
7756  * the IOA Advanced Function Control mode page.
7757  *
7758  * Return value:
7759  *      IPR_RC_JOB_RETURN
7760  **/
7761 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7762 {
7763         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7764
7765         ENTER;
7766         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7767                              0x24, ioa_cfg->vpd_cbs_dma +
7768                              offsetof(struct ipr_misc_cbs, mode_pages),
7769                              sizeof(struct ipr_mode_pages));
7770
7771         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7772         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7773
7774         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7775
7776         LEAVE;
7777         return IPR_RC_JOB_RETURN;
7778 }
7779
7780 /**
7781  * ipr_init_res_table - Initialize the resource table
7782  * @ipr_cmd:    ipr command struct
7783  *
7784  * This function looks through the existing resource table, comparing
7785  * it with the config table. It takes care of old and new
7786  * devices, scheduling additions to and removals from the mid-layer
7787  * as appropriate.
7788  *
7789  * Return value:
7790  *      IPR_RC_JOB_CONTINUE
7791  **/
7792 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7793 {
7794         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7795         struct ipr_resource_entry *res, *temp;
7796         struct ipr_config_table_entry_wrapper cfgtew;
7797         int entries, found, flag, i;
7798         LIST_HEAD(old_res);
7799
7800         ENTER;
7801         if (ioa_cfg->sis64)
7802                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7803         else
7804                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7805
7806         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7807                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7808
7809         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7810                 list_move_tail(&res->queue, &old_res);
7811
7812         if (ioa_cfg->sis64)
7813                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7814         else
7815                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7816
7817         for (i = 0; i < entries; i++) {
7818                 if (ioa_cfg->sis64)
7819                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7820                 else
7821                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7822                 found = 0;
7823
7824                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7825                         if (ipr_is_same_device(res, &cfgtew)) {
7826                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7827                                 found = 1;
7828                                 break;
7829                         }
7830                 }
7831
7832                 if (!found) {
7833                         if (list_empty(&ioa_cfg->free_res_q)) {
7834                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7835                                 break;
7836                         }
7837
7838                         found = 1;
7839                         res = list_entry(ioa_cfg->free_res_q.next,
7840                                          struct ipr_resource_entry, queue);
7841                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7842                         ipr_init_res_entry(res, &cfgtew);
7843                         res->add_to_ml = 1;
7844                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7845                         res->sdev->allow_restart = 1;
7846
7847                 if (found)
7848                         ipr_update_res_entry(res, &cfgtew);
7849         }
7850
7851         list_for_each_entry_safe(res, temp, &old_res, queue) {
7852                 if (res->sdev) {
7853                         res->del_from_ml = 1;
7854                         res->res_handle = IPR_INVALID_RES_HANDLE;
7855                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7856                 }
7857         }
7858
7859         list_for_each_entry_safe(res, temp, &old_res, queue) {
7860                 ipr_clear_res_target(res);
7861                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7862         }
7863
7864         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7865                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7866         else
7867                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7868
7869         LEAVE;
7870         return IPR_RC_JOB_CONTINUE;
7871 }
7872
7873 /**
7874  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7875  * @ipr_cmd:    ipr command struct
7876  *
7877  * This function sends a Query IOA Configuration command
7878  * to the adapter to retrieve the IOA configuration table.
7879  *
7880  * Return value:
7881  *      IPR_RC_JOB_RETURN
7882  **/
7883 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7884 {
7885         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7886         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7887         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7888         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7889
7890         ENTER;
7891         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7892                 ioa_cfg->dual_raid = 1;
7893         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7894                  ucode_vpd->major_release, ucode_vpd->card_type,
7895                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7896         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7897         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7898
7899         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7900         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7901         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7902         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7903
7904         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7905                        IPR_IOADL_FLAGS_READ_LAST);
7906
7907         ipr_cmd->job_step = ipr_init_res_table;
7908
7909         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7910
7911         LEAVE;
7912         return IPR_RC_JOB_RETURN;
7913 }
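
/*
 * CDB layout note for Query IOA Config, as assembled above: bytes 6..8
 * carry the config table size as a 24-bit big-endian value. A table
 * size of 0x012345 bytes, for example, would be encoded as:
 *
 *	cdb[6] = 0x01;
 *	cdb[7] = 0x23;
 *	cdb[8] = 0x45;
 */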
7914
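/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/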
7915 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7916 {
7917         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7918
7919         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7920                 return IPR_RC_JOB_CONTINUE;
7921
7922         return ipr_reset_cmd_failed(ipr_cmd);
7923 }
7924
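/**
 * ipr_build_ioa_service_action - Build an IOA service action command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @sa_code:	service action code
 *
 * Return value:
 *	none
 **/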
7925 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7926                                          __be32 res_handle, u8 sa_code)
7927 {
7928         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7929
7930         ioarcb->res_handle = res_handle;
7931         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7932         ioarcb->cmd_pkt.cdb[1] = sa_code;
7933         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7934 }
7935
7936 /**
7937  * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service action
7938  * @ipr_cmd:    ipr command struct
7939  *
7940  * Return value:
7941  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7942  **/
7943 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7944 {
7945         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7946         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7947         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7948
7949         ENTER;
7950
7951         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7952
7953         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7954                 ipr_build_ioa_service_action(ipr_cmd,
7955                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7956                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7957
7958                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7959
7960                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7961                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7962                            IPR_SET_SUP_DEVICE_TIMEOUT);
7963
7964                 LEAVE;
7965                 return IPR_RC_JOB_RETURN;
7966         }
7967
7968         LEAVE;
7969         return IPR_RC_JOB_CONTINUE;
7970 }
7971
7972 /**
7973  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7974  * @ipr_cmd:    ipr command struct
7975  *
7976  * This utility function sends an inquiry to the adapter.
7977  *
7978  * Return value:
7979  *      none
7980  **/
7981 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7982                               dma_addr_t dma_addr, u8 xfer_len)
7983 {
7984         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7985
7986         ENTER;
7987         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7988         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7989
7990         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7991         ioarcb->cmd_pkt.cdb[1] = flags;
7992         ioarcb->cmd_pkt.cdb[2] = page;
7993         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7994
7995         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7996
7997         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7998         LEAVE;
7999 }
8000
8001 /**
8002  * ipr_inquiry_page_supported - Is the given inquiry page supported
8003  * @page0:              inquiry page 0 buffer
8004  * @page:               page code.
8005  *
8006  * This function determines if the specified inquiry page is supported.
8007  *
8008  * Return value:
8009  *      1 if page is supported / 0 if not
8010  **/
8011 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8012 {
8013         int i;
8014
8015         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8016                 if (page0->page[i] == page)
8017                         return 1;
8018
8019         return 0;
8020 }
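
/*
 * Usage sketch (illustrative): page 0 lists the supported page codes in
 * page0->page[], so callers gate optional inquiries on it:
 *
 *	if (ipr_inquiry_page_supported(page0, 0xD0))
 *		;	// safe to request the capabilities page
 */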
8021
8022 /**
8023  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8024  * @ipr_cmd:    ipr command struct
8025  *
8026  * This function sends a Page 0xC4 inquiry to the adapter
8027  * to retrieve software VPD information.
8028  *
8029  * Return value:
8030  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8031  **/
8032 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8033 {
8034         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8035         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8036         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8037
8038         ENTER;
8039         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8040         memset(pageC4, 0, sizeof(*pageC4));
8041
8042         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8043                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8044                                   (ioa_cfg->vpd_cbs_dma
8045                                    + offsetof(struct ipr_misc_cbs,
8046                                               pageC4_data)),
8047                                   sizeof(struct ipr_inquiry_pageC4));
8048                 return IPR_RC_JOB_RETURN;
8049         }
8050
8051         LEAVE;
8052         return IPR_RC_JOB_CONTINUE;
8053 }
8054
8055 /**
8056  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8057  * @ipr_cmd:    ipr command struct
8058  *
8059  * This function sends a Page 0xD0 inquiry to the adapter
8060  * to retrieve adapter capabilities.
8061  *
8062  * Return value:
8063  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8064  **/
8065 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8066 {
8067         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8068         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8069         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8070
8071         ENTER;
8072         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8073         memset(cap, 0, sizeof(*cap));
8074
8075         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8076                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8077                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8078                                   sizeof(struct ipr_inquiry_cap));
8079                 return IPR_RC_JOB_RETURN;
8080         }
8081
8082         LEAVE;
8083         return IPR_RC_JOB_CONTINUE;
8084 }
8085
8086 /**
8087  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8088  * @ipr_cmd:    ipr command struct
8089  *
8090  * This function sends a Page 3 inquiry to the adapter
8091  * to retrieve software VPD information.
8092  *
8093  * Return value:
8094  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8095  **/
8096 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8097 {
8098         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8099
8100         ENTER;
8101
8102         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8103
8104         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8105                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8106                           sizeof(struct ipr_inquiry_page3));
8107
8108         LEAVE;
8109         return IPR_RC_JOB_RETURN;
8110 }
8111
8112 /**
8113  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8114  * @ipr_cmd:    ipr command struct
8115  *
8116  * This function sends a Page 0 inquiry to the adapter
8117  * to retrieve supported inquiry pages.
8118  *
8119  * Return value:
8120  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8121  **/
8122 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8123 {
8124         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8125         char type[5];
8126
8127         ENTER;
8128
8129         /* Grab the type out of the VPD and store it away */
8130         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8131         type[4] = '\0';
8132         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8133
8134         if (ipr_invalid_adapter(ioa_cfg)) {
8135                 dev_err(&ioa_cfg->pdev->dev,
8136                         "Adapter not supported in this hardware configuration.\n");
8137
8138                 if (!ipr_testmode) {
8139                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8140                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8141                         list_add_tail(&ipr_cmd->queue,
8142                                         &ioa_cfg->hrrq->hrrq_free_q);
8143                         return IPR_RC_JOB_RETURN;
8144                 }
8145         }
8146
8147         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8148
8149         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8150                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8151                           sizeof(struct ipr_inquiry_page0));
8152
8153         LEAVE;
8154         return IPR_RC_JOB_RETURN;
8155 }
8156
8157 /**
8158  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8159  * @ipr_cmd:    ipr command struct
8160  *
8161  * This function sends a standard inquiry to the adapter.
8162  *
8163  * Return value:
8164  *      IPR_RC_JOB_RETURN
8165  **/
8166 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8167 {
8168         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8169
8170         ENTER;
8171         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8172
8173         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8174                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8175                           sizeof(struct ipr_ioa_vpd));
8176
8177         LEAVE;
8178         return IPR_RC_JOB_RETURN;
8179 }
8180
8181 /**
8182  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8183  * @ipr_cmd:    ipr command struct
8184  *
8185  * This function sends an Identify Host Request Response Queue
8186  * command to establish the HRRQ with the adapter.
8187  *
8188  * Return value:
8189  *      IPR_RC_JOB_RETURN
8190  **/
8191 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8192 {
8193         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8194         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8195         struct ipr_hrr_queue *hrrq;
8196
8197         ENTER;
8198         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8199         if (ioa_cfg->identify_hrrq_index == 0)
8200                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8201
8202         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8203                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8204
8205                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8206                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8207
8208                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8209                 if (ioa_cfg->sis64)
8210                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8211
8212                 if (ioa_cfg->nvectors == 1)
8213                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8214                 else
8215                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8216
8217                 ioarcb->cmd_pkt.cdb[2] =
8218                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8219                 ioarcb->cmd_pkt.cdb[3] =
8220                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8221                 ioarcb->cmd_pkt.cdb[4] =
8222                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8223                 ioarcb->cmd_pkt.cdb[5] =
8224                         ((u64) hrrq->host_rrq_dma) & 0xff;
8225                 ioarcb->cmd_pkt.cdb[7] =
8226                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8227                 ioarcb->cmd_pkt.cdb[8] =
8228                         (sizeof(u32) * hrrq->size) & 0xff;
8229
8230                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8231                         ioarcb->cmd_pkt.cdb[9] =
8232                                         ioa_cfg->identify_hrrq_index;
8233
8234                 if (ioa_cfg->sis64) {
8235                         ioarcb->cmd_pkt.cdb[10] =
8236                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8237                         ioarcb->cmd_pkt.cdb[11] =
8238                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8239                         ioarcb->cmd_pkt.cdb[12] =
8240                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8241                         ioarcb->cmd_pkt.cdb[13] =
8242                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8243                 }
8244
8245                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8246                         ioarcb->cmd_pkt.cdb[14] =
8247                                         ioa_cfg->identify_hrrq_index;
8248
8249                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8250                            IPR_INTERNAL_TIMEOUT);
8251
8252                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8253                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8254
8255                 LEAVE;
8256                 return IPR_RC_JOB_RETURN;
8257         }
8258
8259         LEAVE;
8260         return IPR_RC_JOB_CONTINUE;
8261 }
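
/*
 * Identify Host RRQ CDB layout, as assembled above (illustrative):
 *
 *	cdb[2..5]   = HRRQ DMA address bits 31:0 (big-endian)
 *	cdb[7..8]   = queue size in bytes (sizeof(u32) * hrrq->size)
 *	cdb[10..13] = HRRQ DMA address bits 63:32 (SIS64 only)
 *	cdb[9]/[14] = HRRQ index when multi-queue select is enabled
 *
 * With multiple vectors the function re-arms itself as the job step
 * until every HRRQ has been identified to the adapter.
 */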
8262
8263 /**
8264  * ipr_reset_timer_done - Adapter reset timer function
8265  * @t:          Timer context used to fetch ipr command struct
8266  *
8267  * Description: This function is used in adapter reset processing
8268  * for timing events. If the reset_cmd pointer in the IOA
8269  * config struct is not this adapter's, we are doing nested
8270  * resets and fail_all_ops will take care of freeing the
8271  * command block.
8272  *
8273  * Return value:
8274  *      none
8275  **/
8276 static void ipr_reset_timer_done(struct timer_list *t)
8277 {
8278         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8279         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8280         unsigned long lock_flags = 0;
8281
8282         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8283
8284         if (ioa_cfg->reset_cmd == ipr_cmd) {
8285                 list_del(&ipr_cmd->queue);
8286                 ipr_cmd->done(ipr_cmd);
8287         }
8288
8289         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8290 }
8291
8292 /**
8293  * ipr_reset_start_timer - Start a timer for adapter reset job
8294  * @ipr_cmd:    ipr command struct
8295  * @timeout:    timeout value
8296  *
8297  * Description: This function is used in adapter reset processing
8298  * for timing events. If the reset_cmd pointer in the IOA
8299  * config struct is not this adapter's, we are doing nested
8300  * resets and fail_all_ops will take care of freeing the
8301  * command block.
8302  *
8303  * Return value:
8304  *      none
8305  **/
8306 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8307                                   unsigned long timeout)
8308 {
8309
8310         ENTER;
8311         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8312         ipr_cmd->done = ipr_reset_ioa_job;
8313
8314         ipr_cmd->timer.expires = jiffies + timeout;
8315         ipr_cmd->timer.function = ipr_reset_timer_done;
8316         add_timer(&ipr_cmd->timer);
8317 }
8318
8319 /**
8320  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8321  * @ioa_cfg:    ioa cfg struct
8322  *
8323  * Return value:
8324  *      nothing
8325  **/
8326 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8327 {
8328         struct ipr_hrr_queue *hrrq;
8329
8330         for_each_hrrq(hrrq, ioa_cfg) {
8331                 spin_lock(&hrrq->_lock);
8332                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8333
8334                 /* Initialize Host RRQ pointers */
8335                 hrrq->hrrq_start = hrrq->host_rrq;
8336                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8337                 hrrq->hrrq_curr = hrrq->hrrq_start;
8338                 hrrq->toggle_bit = 1;
8339                 spin_unlock(&hrrq->_lock);
8340         }
8341         wmb();
8342
8343         ioa_cfg->identify_hrrq_index = 0;
8344         if (ioa_cfg->hrrq_num == 1)
8345                 atomic_set(&ioa_cfg->hrrq_index, 0);
8346         else
8347                 atomic_set(&ioa_cfg->hrrq_index, 1);
8348
8349         /* Zero out config table */
8350         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8351 }
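
/*
 * Toggle-bit note (sketch, based on the initialization above): the host
 * RRQ is a ring of 32-bit response words. The adapter writes each new
 * entry with the current toggle bit set, so after the ring is zeroed the
 * host starts expecting toggle_bit = 1 and flips it on every wrap,
 * distinguishing fresh entries from stale ones without a producer index.
 */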
8352
8353 /**
8354  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8355  * @ipr_cmd:    ipr command struct
8356  *
8357  * Return value:
8358  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8359  **/
8360 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8361 {
8362         unsigned long stage, stage_time;
8363         u32 feedback;
8364         volatile u32 int_reg;
8365         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8366         u64 maskval = 0;
8367
8368         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8369         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8370         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8371
8372         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8373
8374         /* sanity check the stage_time value */
8375         if (stage_time == 0)
8376                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8377         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8378                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8379         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8380                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8381
8382         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8383                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8384                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8385                 stage_time = ioa_cfg->transop_timeout;
8386                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8387         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8388                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8389                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8390                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8391                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8392                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8393                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8394                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8395                         return IPR_RC_JOB_CONTINUE;
8396                 }
8397         }
8398
8399         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8400         ipr_cmd->timer.function = ipr_oper_timeout;
8401         ipr_cmd->done = ipr_reset_ioa_job;
8402         add_timer(&ipr_cmd->timer);
8403
8404         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8405
8406         return IPR_RC_JOB_RETURN;
8407 }
8408
8409 /**
8410  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8411  * @ipr_cmd:    ipr command struct
8412  *
8413  * This function reinitializes some control blocks and
8414  * enables destructive diagnostics on the adapter.
8415  *
8416  * Return value:
8417  *      IPR_RC_JOB_RETURN
8418  **/
8419 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8420 {
8421         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8422         volatile u32 int_reg;
8423         volatile u64 maskval;
8424         int i;
8425
8426         ENTER;
8427         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8428         ipr_init_ioa_mem(ioa_cfg);
8429
8430         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8431                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8432                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8433                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8434         }
8435         if (ioa_cfg->sis64) {
8436                 /* Set the adapter to the correct endian mode. */
8437                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8438                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8439         }
8440
8441         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8442
8443         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8444                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8445                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8446                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8447                 return IPR_RC_JOB_CONTINUE;
8448         }
8449
8450         /* Enable destructive diagnostics on IOA */
8451         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8452
8453         if (ioa_cfg->sis64) {
8454                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8455                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8456                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8457         } else
8458                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8459
8460         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8461
8462         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8463
8464         if (ioa_cfg->sis64) {
8465                 ipr_cmd->job_step = ipr_reset_next_stage;
8466                 return IPR_RC_JOB_CONTINUE;
8467         }
8468
8469         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8470         ipr_cmd->timer.function = ipr_oper_timeout;
8471         ipr_cmd->done = ipr_reset_ioa_job;
8472         add_timer(&ipr_cmd->timer);
8473         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8474
8475         LEAVE;
8476         return IPR_RC_JOB_RETURN;
8477 }
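
/*
 * Illustrative sketch, not part of the driver: every reset job step
 * follows the same contract. Returning IPR_RC_JOB_CONTINUE makes
 * ipr_reset_ioa_job() invoke the next job_step immediately, while
 * IPR_RC_JOB_RETURN means the step has armed a timer or issued a
 * command and the job resumes later from timer or interrupt context.
 * A hypothetical step built on that contract:
 */
static int __maybe_unused ipr_example_job_step(struct ipr_cmnd *ipr_cmd)
{
        bool hw_ready = false;  /* placeholder for a real hardware check */

        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;    /* next state */

        if (hw_ready)
                return IPR_RC_JOB_CONTINUE;     /* run the next step now */

        /* otherwise wait: arm a timer and resume asynchronously */
        ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
        return IPR_RC_JOB_RETURN;
}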
8478
8479 /**
8480  * ipr_reset_wait_for_dump - Wait for a dump to time out.
8481  * @ipr_cmd:    ipr command struct
8482  *
8483  * This function is invoked when an adapter dump has run out
8484  * of processing time.
8485  *
8486  * Return value:
8487  *      IPR_RC_JOB_CONTINUE
8488  **/
8489 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8490 {
8491         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8492
8493         if (ioa_cfg->sdt_state == GET_DUMP)
8494                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8495         else if (ioa_cfg->sdt_state == READ_DUMP)
8496                 ioa_cfg->sdt_state = ABORT_DUMP;
8497
8498         ioa_cfg->dump_timeout = 1;
8499         ipr_cmd->job_step = ipr_reset_alert;
8500
8501         return IPR_RC_JOB_CONTINUE;
8502 }
8503
8504 /**
8505  * ipr_unit_check_no_data - Log a unit check/no data error log
8506  * @ioa_cfg:            ioa config struct
8507  *
8508  * Logs an error indicating the adapter unit checked, but for some
8509  * reason, we were unable to fetch the unit check buffer.
8510  *
8511  * Return value:
8512  *      nothing
8513  **/
8514 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8515 {
8516         ioa_cfg->errors_logged++;
8517         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8518 }
8519
8520 /**
8521  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8522  * @ioa_cfg:            ioa config struct
8523  *
8524  * Fetches the unit check buffer from the adapter by clocking the data
8525  * through the mailbox register.
8526  *
8527  * Return value:
8528  *      nothing
8529  **/
8530 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8531 {
8532         unsigned long mailbox;
8533         struct ipr_hostrcb *hostrcb;
8534         struct ipr_uc_sdt sdt;
8535         int rc, length;
8536         u32 ioasc;
8537
8538         mailbox = readl(ioa_cfg->ioa_mailbox);
8539
8540         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8541                 ipr_unit_check_no_data(ioa_cfg);
8542                 return;
8543         }
8544
8545         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8546         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8547                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8548
8549         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8550             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8551             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8552                 ipr_unit_check_no_data(ioa_cfg);
8553                 return;
8554         }
8555
8556         /* Find length of the first sdt entry (UC buffer) */
8557         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8558                 length = be32_to_cpu(sdt.entry[0].end_token);
8559         else
8560                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8561                           be32_to_cpu(sdt.entry[0].start_token)) &
8562                           IPR_FMT2_MBX_ADDR_MASK;
8563
8564         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8565                              struct ipr_hostrcb, queue);
8566         list_del_init(&hostrcb->queue);
8567         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8568
8569         rc = ipr_get_ldump_data_section(ioa_cfg,
8570                                         be32_to_cpu(sdt.entry[0].start_token),
8571                                         (__be32 *)&hostrcb->hcam,
8572                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8573
8574         if (!rc) {
8575                 ipr_handle_log_data(ioa_cfg, hostrcb);
8576                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8577                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8578                     ioa_cfg->sdt_state == GET_DUMP)
8579                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8580         } else
8581                 ipr_unit_check_no_data(ioa_cfg);
8582
8583         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8584 }
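
/*
 * Illustrative sketch, not part of the driver: the length computation
 * above restated as a hypothetical helper. For a format-3 SDT the
 * first entry's end_token already holds the unit check buffer length;
 * for format 2 it is the masked difference of the end and start tokens.
 */
static int __maybe_unused ipr_uc_buffer_len(const struct ipr_uc_sdt *sdt)
{
        if (be32_to_cpu(sdt->hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
                return be32_to_cpu(sdt->entry[0].end_token);

        return (be32_to_cpu(sdt->entry[0].end_token) -
                be32_to_cpu(sdt->entry[0].start_token)) &
                IPR_FMT2_MBX_ADDR_MASK;
}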
8585
8586 /**
8587  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8588  * @ipr_cmd:    ipr command struct
8589  *
8590  * Description: This function retrieves the unit check buffer from the adapter.
8591  *
8592  * Return value:
8593  *      IPR_RC_JOB_RETURN
8594  **/
8595 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8596 {
8597         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8598
8599         ENTER;
8600         ioa_cfg->ioa_unit_checked = 0;
8601         ipr_get_unit_check_buffer(ioa_cfg);
8602         ipr_cmd->job_step = ipr_reset_alert;
8603         ipr_reset_start_timer(ipr_cmd, 0);
8604
8605         LEAVE;
8606         return IPR_RC_JOB_RETURN;
8607 }
8608
8609 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8610 {
8611         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8612
8613         ENTER;
8614
8615         if (ioa_cfg->sdt_state != GET_DUMP)
8616                 return IPR_RC_JOB_RETURN;
8617
8618         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8619             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8620              IPR_PCII_MAILBOX_STABLE)) {
8621
8622                 if (!ipr_cmd->u.time_left)
8623                         dev_err(&ioa_cfg->pdev->dev,
8624                                 "Timed out waiting for Mailbox register.\n");
8625
8626                 ioa_cfg->sdt_state = READ_DUMP;
8627                 ioa_cfg->dump_timeout = 0;
8628                 if (ioa_cfg->sis64)
8629                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8630                 else
8631                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8632                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8633                 schedule_work(&ioa_cfg->work_q);
8634
8635         } else {
8636                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8637                 ipr_reset_start_timer(ipr_cmd,
8638                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8639         }
8640
8641         LEAVE;
8642         return IPR_RC_JOB_RETURN;
8643 }
8644
8645 /**
8646  * ipr_reset_restore_cfg_space - Restore PCI config space.
8647  * @ipr_cmd:    ipr command struct
8648  *
8649  * Description: This function restores the saved PCI config space of
8650  * the adapter, fails all outstanding ops back to the callers, and
8651  * fetches the dump/unit check if applicable to this reset.
8652  *
8653  * Return value:
8654  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8655  **/
8656 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8657 {
8658         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8659         u32 int_reg;
8660
8661         ENTER;
8662         ioa_cfg->pdev->state_saved = true;
8663         pci_restore_state(ioa_cfg->pdev);
8664
8665         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8666                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8667                 return IPR_RC_JOB_CONTINUE;
8668         }
8669
8670         ipr_fail_all_ops(ioa_cfg);
8671
8672         if (ioa_cfg->sis64) {
8673                 /* Set the adapter to the correct endian mode. */
8674                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8675                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8676         }
8677
8678         if (ioa_cfg->ioa_unit_checked) {
8679                 if (ioa_cfg->sis64) {
8680                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8681                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8682                         return IPR_RC_JOB_RETURN;
8683                 } else {
8684                         ioa_cfg->ioa_unit_checked = 0;
8685                         ipr_get_unit_check_buffer(ioa_cfg);
8686                         ipr_cmd->job_step = ipr_reset_alert;
8687                         ipr_reset_start_timer(ipr_cmd, 0);
8688                         return IPR_RC_JOB_RETURN;
8689                 }
8690         }
8691
8692         if (ioa_cfg->in_ioa_bringdown) {
8693                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8694         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8695                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8696                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8697         } else {
8698                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8699         }
8700
8701         LEAVE;
8702         return IPR_RC_JOB_CONTINUE;
8703 }
8704
8705 /**
8706  * ipr_reset_bist_done - BIST has completed on the adapter.
8707  * @ipr_cmd:    ipr command struct
8708  *
8709  * Description: Unblock config space and resume the reset process.
8710  *
8711  * Return value:
8712  *      IPR_RC_JOB_CONTINUE
8713  **/
8714 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8715 {
8716         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8717
8718         ENTER;
8719         if (ioa_cfg->cfg_locked)
8720                 pci_cfg_access_unlock(ioa_cfg->pdev);
8721         ioa_cfg->cfg_locked = 0;
8722         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8723         LEAVE;
8724         return IPR_RC_JOB_CONTINUE;
8725 }
8726
8727 /**
8728  * ipr_reset_start_bist - Run BIST on the adapter.
8729  * @ipr_cmd:    ipr command struct
8730  *
8731  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8732  *
8733  * Return value:
8734  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8735  **/
8736 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8737 {
8738         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8739         int rc = PCIBIOS_SUCCESSFUL;
8740
8741         ENTER;
8742         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8743                 writel(IPR_UPROCI_SIS64_START_BIST,
8744                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8745         else
8746                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8747
8748         if (rc == PCIBIOS_SUCCESSFUL) {
8749                 ipr_cmd->job_step = ipr_reset_bist_done;
8750                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8751                 rc = IPR_RC_JOB_RETURN;
8752         } else {
8753                 if (ioa_cfg->cfg_locked)
8754                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8755                 ioa_cfg->cfg_locked = 0;
8756                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8757                 rc = IPR_RC_JOB_CONTINUE;
8758         }
8759
8760         LEAVE;
8761         return rc;
8762 }
8763
8764 /**
8765  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8766  * @ipr_cmd:    ipr command struct
8767  *
8768  * Description: This clears PCI reset to the adapter and delays two seconds.
8769  *
8770  * Return value:
8771  *      IPR_RC_JOB_RETURN
8772  **/
8773 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8774 {
8775         ENTER;
8776         ipr_cmd->job_step = ipr_reset_bist_done;
8777         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8778         LEAVE;
8779         return IPR_RC_JOB_RETURN;
8780 }
8781
8782 /**
8783  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8784  * @work:       work struct
8785  *
8786  * Description: This pulses warm reset to a slot.
8787  *
8788  **/
8789 static void ipr_reset_reset_work(struct work_struct *work)
8790 {
8791         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8792         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8793         struct pci_dev *pdev = ioa_cfg->pdev;
8794         unsigned long lock_flags = 0;
8795
8796         ENTER;
8797         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8798         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8799         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8800
8801         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8802         if (ioa_cfg->reset_cmd == ipr_cmd)
8803                 ipr_reset_ioa_job(ipr_cmd);
8804         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8805         LEAVE;
8806 }
8807
8808 /**
8809  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8810  * @ipr_cmd:    ipr command struct
8811  *
8812  * Description: This asserts PCI reset to the adapter.
8813  *
8814  * Return value:
8815  *      IPR_RC_JOB_RETURN
8816  **/
8817 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8818 {
8819         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8820
8821         ENTER;
8822         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8823         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8824         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8825         LEAVE;
8826         return IPR_RC_JOB_RETURN;
8827 }
8828
8829 /**
8830  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8831  * @ipr_cmd:    ipr command struct
8832  *
8833  * Description: This attempts to block config access to the IOA.
8834  *
8835  * Return value:
8836  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8837  **/
8838 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8839 {
8840         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8841         int rc = IPR_RC_JOB_CONTINUE;
8842
8843         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8844                 ioa_cfg->cfg_locked = 1;
8845                 ipr_cmd->job_step = ioa_cfg->reset;
8846         } else {
8847                 if (ipr_cmd->u.time_left) {
8848                         rc = IPR_RC_JOB_RETURN;
8849                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8850                         ipr_reset_start_timer(ipr_cmd,
8851                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8852                 } else {
8853                         ipr_cmd->job_step = ioa_cfg->reset;
8854                         dev_err(&ioa_cfg->pdev->dev,
8855                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8856                 }
8857         }
8858
8859         return rc;
8860 }
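
/*
 * Illustrative sketch, not part of the driver: this function and
 * ipr_dump_mailbox_wait() above share the same polling idiom.
 * u.time_left is a jiffies budget spent in IPR_CHECK_FOR_RESET_TIMEOUT
 * slices, so the condition is rechecked roughly
 * IPR_WAIT_FOR_RESET_TIMEOUT / IPR_CHECK_FOR_RESET_TIMEOUT times
 * before the code gives up and proceeds anyway:
 */
static int __maybe_unused ipr_example_poll_step(struct ipr_cmnd *ipr_cmd,
                                                bool condition_met)
{
        if (!condition_met && ipr_cmd->u.time_left) {
                /* spend one slice of the budget, then try again */
                ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
                ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
                return IPR_RC_JOB_RETURN;
        }

        /* condition met or budget exhausted: fall through to next step */
        return IPR_RC_JOB_CONTINUE;
}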
8861
8862 /**
8863  * ipr_reset_block_config_access - Block config access to the IOA
8864  * @ipr_cmd:    ipr command struct
8865  *
8866  * Description: This attempts to block config access to the IOA
8867  *
8868  * Return value:
8869  *      IPR_RC_JOB_CONTINUE
8870  **/
8871 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8872 {
8873         ipr_cmd->ioa_cfg->cfg_locked = 0;
8874         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8875         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8876         return IPR_RC_JOB_CONTINUE;
8877 }
8878
8879 /**
8880  * ipr_reset_allowed - Query whether or not IOA can be reset
8881  * @ioa_cfg:    ioa config struct
8882  *
8883  * Return value:
8884  *      0 if reset not allowed / non-zero if reset is allowed
8885  **/
8886 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8887 {
8888         volatile u32 temp_reg;
8889
8890         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8891         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8892 }
8893
8894 /**
8895  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8896  * @ipr_cmd:    ipr command struct
8897  *
8898  * Description: This function waits for adapter permission to run BIST,
8899  * then runs it. If the adapter does not give permission within a
8900  * reasonable time, we reset the adapter anyway. The risk of resetting
8901  * the adapter without warning it is losing the adapter's persistent
8902  * error log: if the reset occurs while the adapter is writing to its
8903  * flash, the affected flash segment will have bad ECC and be
8904  * zeroed.
8905  *
8906  * Return value:
8907  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8908  **/
8909 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8910 {
8911         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8912         int rc = IPR_RC_JOB_RETURN;
8913
8914         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8915                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8916                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8917         } else {
8918                 ipr_cmd->job_step = ipr_reset_block_config_access;
8919                 rc = IPR_RC_JOB_CONTINUE;
8920         }
8921
8922         return rc;
8923 }
8924
8925 /**
8926  * ipr_reset_alert - Alert the adapter of a pending reset
8927  * @ipr_cmd:    ipr command struct
8928  *
8929  * Description: This function alerts the adapter that it will be reset.
8930  * If memory space is not currently enabled, proceed directly
8931  * to running BIST on the adapter. The timer must always be started
8932  * so we guarantee we do not run BIST from ipr_isr.
8933  *
8934  * Return value:
8935  *      IPR_RC_JOB_RETURN
8936  **/
8937 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8938 {
8939         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8940         u16 cmd_reg;
8941         int rc;
8942
8943         ENTER;
8944         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8945
8946         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8947                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8948                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8949                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8950         } else {
8951                 ipr_cmd->job_step = ipr_reset_block_config_access;
8952         }
8953
8954         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8955         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8956
8957         LEAVE;
8958         return IPR_RC_JOB_RETURN;
8959 }
8960
8961 /**
8962  * ipr_reset_quiesce_done - Complete IOA disconnect
8963  * @ipr_cmd:    ipr command struct
8964  *
8965  * Description: Freeze the adapter to complete quiesce processing
8966  *
8967  * Return value:
8968  *      IPR_RC_JOB_CONTINUE
8969  **/
8970 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8971 {
8972         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8973
8974         ENTER;
8975         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8976         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8977         LEAVE;
8978         return IPR_RC_JOB_CONTINUE;
8979 }
8980
8981 /**
8982  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8983  * @ipr_cmd:    ipr command struct
8984  *
8985  * Description: Ensure nothing is outstanding to the IOA and
8986  * proceed with IOA disconnect. Otherwise reset the IOA.
8987  *
8988  * Return value:
8989  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8990  **/
8991 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8992 {
8993         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8994         struct ipr_cmnd *loop_cmd;
8995         struct ipr_hrr_queue *hrrq;
8996         int rc = IPR_RC_JOB_CONTINUE;
8997         int count = 0;
8998
8999         ENTER;
9000         ipr_cmd->job_step = ipr_reset_quiesce_done;
9001
9002         for_each_hrrq(hrrq, ioa_cfg) {
9003                 spin_lock(&hrrq->_lock);
9004                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9005                         count++;
9006                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9007                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9008                         rc = IPR_RC_JOB_RETURN;
9009                         break;
9010                 }
9011                 spin_unlock(&hrrq->_lock);
9012
9013                 if (count)
9014                         break;
9015         }
9016
9017         LEAVE;
9018         return rc;
9019 }
9020
9021 /**
9022  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9023  * @ipr_cmd:    ipr command struct
9024  *
9025  * Description: Cancel any outstanding HCAMs to the IOA.
9026  *
9027  * Return value:
9028  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9029  **/
9030 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9031 {
9032         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9033         int rc = IPR_RC_JOB_CONTINUE;
9034         struct ipr_cmd_pkt *cmd_pkt;
9035         struct ipr_cmnd *hcam_cmd;
9036         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9037
9038         ENTER;
9039         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9040
9041         if (!hrrq->ioa_is_dead) {
9042                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9043                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9044                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9045                                         continue;
9046
9047                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9049                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9050                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9051                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9052                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9053                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9054                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9055                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9056                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9057                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9058                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9059                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9060                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9061
9062                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9063                                            IPR_CANCEL_TIMEOUT);
9064
9065                                 rc = IPR_RC_JOB_RETURN;
9066                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9067                                 break;
9068                         }
9069                 }
9070         } else
9071                 ipr_cmd->job_step = ipr_reset_alert;
9072
9073         LEAVE;
9074         return rc;
9075 }
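
/*
 * Illustrative sketch, not part of the driver: the Cancel Request CDB
 * built above carries the 64-bit IOARCB address of the HCAM being
 * cancelled big-endian, with the upper 32 bits in cdb[10..13] and the
 * lower 32 bits in cdb[2..5]. The same packing as a hypothetical
 * helper:
 */
static void __maybe_unused ipr_pack_cancel_addr(u8 *cdb, u64 addr)
{
        int i;

        for (i = 0; i < 4; i++) {
                cdb[10 + i] = (addr >> (56 - 8 * i)) & 0xff; /* bits 63..32 */
                cdb[2 + i] = (addr >> (24 - 8 * i)) & 0xff;  /* bits 31..0 */
        }
}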
9076
9077 /**
9078  * ipr_reset_ucode_download_done - Microcode download completion
9079  * @ipr_cmd:    ipr command struct
9080  *
9081  * Description: This function unmaps the microcode download buffer.
9082  *
9083  * Return value:
9084  *      IPR_RC_JOB_CONTINUE
9085  **/
9086 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9087 {
9088         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9089         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9090
9091         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9092                      sglist->num_sg, DMA_TO_DEVICE);
9093
9094         ipr_cmd->job_step = ipr_reset_alert;
9095         return IPR_RC_JOB_CONTINUE;
9096 }
9097
9098 /**
9099  * ipr_reset_ucode_download - Download microcode to the adapter
9100  * @ipr_cmd:    ipr command struct
9101  *
9102  * Description: This function checks to see if it there is microcode
9103  * to download to the adapter. If there is, a download is performed.
9104  *
9105  * Return value:
9106  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9107  **/
9108 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9109 {
9110         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9111         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9112
9113         ENTER;
9114         ipr_cmd->job_step = ipr_reset_alert;
9115
9116         if (!sglist)
9117                 return IPR_RC_JOB_CONTINUE;
9118
9119         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9120         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9121         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9122         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9123         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9124         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9125         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9126
9127         if (ioa_cfg->sis64)
9128                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9129         else
9130                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9131         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9132
9133         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9134                    IPR_WRITE_BUFFER_TIMEOUT);
9135
9136         LEAVE;
9137         return IPR_RC_JOB_RETURN;
9138 }
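
/*
 * Illustrative sketch, not part of the driver: WRITE BUFFER carries
 * the transfer length as a 24-bit big-endian field in CDB bytes 6-8,
 * which is exactly what the three shifts above encode. The same
 * encoding as a hypothetical helper:
 */
static void __maybe_unused ipr_put_be24(u8 *p, u32 len)
{
        p[0] = (len >> 16) & 0xff;
        p[1] = (len >> 8) & 0xff;
        p[2] = len & 0xff;
}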
9139
9140 /**
9141  * ipr_reset_shutdown_ioa - Shutdown the adapter
9142  * @ipr_cmd:    ipr command struct
9143  *
9144  * Description: This function issues an adapter shutdown of the
9145  * specified type to the specified adapter as part of the
9146  * adapter reset job.
9147  *
9148  * Return value:
9149  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9150  **/
9151 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9152 {
9153         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9154         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9155         unsigned long timeout;
9156         int rc = IPR_RC_JOB_CONTINUE;
9157
9158         ENTER;
9159         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9160                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9161         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9162                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9163                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9164                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9165                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9166                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9167
9168                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9169                         timeout = IPR_SHUTDOWN_TIMEOUT;
9170                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9171                         timeout = IPR_INTERNAL_TIMEOUT;
9172                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9173                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9174                 else
9175                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9176
9177                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9178
9179                 rc = IPR_RC_JOB_RETURN;
9180                 ipr_cmd->job_step = ipr_reset_ucode_download;
9181         } else
9182                 ipr_cmd->job_step = ipr_reset_alert;
9183
9184         LEAVE;
9185         return rc;
9186 }
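
/*
 * Illustrative sketch, not part of the driver: the timeout selection
 * above restated as a hypothetical helper. A full normal shutdown
 * gets the longest timeout; abbreviated shutdowns get a shorter one
 * that is stretched for dual-IOA RAID configurations.
 */
static unsigned long __maybe_unused
ipr_shutdown_timeout(struct ipr_ioa_cfg *ioa_cfg,
                     enum ipr_shutdown_type shutdown_type)
{
        if (shutdown_type == IPR_SHUTDOWN_NORMAL)
                return IPR_SHUTDOWN_TIMEOUT;
        if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
                return IPR_INTERNAL_TIMEOUT;
        if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
                return IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
        return IPR_ABBREV_SHUTDOWN_TIMEOUT;
}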
9187
9188 /**
9189  * ipr_reset_ioa_job - Adapter reset job
9190  * @ipr_cmd:    ipr command struct
9191  *
9192  * Description: This function is the job router for the adapter reset job.
9193  *
9194  * Return value:
9195  *      none
9196  **/
9197 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9198 {
9199         u32 rc, ioasc;
9200         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9201
9202         do {
9203                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9204
9205                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9206                         /*
9207                          * We are doing nested adapter resets and this is
9208                          * not the current reset job.
9209                          */
9210                         list_add_tail(&ipr_cmd->queue,
9211                                         &ipr_cmd->hrrq->hrrq_free_q);
9212                         return;
9213                 }
9214
9215                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9216                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9217                         if (rc == IPR_RC_JOB_RETURN)
9218                                 return;
9219                 }
9220
9221                 ipr_reinit_ipr_cmnd(ipr_cmd);
9222                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9223                 rc = ipr_cmd->job_step(ipr_cmd);
9224         } while (rc == IPR_RC_JOB_CONTINUE);
9225 }
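
/*
 * How the job resumes: steps that issue an adapter command pass
 * ipr_reset_ioa_job as the completion callback to ipr_do_req(), and
 * steps that arm a timer set ipr_cmd->done = ipr_reset_ioa_job before
 * queueing, so both a completed command and an expired timer funnel
 * back into the do/while loop above.
 */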
9226
9227 /**
9228  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9229  * @ioa_cfg:            ioa config struct
9230  * @job_step:           first job step of reset job
9231  * @shutdown_type:      shutdown type
9232  *
9233  * Description: This function will initiate the reset of the given adapter
9234  * starting at the selected job step.
9235  * If the caller needs to wait on the completion of the reset,
9236  * the caller must sleep on the reset_wait_q.
9237  *
9238  * Return value:
9239  *      none
9240  **/
9241 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9242                                     int (*job_step) (struct ipr_cmnd *),
9243                                     enum ipr_shutdown_type shutdown_type)
9244 {
9245         struct ipr_cmnd *ipr_cmd;
9246         int i;
9247
9248         ioa_cfg->in_reset_reload = 1;
9249         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9250                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9251                 ioa_cfg->hrrq[i].allow_cmds = 0;
9252                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9253         }
9254         wmb();
9255         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9256                 ioa_cfg->scsi_unblock = 0;
9257                 ioa_cfg->scsi_blocked = 1;
9258                 scsi_block_requests(ioa_cfg->host);
9259         }
9260
9261         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9262         ioa_cfg->reset_cmd = ipr_cmd;
9263         ipr_cmd->job_step = job_step;
9264         ipr_cmd->u.shutdown_type = shutdown_type;
9265
9266         ipr_reset_ioa_job(ipr_cmd);
9267 }
9268
9269 /**
9270  * ipr_initiate_ioa_reset - Initiate an adapter reset
9271  * @ioa_cfg:            ioa config struct
9272  * @shutdown_type:      shutdown type
9273  *
9274  * Description: This function will initiate the reset of the given adapter.
9275  * If the caller needs to wait on the completion of the reset,
9276  * the caller must sleep on the reset_wait_q.
9277  *
9278  * Return value:
9279  *      none
9280  **/
9281 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9282                                    enum ipr_shutdown_type shutdown_type)
9283 {
9284         int i;
9285
9286         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9287                 return;
9288
9289         if (ioa_cfg->in_reset_reload) {
9290                 if (ioa_cfg->sdt_state == GET_DUMP)
9291                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9292                 else if (ioa_cfg->sdt_state == READ_DUMP)
9293                         ioa_cfg->sdt_state = ABORT_DUMP;
9294         }
9295
9296         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9297                 dev_err(&ioa_cfg->pdev->dev,
9298                         "IOA taken offline - error recovery failed\n");
9299
9300                 ioa_cfg->reset_retries = 0;
9301                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9302                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9303                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9304                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9305                 }
9306                 wmb();
9307
9308                 if (ioa_cfg->in_ioa_bringdown) {
9309                         ioa_cfg->reset_cmd = NULL;
9310                         ioa_cfg->in_reset_reload = 0;
9311                         ipr_fail_all_ops(ioa_cfg);
9312                         wake_up_all(&ioa_cfg->reset_wait_q);
9313
9314                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9315                                 ioa_cfg->scsi_unblock = 1;
9316                                 schedule_work(&ioa_cfg->work_q);
9317                         }
9318                         return;
9319                 } else {
9320                         ioa_cfg->in_ioa_bringdown = 1;
9321                         shutdown_type = IPR_SHUTDOWN_NONE;
9322                 }
9323         }
9324
9325         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9326                                 shutdown_type);
9327 }
9328
9329 /**
9330  * ipr_reset_freeze - Hold off all I/O activity
9331  * @ipr_cmd:    ipr command struct
9332  *
9333  * Description: If the PCI slot is frozen, hold off all I/O
9334  * activity; then, as soon as the slot is available again,
9335  * initiate an adapter reset.
9336  */
9337 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9338 {
9339         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9340         int i;
9341
9342         /* Disallow new interrupts to avoid looping on a frozen slot */
9343         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9344                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9345                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9346                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9347         }
9348         wmb();
9349         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9350         ipr_cmd->done = ipr_reset_ioa_job;
9351         return IPR_RC_JOB_RETURN;
9352 }
9353
9354 /**
9355  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9356  * @pdev:       PCI device struct
9357  *
9358  * Description: This routine is called to tell us that the MMIO
9359  * access to the IOA has been restored.
9360  */
9361 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9362 {
9363         unsigned long flags = 0;
9364         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9365
9366         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9367         if (!ioa_cfg->probe_done)
9368                 pci_save_state(pdev);
9369         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9370         return PCI_ERS_RESULT_NEED_RESET;
9371 }
9372
9373 /**
9374  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9375  * @pdev:       PCI device struct
9376  *
9377  * Description: This routine is called to tell us that the PCI bus
9378  * is down. Can't do anything here, except put the device driver
9379  * into a holding pattern, waiting for the PCI bus to come back.
9380  */
9381 static void ipr_pci_frozen(struct pci_dev *pdev)
9382 {
9383         unsigned long flags = 0;
9384         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9385
9386         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9387         if (ioa_cfg->probe_done)
9388                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9389         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9390 }
9391
9392 /**
9393  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9394  * @pdev:       PCI device struct
9395  *
9396  * Description: This routine is called by the pci error recovery
9397  * code after the PCI slot has been reset, just before we
9398  * should resume normal operations.
9399  */
9400 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9401 {
9402         unsigned long flags = 0;
9403         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9404
9405         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9406         if (ioa_cfg->probe_done) {
9407                 if (ioa_cfg->needs_warm_reset)
9408                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9409                 else
9410                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9411                                                 IPR_SHUTDOWN_NONE);
9412         } else
9413                 wake_up_all(&ioa_cfg->eeh_wait_q);
9414         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9415         return PCI_ERS_RESULT_RECOVERED;
9416 }
9417
9418 /**
9419  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9420  * @pdev:       PCI device struct
9421  *
9422  * Description: This routine is called when the PCI bus has
9423  * permanently failed.
9424  */
9425 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9426 {
9427         unsigned long flags = 0;
9428         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9429         int i;
9430
9431         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9432         if (ioa_cfg->probe_done) {
9433                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9434                         ioa_cfg->sdt_state = ABORT_DUMP;
9435                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9436                 ioa_cfg->in_ioa_bringdown = 1;
9437                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9438                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9439                         ioa_cfg->hrrq[i].allow_cmds = 0;
9440                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9441                 }
9442                 wmb();
9443                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9444         } else
9445                 wake_up_all(&ioa_cfg->eeh_wait_q);
9446         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9447 }
9448
9449 /**
9450  * ipr_pci_error_detected - Called when a PCI error is detected.
9451  * @pdev:       PCI device struct
9452  * @state:      PCI channel state
9453  *
9454  * Description: Called when a PCI error is detected.
9455  *
9456  * Return value:
9457  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9458  */
9459 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9460                                                pci_channel_state_t state)
9461 {
9462         switch (state) {
9463         case pci_channel_io_frozen:
9464                 ipr_pci_frozen(pdev);
9465                 return PCI_ERS_RESULT_CAN_RECOVER;
9466         case pci_channel_io_perm_failure:
9467                 ipr_pci_perm_failure(pdev);
9468                 return PCI_ERS_RESULT_DISCONNECT;
9470         default:
9471                 break;
9472         }
9473         return PCI_ERS_RESULT_NEED_RESET;
9474 }
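
/*
 * How these callbacks compose: under PCI error recovery (EEH on Power
 * systems) the PCI core walks them in roughly this order for a
 * recoverable error:
 *
 *      error_detected(io_frozen) -> ipr_pci_frozen()       hold off I/O
 *      mmio_enabled()            -> ipr_pci_mmio_enabled() request reset
 *      slot_reset()              -> ipr_pci_slot_reset()   restart reset job
 *
 * A permanent failure instead goes straight to ipr_pci_perm_failure(),
 * which takes the IOA offline.
 */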
9475
9476 /**
9477  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9478  * @ioa_cfg:    ioa cfg struct
9479  *
9480  * Description: This is the second phase of adapter initialization.
9481  * This function takes care of initializing the adapter to the point
9482  * where it can accept new commands.
9483  *
9484  * Return value:
9485  *      0 on success / -EIO on failure
9486  **/
9487 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9488 {
9489         int rc = 0;
9490         unsigned long host_lock_flags = 0;
9491
9492         ENTER;
9493         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9494         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9495         ioa_cfg->probe_done = 1;
9496         if (ioa_cfg->needs_hard_reset) {
9497                 ioa_cfg->needs_hard_reset = 0;
9498                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9499         } else
9500                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9501                                         IPR_SHUTDOWN_NONE);
9502         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9503
9504         LEAVE;
9505         return rc;
9506 }
9507
9508 /**
9509  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9510  * @ioa_cfg:    ioa config struct
9511  *
9512  * Return value:
9513  *      none
9514  **/
9515 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9516 {
9517         int i;
9518
9519         if (ioa_cfg->ipr_cmnd_list) {
9520                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9521                         if (ioa_cfg->ipr_cmnd_list[i])
9522                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9523                                               ioa_cfg->ipr_cmnd_list[i],
9524                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9525
9526                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9527                 }
9528         }
9529
9531         dma_pool_destroy(ioa_cfg->ipr_cmd_pool); /* dma_pool_destroy() tolerates NULL */
9532
9533         kfree(ioa_cfg->ipr_cmnd_list);
9534         kfree(ioa_cfg->ipr_cmnd_list_dma);
9535         ioa_cfg->ipr_cmnd_list = NULL;
9536         ioa_cfg->ipr_cmnd_list_dma = NULL;
9537         ioa_cfg->ipr_cmd_pool = NULL;
9538 }
9539
9540 /**
9541  * ipr_free_mem - Frees memory allocated for an adapter
9542  * @ioa_cfg:    ioa cfg struct
9543  *
9544  * Return value:
9545  *      nothing
9546  **/
9547 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9548 {
9549         int i;
9550
9551         kfree(ioa_cfg->res_entries);
9552         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9553                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9554         ipr_free_cmd_blks(ioa_cfg);
9555
9556         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9557                 dma_free_coherent(&ioa_cfg->pdev->dev,
9558                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9559                                   ioa_cfg->hrrq[i].host_rrq,
9560                                   ioa_cfg->hrrq[i].host_rrq_dma);
9561
9562         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9563                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9564
9565         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9566                 dma_free_coherent(&ioa_cfg->pdev->dev,
9567                                   sizeof(struct ipr_hostrcb),
9568                                   ioa_cfg->hostrcb[i],
9569                                   ioa_cfg->hostrcb_dma[i]);
9570         }
9571
9572         ipr_free_dump(ioa_cfg);
9573         kfree(ioa_cfg->trace);
9574 }
9575
9576 /**
9577  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9578  * @ioa_cfg:    ipr cfg struct
9579  *
9580  * This function frees all allocated IRQs for the
9581  * specified adapter.
9582  *
9583  * Return value:
9584  *      none
9585  **/
9586 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9587 {
9588         struct pci_dev *pdev = ioa_cfg->pdev;
9589         int i;
9590
9591         for (i = 0; i < ioa_cfg->nvectors; i++)
9592                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9593         pci_free_irq_vectors(pdev);
9594 }
9595
9596 /**
9597  * ipr_free_all_resources - Free all allocated resources for an adapter.
9598  * @ioa_cfg:    ioa config struct
9599  *
9600  * This function frees all allocated resources for the
9601  * specified adapter.
9602  *
9603  * Return value:
9604  *      none
9605  **/
9606 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9607 {
9608         struct pci_dev *pdev = ioa_cfg->pdev;
9609
9610         ENTER;
9611         ipr_free_irqs(ioa_cfg);
9612         if (ioa_cfg->reset_work_q)
9613                 destroy_workqueue(ioa_cfg->reset_work_q);
9614         iounmap(ioa_cfg->hdw_dma_regs);
9615         pci_release_regions(pdev);
9616         ipr_free_mem(ioa_cfg);
9617         scsi_host_put(ioa_cfg->host);
9618         pci_disable_device(pdev);
9619         LEAVE;
9620 }
9621
9622 /**
9623  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9624  * @ioa_cfg:    ioa config struct
9625  *
9626  * Return value:
9627  *      0 on success / -ENOMEM on allocation failure
9628  **/
9629 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9630 {
9631         struct ipr_cmnd *ipr_cmd;
9632         struct ipr_ioarcb *ioarcb;
9633         dma_addr_t dma_addr;
9634         int i, entries_each_hrrq, hrrq_id = 0;
9635
9636         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9637                                                 sizeof(struct ipr_cmnd), 512, 0);
9638
9639         if (!ioa_cfg->ipr_cmd_pool)
9640                 return -ENOMEM;
9641
9642         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9643         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9644
9645         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9646                 ipr_free_cmd_blks(ioa_cfg);
9647                 return -ENOMEM;
9648         }
9649
9650         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9651                 if (ioa_cfg->hrrq_num > 1) {
9652                         if (i == 0) {
9653                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9654                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9655                                 ioa_cfg->hrrq[i].max_cmd_id =
9656                                         (entries_each_hrrq - 1);
9657                         } else {
9658                                 entries_each_hrrq =
9659                                         IPR_NUM_BASE_CMD_BLKS/
9660                                         (ioa_cfg->hrrq_num - 1);
9661                                 ioa_cfg->hrrq[i].min_cmd_id =
9662                                         IPR_NUM_INTERNAL_CMD_BLKS +
9663                                         (i - 1) * entries_each_hrrq;
9664                                 ioa_cfg->hrrq[i].max_cmd_id =
9665                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9666                                         i * entries_each_hrrq - 1);
9667                         }
9668                 } else {
9669                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9670                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9671                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9672                 }
9673                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9674         }
9675
9676         BUG_ON(ioa_cfg->hrrq_num == 0);
9677
9678         i = IPR_NUM_CMD_BLKS -
9679                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9680         if (i > 0) {
9681                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9682                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9683         }
9684
9685         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9686                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9687                                 GFP_KERNEL, &dma_addr);
9688
9689                 if (!ipr_cmd) {
9690                         ipr_free_cmd_blks(ioa_cfg);
9691                         return -ENOMEM;
9692                 }
9693
9694                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9695                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9696
9697                 ioarcb = &ipr_cmd->ioarcb;
9698                 ipr_cmd->dma_addr = dma_addr;
9699                 if (ioa_cfg->sis64)
9700                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9701                 else
9702                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9703
9704                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9705                 if (ioa_cfg->sis64) {
9706                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9707                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9708                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9709                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9710                 } else {
9711                         ioarcb->write_ioadl_addr =
9712                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9713                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9714                         ioarcb->ioasa_host_pci_addr =
9715                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9716                 }
9717                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9718                 ipr_cmd->cmd_index = i;
9719                 ipr_cmd->ioa_cfg = ioa_cfg;
9720                 ipr_cmd->sense_buffer_dma = dma_addr +
9721                         offsetof(struct ipr_cmnd, sense_buffer);
9722
9723                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9724                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9725                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9726                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9727                         hrrq_id++;
9728         }
9729
9730         return 0;
9731 }
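
/*
 * Worked example, purely illustrative (the constants are hypothetical):
 * with IPR_NUM_INTERNAL_CMD_BLKS == 5, IPR_NUM_BASE_CMD_BLKS == 100
 * (hence IPR_NUM_CMD_BLKS == 105) and hrrq_num == 4, the loop above
 * assigns
 *
 *      hrrq[0]: cmd ids  0 -  4   (internal commands)
 *      hrrq[1]: cmd ids  5 - 37
 *      hrrq[2]: cmd ids 38 - 70
 *      hrrq[3]: cmd ids 71 - 103
 *
 * since 100 / 3 == 33. The fix-up after the loop then grows the last
 * queue by the one id lost to integer division, making its range
 * 71 - 104.
 */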
9732
9733 /**
9734  * ipr_alloc_mem - Allocate memory for an adapter
9735  * @ioa_cfg:    ioa config struct
9736  *
9737  * Return value:
9738  *      0 on success / non-zero for error
9739  **/
9740 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9741 {
9742         struct pci_dev *pdev = ioa_cfg->pdev;
9743         int i, rc = -ENOMEM;
9744
9745         ENTER;
9746         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9747                                        sizeof(struct ipr_resource_entry),
9748                                        GFP_KERNEL);
9749
9750         if (!ioa_cfg->res_entries)
9751                 goto out;
9752
9753         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9754                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9755                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9756         }
9757
9758         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9759                                               sizeof(struct ipr_misc_cbs),
9760                                               &ioa_cfg->vpd_cbs_dma,
9761                                               GFP_KERNEL);
9762
9763         if (!ioa_cfg->vpd_cbs)
9764                 goto out_free_res_entries;
9765
9766         if (ipr_alloc_cmd_blks(ioa_cfg))
9767                 goto out_free_vpd_cbs;
9768
9769         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9770                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9771                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9772                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9773                                         GFP_KERNEL);
9774
9775                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9776                         while (--i >= 0)
9777                                 dma_free_coherent(&pdev->dev,
9778                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9779                                         ioa_cfg->hrrq[i].host_rrq,
9780                                         ioa_cfg->hrrq[i].host_rrq_dma);
9781                         goto out_ipr_free_cmd_blocks;
9782                 }
9783                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9784         }
9785
9786         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9787                                                   ioa_cfg->cfg_table_size,
9788                                                   &ioa_cfg->cfg_table_dma,
9789                                                   GFP_KERNEL);
9790
9791         if (!ioa_cfg->u.cfg_table)
9792                 goto out_free_host_rrq;
9793
9794         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9795                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9796                                                          sizeof(struct ipr_hostrcb),
9797                                                          &ioa_cfg->hostrcb_dma[i],
9798                                                          GFP_KERNEL);
9799
9800                 if (!ioa_cfg->hostrcb[i])
9801                         goto out_free_hostrcb_dma;
9802
9803                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9804                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9805                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9806                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9807         }
9808
9809         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9810                                  sizeof(struct ipr_trace_entry),
9811                                  GFP_KERNEL);
9812
9813         if (!ioa_cfg->trace)
9814                 goto out_free_hostrcb_dma;
9815
9816         rc = 0;
9817 out:
9818         LEAVE;
9819         return rc;
9820
9821 out_free_hostrcb_dma:
9822         while (i-- > 0) {
9823                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9824                                   ioa_cfg->hostrcb[i],
9825                                   ioa_cfg->hostrcb_dma[i]);
9826         }
9827         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9828                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9829 out_free_host_rrq:
9830         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9831                 dma_free_coherent(&pdev->dev,
9832                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9833                                   ioa_cfg->hrrq[i].host_rrq,
9834                                   ioa_cfg->hrrq[i].host_rrq_dma);
9835         }
9836 out_ipr_free_cmd_blocks:
9837         ipr_free_cmd_blks(ioa_cfg);
9838 out_free_vpd_cbs:
9839         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9840                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9841 out_free_res_entries:
9842         kfree(ioa_cfg->res_entries);
9843         goto out;
9844 }
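
/*
 * A note on the unwind above: the error path releases resources in
 * exact reverse order of allocation. The hostrcb loop uses
 * while (i-- > 0) so only the buffers allocated before the failing
 * iteration are freed, and each label then falls through to release
 * everything allocated earlier, ending in the common "goto out".
 */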
9845
9846 /**
9847  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9848  * @ioa_cfg:    ioa config struct
9849  *
9850  * Return value:
9851  *      none
9852  **/
9853 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9854 {
9855         int i;
9856
9857         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9858                 ioa_cfg->bus_attr[i].bus = i;
9859                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9860                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9861                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9862                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9863                 else
9864                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9865         }
9866 }
9867
9868 /**
9869  * ipr_init_regs - Initialize IOA registers
9870  * @ioa_cfg:    ioa config struct
9871  *
9872  * Return value:
9873  *      none
9874  **/
9875 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9876 {
9877         const struct ipr_interrupt_offsets *p;
9878         struct ipr_interrupts *t;
9879         void __iomem *base;
9880
9881         p = &ioa_cfg->chip_cfg->regs;
9882         t = &ioa_cfg->regs;
9883         base = ioa_cfg->hdw_dma_regs;
9884
9885         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9886         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9887         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9888         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9889         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9890         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9891         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9892         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9893         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9894         t->ioarrin_reg = base + p->ioarrin_reg;
9895         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9896         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9897         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9898         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9899         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9900         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9901
9902         if (ioa_cfg->sis64) {
9903                 t->init_feedback_reg = base + p->init_feedback_reg;
9904                 t->dump_addr_reg = base + p->dump_addr_reg;
9905                 t->dump_data_reg = base + p->dump_data_reg;
9906                 t->endian_swap_reg = base + p->endian_swap_reg;
9907         }
9908 }
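
/*
 * Illustrative sketch, not part of the driver: once ipr_init_regs() has
 * filled in ioa_cfg->regs, interrupt state is read through these
 * precomputed MMIO addresses, exactly as the probe path does later on.
 * Compiled out; for exposition only.
 */
#if 0
static u32 ipr_example_sense_interrupts(struct ipr_ioa_cfg *ioa_cfg)
{
	/* the offsets from chip_cfg->regs were added to hdw_dma_regs above */
	return readl(ioa_cfg->regs.sense_interrupt_reg);
}
#endif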
9909
9910 /**
9911  * ipr_init_ioa_cfg - Initialize IOA config struct
9912  * @ioa_cfg:    ioa config struct
9913  * @host:               scsi host struct
9914  * @pdev:               PCI dev struct
9915  *
9916  * Return value:
9917  *      none
9918  **/
9919 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9920                              struct Scsi_Host *host, struct pci_dev *pdev)
9921 {
9922         int i;
9923
9924         ioa_cfg->host = host;
9925         ioa_cfg->pdev = pdev;
9926         ioa_cfg->log_level = ipr_log_level;
9927         ioa_cfg->doorbell = IPR_DOORBELL;
9928         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9929         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9930         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9931         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9932         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9933         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9934
9935         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9936         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9937         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9938         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9939         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9940         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9941         INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9942         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9943         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9944         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9945         ioa_cfg->sdt_state = INACTIVE;
9946
9947         ipr_initialize_bus_attr(ioa_cfg);
9948         ioa_cfg->max_devs_supported = ipr_max_devs;
9949
9950         if (ioa_cfg->sis64) {
9951                 host->max_channel = IPR_MAX_SIS64_BUSES;
9952                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9953                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9954                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9955                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9956                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9957                                            + ((sizeof(struct ipr_config_table_entry64)
9958                                                * ioa_cfg->max_devs_supported)));
9959         } else {
9960                 host->max_channel = IPR_VSET_BUS;
9961                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9962                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9963                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9964                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9965                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9966                                            + ((sizeof(struct ipr_config_table_entry)
9967                                                * ioa_cfg->max_devs_supported)));
9968         }
9969
9970         host->unique_id = host->host_no;
9971         host->max_cmd_len = IPR_MAX_CDB_LEN;
9972         host->can_queue = ioa_cfg->max_cmds;
9973         pci_set_drvdata(pdev, ioa_cfg);
9974
9975         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9976                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9977                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9978                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9979                 if (i == 0)
9980                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9981                 else
9982                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9983         }
9984 }
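
/*
 * Illustrative sketch, not part of the driver: the loop above points
 * hrrq[0].lock at the SCSI host lock and gives every other HRRQ its own
 * _lock, so callers can always take hrrq->lock without caring which
 * queue they hold. Compiled out; for exposition only.
 */
#if 0
static void ipr_example_walk_hrrq(struct ipr_hrr_queue *hrrq)
{
	unsigned long flags;

	spin_lock_irqsave(hrrq->lock, flags);	/* host_lock for HRRQ 0 */
	/* ... inspect hrrq->hrrq_pending_q here ... */
	spin_unlock_irqrestore(hrrq->lock, flags);
}
#endif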
9985
9986 /**
9987  * ipr_get_chip_info - Find adapter chip information
9988  * @dev_id:             PCI device id struct
9989  *
9990  * Return value:
9991  *      ptr to chip information on success / NULL on failure
9992  **/
9993 static const struct ipr_chip_t *
9994 ipr_get_chip_info(const struct pci_device_id *dev_id)
9995 {
9996         int i;
9997
9998         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9999                 if (ipr_chip[i].vendor == dev_id->vendor &&
10000                     ipr_chip[i].device == dev_id->device)
10001                         return &ipr_chip[i];
10002         return NULL;
10003 }
10004
10005 /**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery during probe
10008  * @ioa_cfg:    ioa config struct
10009  *
10010  * Return value:
10011  *      None
10012  **/
10013 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10014 {
10015         struct pci_dev *pdev = ioa_cfg->pdev;
10016
10017         if (pci_channel_offline(pdev)) {
10018                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10019                                    !pci_channel_offline(pdev),
10020                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10021                 pci_restore_state(pdev);
10022         }
10023 }
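
/*
 * Illustrative sketch, not part of the driver: the wait above sleeps until
 * the PCI channel comes back online. Its counterpart is a wake-up issued
 * from the driver's PCI error handlers once recovery completes; a minimal
 * sketch of that shape (hypothetical helper name) is below. Compiled out.
 */
#if 0
static void ipr_example_eeh_recovered(struct ipr_ioa_cfg *ioa_cfg)
{
	wake_up_all(&ioa_cfg->eeh_wait_q);
}
#endif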
10024
10025 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10026 {
10027         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10028
10029         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10030                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10031                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10032                 ioa_cfg->vectors_info[vec_idx].
10033                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10034         }
10035 }
10036
10037 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10038                 struct pci_dev *pdev)
10039 {
10040         int i, rc;
10041
10042         for (i = 1; i < ioa_cfg->nvectors; i++) {
10043                 rc = request_irq(pci_irq_vector(pdev, i),
10044                         ipr_isr_mhrrq,
10045                         0,
10046                         ioa_cfg->vectors_info[i].desc,
10047                         &ioa_cfg->hrrq[i]);
10048                 if (rc) {
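			/* vector 0 is requested separately by the caller,
			 * so unwind only vectors 1..i-1 here
			 */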
10049                         while (--i > 0)
10050                                 free_irq(pci_irq_vector(pdev, i),
10051                                         &ioa_cfg->hrrq[i]);
10052                         return rc;
10053                 }
10054         }
10055         return 0;
10056 }
10057
10058 /**
10059  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	IRQ_HANDLED
10067  **/
10068 static irqreturn_t ipr_test_intr(int irq, void *devp)
10069 {
10070         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10071         unsigned long lock_flags = 0;
10072         irqreturn_t rc = IRQ_HANDLED;
10073
10074         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10075         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10076
10077         ioa_cfg->msi_received = 1;
10078         wake_up(&ioa_cfg->msi_wait_q);
10079
10080         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10081         return rc;
10082 }
10083
10084 /**
10085  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
10091  *
10092  * Return value:
10093  *      0 on success / non-zero on failure
10094  **/
10095 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10096 {
10097         int rc;
10098         volatile u32 int_reg;
10099         unsigned long lock_flags = 0;
10100         int irq = pci_irq_vector(pdev, 0);
10101
10102         ENTER;
10103
10104         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10105         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10106         ioa_cfg->msi_received = 0;
10107         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10108         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10109         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10110         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10111
10112         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10113         if (rc) {
		dev_err(&pdev->dev, "Cannot assign irq %d\n", irq);
10115                 return rc;
10116         } else if (ipr_debug)
10117                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10118
10119         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10120         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10121         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10122         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10123         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10124
10125         if (!ioa_cfg->msi_received) {
10126                 /* MSI test failed */
10127                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10128                 rc = -EOPNOTSUPP;
10129         } else if (ipr_debug)
10130                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10131
10132         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10133
10134         free_irq(irq, ioa_cfg);
10135
10136         LEAVE;
10137
10138         return rc;
10139 }
10140
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:		PCI device struct
 * @dev_id:		PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
10148 static int ipr_probe_ioa(struct pci_dev *pdev,
10149                          const struct pci_device_id *dev_id)
10150 {
10151         struct ipr_ioa_cfg *ioa_cfg;
10152         struct Scsi_Host *host;
10153         unsigned long ipr_regs_pci;
10154         void __iomem *ipr_regs;
10155         int rc = PCIBIOS_SUCCESSFUL;
10156         volatile u32 mask, uproc, interrupts;
10157         unsigned long lock_flags, driver_lock_flags;
10158         unsigned int irq_flag;
10159
10160         ENTER;
10161
10162         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10163         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10164
10165         if (!host) {
10166                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10167                 rc = -ENOMEM;
10168                 goto out;
10169         }
10170
10171         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10172         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10173         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10174
10175         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10176
	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		/* avoid returning success for an unsupported chipset */
		rc = -EINVAL;
		goto out_scsi_host_put;
	}
10182
10183         /* set SIS 32 or SIS 64 */
10184         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10185         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10186         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10187         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10188
10189         if (ipr_transop_timeout)
10190                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10191         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10192                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10193         else
10194                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10195
10196         ioa_cfg->revid = pdev->revision;
10197
10198         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10199
10200         ipr_regs_pci = pci_resource_start(pdev, 0);
10201
10202         rc = pci_request_regions(pdev, IPR_NAME);
10203         if (rc < 0) {
10204                 dev_err(&pdev->dev,
10205                         "Couldn't register memory range of registers\n");
10206                 goto out_scsi_host_put;
10207         }
10208
10209         rc = pci_enable_device(pdev);
10210
10211         if (rc || pci_channel_offline(pdev)) {
10212                 if (pci_channel_offline(pdev)) {
10213                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10214                         rc = pci_enable_device(pdev);
10215                 }
10216
10217                 if (rc) {
10218                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10219                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10220                         goto out_release_regions;
10221                 }
10222         }
10223
10224         ipr_regs = pci_ioremap_bar(pdev, 0);
10225
10226         if (!ipr_regs) {
10227                 dev_err(&pdev->dev,
10228                         "Couldn't map memory range of registers\n");
10229                 rc = -ENOMEM;
10230                 goto out_disable;
10231         }
10232
10233         ioa_cfg->hdw_dma_regs = ipr_regs;
10234         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10235         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10236
10237         ipr_init_regs(ioa_cfg);
10238
10239         if (ioa_cfg->sis64) {
10240                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10241                 if (rc < 0) {
10242                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10243                         rc = dma_set_mask_and_coherent(&pdev->dev,
10244                                                        DMA_BIT_MASK(32));
10245                 }
10246         } else
10247                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10248
10249         if (rc < 0) {
10250                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10251                 goto cleanup_nomem;
10252         }
10253
10254         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10255                                    ioa_cfg->chip_cfg->cache_line_size);
10256
10257         if (rc != PCIBIOS_SUCCESSFUL) {
10258                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10259                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10260                 rc = -EIO;
10261                 goto cleanup_nomem;
10262         }
10263
10264         /* Issue MMIO read to ensure card is not in EEH */
10265         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10266         ipr_wait_for_pci_err_recovery(ioa_cfg);
10267
10268         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10269                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10270                         IPR_MAX_MSIX_VECTORS);
10271                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10272         }
10273
10274         irq_flag = PCI_IRQ_LEGACY;
10275         if (ioa_cfg->ipr_chip->has_msi)
10276                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10277         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10278         if (rc < 0) {
10279                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10280                 goto cleanup_nomem;
10281         }
10282         ioa_cfg->nvectors = rc;
10283
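	/* with neither MSI nor MSI-X available, fall back to clearing the
	 * interrupt register in the ISR path, as legacy INTx requires
	 */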
10284         if (!pdev->msi_enabled && !pdev->msix_enabled)
10285                 ioa_cfg->clear_isr = 1;
10286
10287         pci_set_master(pdev);
10288
10289         if (pci_channel_offline(pdev)) {
10290                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10291                 pci_set_master(pdev);
10292                 if (pci_channel_offline(pdev)) {
10293                         rc = -EIO;
10294                         goto out_msi_disable;
10295                 }
10296         }
10297
10298         if (pdev->msi_enabled || pdev->msix_enabled) {
10299                 rc = ipr_test_msi(ioa_cfg, pdev);
10300                 switch (rc) {
10301                 case 0:
10302                         dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10304                                 pdev->msix_enabled ? "-X" : "");
10305                         break;
10306                 case -EOPNOTSUPP:
10307                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10308                         pci_free_irq_vectors(pdev);
10309
10310                         ioa_cfg->nvectors = 1;
10311                         ioa_cfg->clear_isr = 1;
10312                         break;
10313                 default:
10314                         goto out_msi_disable;
10315                 }
10316         }
10317
10318         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10319                                 (unsigned int)num_online_cpus(),
10320                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10321
10322         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10323                 goto out_msi_disable;
10324
10325         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10326                 goto out_msi_disable;
10327
10328         rc = ipr_alloc_mem(ioa_cfg);
10329         if (rc < 0) {
10330                 dev_err(&pdev->dev,
10331                         "Couldn't allocate enough memory for device driver!\n");
10332                 goto out_msi_disable;
10333         }
10334
10335         /* Save away PCI config space for use following IOA reset */
10336         rc = pci_save_state(pdev);
10337
10338         if (rc != PCIBIOS_SUCCESSFUL) {
10339                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10340                 rc = -EIO;
10341                 goto cleanup_nolog;
10342         }
10343
10344         /*
10345          * If HRRQ updated interrupt is not masked, or reset alert is set,
10346          * the card is in an unknown state and needs a hard reset
10347          */
10348         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10349         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10350         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10351         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10352                 ioa_cfg->needs_hard_reset = 1;
10353         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10354                 ioa_cfg->needs_hard_reset = 1;
10355         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10356                 ioa_cfg->ioa_unit_checked = 1;
10357
10358         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10359         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10360         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10361
10362         if (pdev->msi_enabled || pdev->msix_enabled) {
10363                 name_msi_vectors(ioa_cfg);
10364                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10365                         ioa_cfg->vectors_info[0].desc,
10366                         &ioa_cfg->hrrq[0]);
10367                 if (!rc)
10368                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10369         } else {
10370                 rc = request_irq(pdev->irq, ipr_isr,
10371                          IRQF_SHARED,
10372                          IPR_NAME, &ioa_cfg->hrrq[0]);
10373         }
10374         if (rc) {
10375                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10376                         pdev->irq, rc);
10377                 goto cleanup_nolog;
10378         }
10379
10380         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10381             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10382                 ioa_cfg->needs_warm_reset = 1;
10383                 ioa_cfg->reset = ipr_reset_slot_reset;
10384
10385                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10386                                                                 WQ_MEM_RECLAIM, host->host_no);
10387
10388                 if (!ioa_cfg->reset_work_q) {
10389                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10390                         rc = -ENOMEM;
10391                         goto out_free_irq;
10392                 }
10393         } else
10394                 ioa_cfg->reset = ipr_reset_start_bist;
10395
10396         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10397         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10398         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10399
10400         LEAVE;
10401 out:
10402         return rc;
10403
10404 out_free_irq:
10405         ipr_free_irqs(ioa_cfg);
10406 cleanup_nolog:
10407         ipr_free_mem(ioa_cfg);
10408 out_msi_disable:
10409         ipr_wait_for_pci_err_recovery(ioa_cfg);
10410         pci_free_irq_vectors(pdev);
10411 cleanup_nomem:
10412         iounmap(ipr_regs);
10413 out_disable:
10414         pci_disable_device(pdev);
10415 out_release_regions:
10416         pci_release_regions(pdev);
10417 out_scsi_host_put:
10418         scsi_host_put(host);
10419         goto out;
10420 }
10421
10422 /**
10423  * ipr_initiate_ioa_bringdown - Bring down an adapter
10424  * @ioa_cfg:            ioa config struct
10425  * @shutdown_type:      shutdown type
10426  *
10427  * Description: This function will initiate bringing down the adapter.
10428  * This consists of issuing an IOA shutdown to the adapter
10429  * to flush the cache, and running BIST.
10430  * If the caller needs to wait on the completion of the reset,
10431  * the caller must sleep on the reset_wait_q.
10432  *
10433  * Return value:
10434  *      none
10435  **/
10436 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10437                                        enum ipr_shutdown_type shutdown_type)
10438 {
10439         ENTER;
10440         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10441                 ioa_cfg->sdt_state = ABORT_DUMP;
10442         ioa_cfg->reset_retries = 0;
10443         ioa_cfg->in_ioa_bringdown = 1;
10444         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10445         LEAVE;
10446 }
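
/*
 * Illustrative sketch, not part of the driver: per the note above, a caller
 * that must wait for the bringdown to finish sleeps on reset_wait_q, the
 * same pattern __ipr_remove() uses below. Compiled out; for exposition only.
 */
#if 0
static void ipr_example_bringdown_and_wait(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
#endif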
10447
10448 /**
10449  * __ipr_remove - Remove a single adapter
10450  * @pdev:       pci device struct
10451  *
10452  * Adapter hot plug remove entry point.
10453  *
10454  * Return value:
10455  *      none
10456  **/
10457 static void __ipr_remove(struct pci_dev *pdev)
10458 {
10459         unsigned long host_lock_flags = 0;
10460         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10461         int i;
10462         unsigned long driver_lock_flags;
10463         ENTER;
10464
10465         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10466         while (ioa_cfg->in_reset_reload) {
10467                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10468                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10469                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10470         }
10471
10472         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10473                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10474                 ioa_cfg->hrrq[i].removing_ioa = 1;
10475                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10476         }
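	/* make the removing_ioa stores visible before initiating the bringdown */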
10477         wmb();
10478         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10479
10480         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10481         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10482         flush_work(&ioa_cfg->work_q);
10483         if (ioa_cfg->reset_work_q)
10484                 flush_workqueue(ioa_cfg->reset_work_q);
10485         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10486         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10487
10488         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10489         list_del(&ioa_cfg->queue);
10490         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10491
10492         if (ioa_cfg->sdt_state == ABORT_DUMP)
10493                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10494         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10495
10496         ipr_free_all_resources(ioa_cfg);
10497
10498         LEAVE;
10499 }
10500
10501 /**
10502  * ipr_remove - IOA hot plug remove entry point
10503  * @pdev:       pci device struct
10504  *
10505  * Adapter hot plug remove entry point.
10506  *
10507  * Return value:
10508  *      none
10509  **/
10510 static void ipr_remove(struct pci_dev *pdev)
10511 {
10512         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10513
10514         ENTER;
10515
10516         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10517                               &ipr_trace_attr);
10518         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10519                              &ipr_dump_attr);
10520         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10521                         &ipr_ioa_async_err_log);
10522         scsi_remove_host(ioa_cfg->host);
10523
10524         __ipr_remove(pdev);
10525
10526         LEAVE;
10527 }
10528
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
10535 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10536 {
10537         struct ipr_ioa_cfg *ioa_cfg;
10538         unsigned long flags;
10539         int rc, i;
10540
10541         rc = ipr_probe_ioa(pdev, dev_id);
10542
10543         if (rc)
10544                 return rc;
10545
10546         ioa_cfg = pci_get_drvdata(pdev);
10547         rc = ipr_probe_ioa_part2(ioa_cfg);
10548
10549         if (rc) {
10550                 __ipr_remove(pdev);
10551                 return rc;
10552         }
10553
10554         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10555
10556         if (rc) {
10557                 __ipr_remove(pdev);
10558                 return rc;
10559         }
10560
10561         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10562                                    &ipr_trace_attr);
10563
10564         if (rc) {
10565                 scsi_remove_host(ioa_cfg->host);
10566                 __ipr_remove(pdev);
10567                 return rc;
10568         }
10569
10570         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10571                         &ipr_ioa_async_err_log);
10572
10573         if (rc) {
10574                 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10575                                 &ipr_dump_attr);
10576                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10577                                 &ipr_trace_attr);
10578                 scsi_remove_host(ioa_cfg->host);
10579                 __ipr_remove(pdev);
10580                 return rc;
10581         }
10582
10583         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10584                                    &ipr_dump_attr);
10585
10586         if (rc) {
10587                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10588                                       &ipr_ioa_async_err_log);
10589                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10590                                       &ipr_trace_attr);
10591                 scsi_remove_host(ioa_cfg->host);
10592                 __ipr_remove(pdev);
10593                 return rc;
10594         }
10595         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10596         ioa_cfg->scan_enabled = 1;
10597         schedule_work(&ioa_cfg->work_q);
10598         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10599
10600         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10601
10602         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10603                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10604                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10605                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10606                 }
10607         }
10608
10609         scsi_scan_host(ioa_cfg->host);
10610
10611         return 0;
10612 }
10613
10614 /**
10615  * ipr_shutdown - Shutdown handler.
10616  * @pdev:       pci device struct
10617  *
10618  * This function is invoked upon system shutdown/reboot. It will issue
10619  * an adapter shutdown to the adapter to flush the write cache.
10620  *
10621  * Return value:
10622  *      none
10623  **/
10624 static void ipr_shutdown(struct pci_dev *pdev)
10625 {
10626         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10627         unsigned long lock_flags = 0;
10628         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10629         int i;
10630
10631         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10632         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10633                 ioa_cfg->iopoll_weight = 0;
10634                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10635                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10636         }
10637
10638         while (ioa_cfg->in_reset_reload) {
10639                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10640                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10641                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10642         }
10643
10644         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10645                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10646
10647         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10648         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10649         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10650         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10651                 ipr_free_irqs(ioa_cfg);
10652                 pci_disable_device(ioa_cfg->pdev);
10653         }
10654 }
10655
10656 static struct pci_device_id ipr_pci_table[] = {
10657         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10658                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10659         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10660                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10661         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10662                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10663         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10664                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10665         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10666                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10667         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10668                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10669         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10670                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10671         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10672                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10673                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10674         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10675               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10676         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10677               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10678               IPR_USE_LONG_TRANSOP_TIMEOUT },
10679         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10680               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10681               IPR_USE_LONG_TRANSOP_TIMEOUT },
10682         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10683               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10684         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10685               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10686               IPR_USE_LONG_TRANSOP_TIMEOUT},
10687         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10688               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10689               IPR_USE_LONG_TRANSOP_TIMEOUT },
10690         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10691               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10692               IPR_USE_LONG_TRANSOP_TIMEOUT },
10693         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10694               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10695         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10696               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10697         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10698               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10699               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10700         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10701                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10702         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10703                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10704         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10705                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10706                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10707         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10708                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10709                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10710         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10711                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10712         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10713                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10714         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10715                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10716         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10717                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10718         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10719                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10720         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10721                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10722         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10723                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10724         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10725                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10726         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10727                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10728         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10729                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10730         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10731                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10732         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10733                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10734         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10735                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10736         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10737                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10738         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10739                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10740         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10741                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10742         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10743                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10744         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10745                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10746         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10747                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10748         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10749                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10750         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10751                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10752         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10753                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10754         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10755                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10756         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10757                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10758         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10759                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10760         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10761                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10762         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10763                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10764         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10765                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10766         { }
10767 };
10768 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
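
/*
 * Illustrative sketch, not part of the driver: each table entry above matches
 * on vendor/device plus the IBM subsystem IDs, and the trailing driver_data
 * word carries IPR_USE_* flags that ipr_probe_ioa() consults. A hypothetical
 * entry (made-up subsystem ID 0x1234) would look like this. Compiled out.
 */
#if 0
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, 0x1234, 0, 0, IPR_USE_PCI_WARM_RESET },
#endif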
10769
10770 static const struct pci_error_handlers ipr_err_handler = {
10771         .error_detected = ipr_pci_error_detected,
10772         .mmio_enabled = ipr_pci_mmio_enabled,
10773         .slot_reset = ipr_pci_slot_reset,
10774 };
10775
10776 static struct pci_driver ipr_driver = {
10777         .name = IPR_NAME,
10778         .id_table = ipr_pci_table,
10779         .probe = ipr_probe,
10780         .remove = ipr_remove,
10781         .shutdown = ipr_shutdown,
10782         .err_handler = &ipr_err_handler,
10783 };
10784
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
10791 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10792 {
10793         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10794 }
10795
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system state change (SYS_RESTART, SYS_HALT, SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
10802 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10803 {
10804         struct ipr_cmnd *ipr_cmd;
10805         struct ipr_ioa_cfg *ioa_cfg;
10806         unsigned long flags = 0, driver_lock_flags;
10807
10808         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10809                 return NOTIFY_DONE;
10810
10811         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10812
10813         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10814                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10815                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10816                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10817                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10818                         continue;
10819                 }
10820
10821                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10822                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10823                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10824                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10825                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10826
10827                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10828                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10829         }
10830         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10831
10832         return NOTIFY_OK;
10833 }
10834
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
10838
10839 /**
10840  * ipr_init - Module entry point
10841  *
10842  * Return value:
10843  *      0 on success / negative value on failure
10844  **/
10845 static int __init ipr_init(void)
10846 {
10847         int rc;
10848
10849         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10850                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10851
10852         register_reboot_notifier(&ipr_notifier);
10853         rc = pci_register_driver(&ipr_driver);
10854         if (rc) {
10855                 unregister_reboot_notifier(&ipr_notifier);
10856                 return rc;
10857         }
10858
10859         return 0;
10860 }
10861
10862 /**
10863  * ipr_exit - Module unload
10864  *
10865  * Module unload entry point.
10866  *
10867  * Return value:
10868  *      none
10869  **/
10870 static void __exit ipr_exit(void)
10871 {
10872         unregister_reboot_notifier(&ipr_notifier);
10873         pci_unregister_driver(&ipr_driver);
10874 }
10875
10876 module_init(ipr_init);
10877 module_exit(ipr_exit);