/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
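/* The trailing anonymous initializer in each entry fills in the chip's
 * MMIO register offsets (see struct ipr_chip_cfg_t in ipr.h). */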
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
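
/*
 * Example module load using a few of the parameters registered above
 * (illustrative values only):
 *
 *   modprobe ipr max_speed=1 log_level=2 fastfail=1
 */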

/*  A constant array of IOASCs/URCs/Error Messages */
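/* Each entry maps an IOASC to its logging flags and message text; see
 * struct ipr_error_table_t in ipr.h for the exact field layout. */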
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

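/*
 * Known enclosure backplanes. The second column is a byte-compare mask
 * against the product ID ('X' positions must match; other characters,
 * such as '*', are ignored); the third column is the maximum supported
 * bus speed in MB/s for that backplane.
 */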
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

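        /*
         * Atomically claim the next slot in the driver trace; masking the
         * incremented index wraps it, so the trace acts as a fixed-size
         * circular buffer.
         */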
        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

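        /* Preserve the command's assigned HRRQ across the packet reset */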
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue struct
 *
 * Return value:
 *      pointer to ipr command struct / NULL if the free queue is empty
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
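        /* Read back to ensure the mask/clear writes above have posted */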
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

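        /*
         * Keep data parity error recovery (DPERR_E) and relaxed ordering
         * (ERO) enabled whenever the saved value is written back.
         */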
        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * For sis64 adapters it also computes the required ioarcb size and ORs the
 * size-encoding bits into the address written to the IOARRIN register.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:               done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value (in jiffies)
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

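        /*
         * The caller holds host_lock; release it across the sleep so the
         * completion path can acquire it when the command finishes.
         */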
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

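/**
 * ipr_get_hrrq_index - Select an HRRQ for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Queue 0 is reserved for internally generated commands, so when more
 * than one queue exists the remaining queues are used round-robin.
 *
 * Return value:
 *      index of the host request/response queue to use
 **/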
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:    ioa config struct
 * @res_path:   resource path
1287  * @buffer:     buffer
1288  * @len:        length of buffer provided
1289  *
1290  * Return value:
1291  *      pointer to buffer
1292  **/
1293 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1294                                  u8 *res_path, char *buffer, int len)
1295 {
1296         char *p = buffer;
1297
1298         *p = '\0';
1299         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1300         __ipr_format_res_path(res_path, p, len - (p - buffer));
1301         return buffer;
1302 }
1303
1304 /**
1305  * ipr_update_res_entry - Update the resource entry.
1306  * @res:        resource entry struct
1307  * @cfgtew:     config table entry wrapper struct
1308  *
1309  * Return value:
1310  *      none
1311  **/
1312 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1313                                  struct ipr_config_table_entry_wrapper *cfgtew)
1314 {
1315         char buffer[IPR_MAX_RES_PATH_LENGTH];
1316         unsigned int proto;
1317         int new_path = 0;
1318
1319         if (res->ioa_cfg->sis64) {
1320                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1321                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1322                 res->type = cfgtew->u.cfgte64->res_type;
1323
1324                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1325                         sizeof(struct ipr_std_inq_data));
1326
1327                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1328                 proto = cfgtew->u.cfgte64->proto;
1329                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1330                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1331
1332                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1333                         sizeof(res->dev_lun.scsi_lun));
1334
1335                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1336                                         sizeof(res->res_path))) {
1337                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1338                                 sizeof(res->res_path));
1339                         new_path = 1;
1340                 }
1341
1342                 if (res->sdev && new_path)
1343                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1344                                     ipr_format_res_path(res->ioa_cfg,
1345                                         res->res_path, buffer, sizeof(buffer)));
1346         } else {
1347                 res->flags = cfgtew->u.cfgte->flags;
1348                 if (res->flags & IPR_IS_IOA_RESOURCE)
1349                         res->type = IPR_RES_TYPE_IOAFP;
1350                 else
1351                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1352
1353                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1354                         sizeof(struct ipr_std_inq_data));
1355
1356                 res->qmodel = IPR_QUEUEING_MODEL(res);
1357                 proto = cfgtew->u.cfgte->proto;
1358                 res->res_handle = cfgtew->u.cfgte->res_handle;
1359         }
1360
1361         ipr_update_ata_class(res, proto);
1362 }
1363
1364 /**
1365  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1366  *                        for the resource.
1367  * @res:        resource entry struct
1369  *
1370  * Return value:
1371  *      none
1372  **/
1373 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1374 {
1375         struct ipr_resource_entry *gscsi_res = NULL;
1376         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1377
1378         if (!ioa_cfg->sis64)
1379                 return;
1380
1381         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1382                 clear_bit(res->target, ioa_cfg->array_ids);
1383         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1384                 clear_bit(res->target, ioa_cfg->vset_ids);
1385         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1386                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1387                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1388                                 return;
1389                 clear_bit(res->target, ioa_cfg->target_ids);
1390
1391         } else if (res->bus == 0)
1392                 clear_bit(res->target, ioa_cfg->target_ids);
1393 }
1394
1395 /**
1396  * ipr_handle_config_change - Handle a config change from the adapter
1397  * @ioa_cfg:    ioa config struct
1398  * @hostrcb:    hostrcb
1399  *
1400  * Return value:
1401  *      none
1402  **/
1403 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1404                                      struct ipr_hostrcb *hostrcb)
1405 {
1406         struct ipr_resource_entry *res = NULL;
1407         struct ipr_config_table_entry_wrapper cfgtew;
1408         __be32 cc_res_handle;
1409
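             /* Assume the notification is for a new device until an existing
              * resource with a matching handle is found. */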
1410         u32 is_ndn = 1;
1411
1412         if (ioa_cfg->sis64) {
1413                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1414                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1415         } else {
1416                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1417                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1418         }
1419
1420         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1421                 if (res->res_handle == cc_res_handle) {
1422                         is_ndn = 0;
1423                         break;
1424                 }
1425         }
1426
1427         if (is_ndn) {
1428                 if (list_empty(&ioa_cfg->free_res_q)) {
1429                         ipr_send_hcam(ioa_cfg,
1430                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1431                                       hostrcb);
1432                         return;
1433                 }
1434
1435                 res = list_entry(ioa_cfg->free_res_q.next,
1436                                  struct ipr_resource_entry, queue);
1437
1438                 list_del(&res->queue);
1439                 ipr_init_res_entry(res, &cfgtew);
1440                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1441         }
1442
1443         ipr_update_res_entry(res, &cfgtew);
1444
1445         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1446                 if (res->sdev) {
1447                         res->del_from_ml = 1;
1448                         res->res_handle = IPR_INVALID_RES_HANDLE;
1449                         schedule_work(&ioa_cfg->work_q);
1450                 } else {
1451                         ipr_clear_res_target(res);
1452                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1453                 }
1454         } else if (!res->sdev || res->del_from_ml) {
1455                 res->add_to_ml = 1;
1456                 schedule_work(&ioa_cfg->work_q);
1457         }
1458
1459         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1460 }
1461
1462 /**
1463  * ipr_process_ccn - Op done function for a CCN.
1464  * @ipr_cmd:    ipr command struct
1465  *
1466  * This function is the op done function for a configuration
1467  * change notification host controlled async message from the adapter.
1468  *
1469  * Return value:
1470  *      none
1471  **/
1472 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1473 {
1474         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1475         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1476         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1477
1478         list_del_init(&hostrcb->queue);
1479         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1480
1481         if (ioasc) {
1482                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1483                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1484                         dev_err(&ioa_cfg->pdev->dev,
1485                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1486
1487                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1488         } else {
1489                 ipr_handle_config_change(ioa_cfg, hostrcb);
1490         }
1491 }
1492
1493 /**
1494  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1495  * @i:          index into buffer
1496  * @buf:                string to modify
1497  *
1498  * This function will strip all trailing whitespace, pad the end
1499  * of the string with a single space, and NULL terminate the string.
1500  *
1501  * Return value:
1502  *      new length of string
1503  **/
1504 static int strip_and_pad_whitespace(int i, char *buf)
1505 {
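             /* Walk back over trailing spaces, then terminate the string with
              * a single pad space followed by a NUL. */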
1506         while (i && buf[i] == ' ')
1507                 i--;
1508         buf[i+1] = ' ';
1509         buf[i+2] = '\0';
1510         return i + 2;
1511 }
1512
1513 /**
1514  * ipr_log_vpd_compact - Log the passed VPD compactly.
1515  * @prefix:             string to print at start of printk
1516  * @hostrcb:    hostrcb pointer
1517  * @vpd:                vendor/product id/sn struct
1518  *
1519  * Return value:
1520  *      none
1521  **/
1522 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1523                                 struct ipr_vpd *vpd)
1524 {
1525         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1526         int i = 0;
1527
1528         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1529         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1530
1531         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1532         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1533
1534         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1535         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1536
1537         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1538 }
1539
1540 /**
1541  * ipr_log_vpd - Log the passed VPD to the error log.
1542  * @vpd:                vendor/product id/sn struct
1543  *
1544  * Return value:
1545  *      none
1546  **/
1547 static void ipr_log_vpd(struct ipr_vpd *vpd)
1548 {
1549         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1550                     + IPR_SERIAL_NUM_LEN];
1551
1552         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1553         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1554                IPR_PROD_ID_LEN);
1555         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1556         ipr_err("Vendor/Product ID: %s\n", buffer);
1557
1558         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1559         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1560         ipr_err("    Serial Number: %s\n", buffer);
1561 }
1562
1563 /**
1564  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1565  * @prefix:             string to print at start of printk
1566  * @hostrcb:    hostrcb pointer
1567  * @vpd:                vendor/product id/sn/wwn struct
1568  *
1569  * Return value:
1570  *      none
1571  **/
1572 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1573                                     struct ipr_ext_vpd *vpd)
1574 {
1575         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1576         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1577                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1578 }
1579
1580 /**
1581  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1582  * @vpd:                vendor/product id/sn/wwn struct
1583  *
1584  * Return value:
1585  *      none
1586  **/
1587 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1588 {
1589         ipr_log_vpd(&vpd->vpd);
1590         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1591                 be32_to_cpu(vpd->wwid[1]));
1592 }
1593
1594 /**
1595  * ipr_log_enhanced_cache_error - Log a cache error.
1596  * @ioa_cfg:    ioa config struct
1597  * @hostrcb:    hostrcb struct
1598  *
1599  * Return value:
1600  *      none
1601  **/
1602 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1603                                          struct ipr_hostrcb *hostrcb)
1604 {
1605         struct ipr_hostrcb_type_12_error *error;
1606
1607         if (ioa_cfg->sis64)
1608                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1609         else
1610                 error = &hostrcb->hcam.u.error.u.type_12_error;
1611
1612         ipr_err("-----Current Configuration-----\n");
1613         ipr_err("Cache Directory Card Information:\n");
1614         ipr_log_ext_vpd(&error->ioa_vpd);
1615         ipr_err("Adapter Card Information:\n");
1616         ipr_log_ext_vpd(&error->cfc_vpd);
1617
1618         ipr_err("-----Expected Configuration-----\n");
1619         ipr_err("Cache Directory Card Information:\n");
1620         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1621         ipr_err("Adapter Card Information:\n");
1622         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1623
1624         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1625                      be32_to_cpu(error->ioa_data[0]),
1626                      be32_to_cpu(error->ioa_data[1]),
1627                      be32_to_cpu(error->ioa_data[2]));
1628 }
1629
1630 /**
1631  * ipr_log_cache_error - Log a cache error.
1632  * @ioa_cfg:    ioa config struct
1633  * @hostrcb:    hostrcb struct
1634  *
1635  * Return value:
1636  *      none
1637  **/
1638 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1639                                 struct ipr_hostrcb *hostrcb)
1640 {
1641         struct ipr_hostrcb_type_02_error *error =
1642                 &hostrcb->hcam.u.error.u.type_02_error;
1643
1644         ipr_err("-----Current Configuration-----\n");
1645         ipr_err("Cache Directory Card Information:\n");
1646         ipr_log_vpd(&error->ioa_vpd);
1647         ipr_err("Adapter Card Information:\n");
1648         ipr_log_vpd(&error->cfc_vpd);
1649
1650         ipr_err("-----Expected Configuration-----\n");
1651         ipr_err("Cache Directory Card Information:\n");
1652         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1653         ipr_err("Adapter Card Information:\n");
1654         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1655
1656         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1657                      be32_to_cpu(error->ioa_data[0]),
1658                      be32_to_cpu(error->ioa_data[1]),
1659                      be32_to_cpu(error->ioa_data[2]));
1660 }
1661
1662 /**
1663  * ipr_log_enhanced_config_error - Log a configuration error.
1664  * @ioa_cfg:    ioa config struct
1665  * @hostrcb:    hostrcb struct
1666  *
1667  * Return value:
1668  *      none
1669  **/
1670 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1671                                           struct ipr_hostrcb *hostrcb)
1672 {
1673         int errors_logged, i;
1674         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1675         struct ipr_hostrcb_type_13_error *error;
1676
1677         error = &hostrcb->hcam.u.error.u.type_13_error;
1678         errors_logged = be32_to_cpu(error->errors_logged);
1679
1680         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1681                 be32_to_cpu(error->errors_detected), errors_logged);
1682
1683         dev_entry = error->dev;
1684
1685         for (i = 0; i < errors_logged; i++, dev_entry++) {
1686                 ipr_err_separator;
1687
1688                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1689                 ipr_log_ext_vpd(&dev_entry->vpd);
1690
1691                 ipr_err("-----New Device Information-----\n");
1692                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1693
1694                 ipr_err("Cache Directory Card Information:\n");
1695                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1696
1697                 ipr_err("Adapter Card Information:\n");
1698                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1699         }
1700 }
1701
1702 /**
1703  * ipr_log_sis64_config_error - Log a configuration error.
1704  * @ioa_cfg:    ioa config struct
1705  * @hostrcb:    hostrcb struct
1706  *
1707  * Return value:
1708  *      none
1709  **/
1710 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711                                        struct ipr_hostrcb *hostrcb)
1712 {
1713         int errors_logged, i;
1714         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1715         struct ipr_hostrcb_type_23_error *error;
1716         char buffer[IPR_MAX_RES_PATH_LENGTH];
1717
1718         error = &hostrcb->hcam.u.error64.u.type_23_error;
1719         errors_logged = be32_to_cpu(error->errors_logged);
1720
1721         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722                 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724         dev_entry = error->dev;
1725
1726         for (i = 0; i < errors_logged; i++, dev_entry++) {
1727                 ipr_err_separator;
1728
1729                 ipr_err("Device %d : %s", i + 1,
1730                         __ipr_format_res_path(dev_entry->res_path,
1731                                               buffer, sizeof(buffer)));
1732                 ipr_log_ext_vpd(&dev_entry->vpd);
1733
1734                 ipr_err("-----New Device Information-----\n");
1735                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1736
1737                 ipr_err("Cache Directory Card Information:\n");
1738                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1739
1740                 ipr_err("Adapter Card Information:\n");
1741                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1742         }
1743 }
1744
1745 /**
1746  * ipr_log_config_error - Log a configuration error.
1747  * @ioa_cfg:    ioa config struct
1748  * @hostrcb:    hostrcb struct
1749  *
1750  * Return value:
1751  *      none
1752  **/
1753 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1754                                  struct ipr_hostrcb *hostrcb)
1755 {
1756         int errors_logged, i;
1757         struct ipr_hostrcb_device_data_entry *dev_entry;
1758         struct ipr_hostrcb_type_03_error *error;
1759
1760         error = &hostrcb->hcam.u.error.u.type_03_error;
1761         errors_logged = be32_to_cpu(error->errors_logged);
1762
1763         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1764                 be32_to_cpu(error->errors_detected), errors_logged);
1765
1766         dev_entry = error->dev;
1767
1768         for (i = 0; i < errors_logged; i++, dev_entry++) {
1769                 ipr_err_separator;
1770
1771                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1772                 ipr_log_vpd(&dev_entry->vpd);
1773
1774                 ipr_err("-----New Device Information-----\n");
1775                 ipr_log_vpd(&dev_entry->new_vpd);
1776
1777                 ipr_err("Cache Directory Card Information:\n");
1778                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780                 ipr_err("Adapter Card Information:\n");
1781                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782
1783                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1784                         be32_to_cpu(dev_entry->ioa_data[0]),
1785                         be32_to_cpu(dev_entry->ioa_data[1]),
1786                         be32_to_cpu(dev_entry->ioa_data[2]),
1787                         be32_to_cpu(dev_entry->ioa_data[3]),
1788                         be32_to_cpu(dev_entry->ioa_data[4]));
1789         }
1790 }
1791
1792 /**
1793  * ipr_log_enhanced_array_error - Log an array configuration error.
1794  * @ioa_cfg:    ioa config struct
1795  * @hostrcb:    hostrcb struct
1796  *
1797  * Return value:
1798  *      none
1799  **/
1800 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1801                                          struct ipr_hostrcb *hostrcb)
1802 {
1803         int i, num_entries;
1804         struct ipr_hostrcb_type_14_error *error;
1805         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1806         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1807
1808         error = &hostrcb->hcam.u.error.u.type_14_error;
1809
1810         ipr_err_separator;
1811
1812         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1813                 error->protection_level,
1814                 ioa_cfg->host->host_no,
1815                 error->last_func_vset_res_addr.bus,
1816                 error->last_func_vset_res_addr.target,
1817                 error->last_func_vset_res_addr.lun);
1818
1819         ipr_err_separator;
1820
1821         array_entry = error->array_member;
1822         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1823                             ARRAY_SIZE(error->array_member));
1824
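                     /* A serial number of all ASCII '0's marks an unused slot */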
1825         for (i = 0; i < num_entries; i++, array_entry++) {
1826                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1827                         continue;
1828
1829                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1830                         ipr_err("Exposed Array Member %d:\n", i);
1831                 else
1832                         ipr_err("Array Member %d:\n", i);
1833
1834                 ipr_log_ext_vpd(&array_entry->vpd);
1835                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1836                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1837                                  "Expected Location");
1838
1839                 ipr_err_separator;
1840         }
1841 }
1842
1843 /**
1844  * ipr_log_array_error - Log an array configuration error.
1845  * @ioa_cfg:    ioa config struct
1846  * @hostrcb:    hostrcb struct
1847  *
1848  * Return value:
1849  *      none
1850  **/
1851 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1852                                 struct ipr_hostrcb *hostrcb)
1853 {
1854         int i;
1855         struct ipr_hostrcb_type_04_error *error;
1856         struct ipr_hostrcb_array_data_entry *array_entry;
1857         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1858
1859         error = &hostrcb->hcam.u.error.u.type_04_error;
1860
1861         ipr_err_separator;
1862
1863         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1864                 error->protection_level,
1865                 ioa_cfg->host->host_no,
1866                 error->last_func_vset_res_addr.bus,
1867                 error->last_func_vset_res_addr.target,
1868                 error->last_func_vset_res_addr.lun);
1869
1870         ipr_err_separator;
1871
1872         array_entry = error->array_member;
1873
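             /*
              * The type 04 overlay splits its 18 possible members across two
              * fixed arrays, so switch from array_member to array_member2
              * once entry 9 has been logged.
              */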
1874         for (i = 0; i < 18; i++) {
1875                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1876                         continue;
1877
1878                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1879                         ipr_err("Exposed Array Member %d:\n", i);
1880                 else
1881                         ipr_err("Array Member %d:\n", i);
1882
1883                 ipr_log_vpd(&array_entry->vpd);
1884
1885                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1886                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1887                                  "Expected Location");
1888
1889                 ipr_err_separator;
1890
1891                 if (i == 9)
1892                         array_entry = error->array_member2;
1893                 else
1894                         array_entry++;
1895         }
1896 }
1897
1898 /**
1899  * ipr_log_hex_data - Log additional hex IOA error data.
1900  * @ioa_cfg:    ioa config struct
1901  * @data:               IOA error data
1902  * @len:                data length
1903  *
1904  * Return value:
1905  *      none
1906  **/
1907 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1908 {
1909         int i;
1910
1911         if (len == 0)
1912                 return;
1913
1914         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1915                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1916
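             /* len is in bytes; dump four big-endian words (16 bytes) per
              * line, prefixed with the byte offset. */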
1917         for (i = 0; i < len / 4; i += 4) {
1918                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1919                         be32_to_cpu(data[i]),
1920                         be32_to_cpu(data[i+1]),
1921                         be32_to_cpu(data[i+2]),
1922                         be32_to_cpu(data[i+3]));
1923         }
1924 }
1925
1926 /**
1927  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1928  * @ioa_cfg:    ioa config struct
1929  * @hostrcb:    hostrcb struct
1930  *
1931  * Return value:
1932  *      none
1933  **/
1934 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1935                                             struct ipr_hostrcb *hostrcb)
1936 {
1937         struct ipr_hostrcb_type_17_error *error;
1938
1939         if (ioa_cfg->sis64)
1940                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1941         else
1942                 error = &hostrcb->hcam.u.error.u.type_17_error;
1943
1944         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1945         strim(error->failure_reason);
1946
1947         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1948                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1949         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1950         ipr_log_hex_data(ioa_cfg, error->data,
1951                          be32_to_cpu(hostrcb->hcam.length) -
1952                          (offsetof(struct ipr_hostrcb_error, u) +
1953                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1954 }
1955
1956 /**
1957  * ipr_log_dual_ioa_error - Log a dual adapter error.
1958  * @ioa_cfg:    ioa config struct
1959  * @hostrcb:    hostrcb struct
1960  *
1961  * Return value:
1962  *      none
1963  **/
1964 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1965                                    struct ipr_hostrcb *hostrcb)
1966 {
1967         struct ipr_hostrcb_type_07_error *error;
1968
1969         error = &hostrcb->hcam.u.error.u.type_07_error;
1970         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1971         strim(error->failure_reason);
1972
1973         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1974                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1975         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1976         ipr_log_hex_data(ioa_cfg, error->data,
1977                          be32_to_cpu(hostrcb->hcam.length) -
1978                          (offsetof(struct ipr_hostrcb_error, u) +
1979                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1980 }
1981
1982 static const struct {
1983         u8 active;
1984         char *desc;
1985 } path_active_desc[] = {
1986         { IPR_PATH_NO_INFO, "Path" },
1987         { IPR_PATH_ACTIVE, "Active path" },
1988         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1989 };
1990
1991 static const struct {
1992         u8 state;
1993         char *desc;
1994 } path_state_desc[] = {
1995         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1996         { IPR_PATH_HEALTHY, "is healthy" },
1997         { IPR_PATH_DEGRADED, "is degraded" },
1998         { IPR_PATH_FAILED, "is failed" }
1999 };
2000
2001 /**
2002  * ipr_log_fabric_path - Log a fabric path error
2003  * @hostrcb:    hostrcb struct
2004  * @fabric:             fabric descriptor
2005  *
2006  * Return value:
2007  *      none
2008  **/
2009 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2010                                 struct ipr_hostrcb_fabric_desc *fabric)
2011 {
2012         int i, j;
2013         u8 path_state = fabric->path_state;
2014         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2015         u8 state = path_state & IPR_PATH_STATE_MASK;
2016
2017         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2018                 if (path_active_desc[i].active != active)
2019                         continue;
2020
2021                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2022                         if (path_state_desc[j].state != state)
2023                                 continue;
2024
2025                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2026                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2027                                              path_active_desc[i].desc, path_state_desc[j].desc,
2028                                              fabric->ioa_port);
2029                         } else if (fabric->cascaded_expander == 0xff) {
2030                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2031                                              path_active_desc[i].desc, path_state_desc[j].desc,
2032                                              fabric->ioa_port, fabric->phy);
2033                         } else if (fabric->phy == 0xff) {
2034                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2035                                              path_active_desc[i].desc, path_state_desc[j].desc,
2036                                              fabric->ioa_port, fabric->cascaded_expander);
2037                         } else {
2038                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2039                                              path_active_desc[i].desc, path_state_desc[j].desc,
2040                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2041                         }
2042                         return;
2043                 }
2044         }
2045
2046         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2047                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2048 }
2049
2050 /**
2051  * ipr_log64_fabric_path - Log a fabric path error
2052  * @hostrcb:    hostrcb struct
2053  * @fabric:             fabric descriptor
2054  *
2055  * Return value:
2056  *      none
2057  **/
2058 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2059                                   struct ipr_hostrcb64_fabric_desc *fabric)
2060 {
2061         int i, j;
2062         u8 path_state = fabric->path_state;
2063         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2064         u8 state = path_state & IPR_PATH_STATE_MASK;
2065         char buffer[IPR_MAX_RES_PATH_LENGTH];
2066
2067         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2068                 if (path_active_desc[i].active != active)
2069                         continue;
2070
2071                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2072                         if (path_state_desc[j].state != state)
2073                                 continue;
2074
2075                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2076                                      path_active_desc[i].desc, path_state_desc[j].desc,
2077                                      ipr_format_res_path(hostrcb->ioa_cfg,
2078                                                 fabric->res_path,
2079                                                 buffer, sizeof(buffer)));
2080                         return;
2081                 }
2082         }
2083
2084         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2085                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2086                                     buffer, sizeof(buffer)));
2087 }
2088
2089 static const struct {
2090         u8 type;
2091         char *desc;
2092 } path_type_desc[] = {
2093         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2094         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2095         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2096         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2097 };
2098
2099 static const struct {
2100         u8 status;
2101         char *desc;
2102 } path_status_desc[] = {
2103         { IPR_PATH_CFG_NO_PROB, "Functional" },
2104         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2105         { IPR_PATH_CFG_FAILED, "Failed" },
2106         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2107         { IPR_PATH_NOT_DETECTED, "Missing" },
2108         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2109 };
2110
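/*
 * Decoded strings for the link rate field, indexed by its value after
 * masking with IPR_PHY_LINK_RATE_MASK; 0x8 and 0x9 are the negotiated
 * 1.5 and 3.0 Gbps SAS rates.
 */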
2111 static const char *link_rate[] = {
2112         "unknown",
2113         "disabled",
2114         "phy reset problem",
2115         "spinup hold",
2116         "port selector",
2117         "unknown",
2118         "unknown",
2119         "unknown",
2120         "1.5Gbps",
2121         "3.0Gbps",
2122         "unknown",
2123         "unknown",
2124         "unknown",
2125         "unknown",
2126         "unknown",
2127         "unknown"
2128 };
2129
2130 /**
2131  * ipr_log_path_elem - Log a fabric path element.
2132  * @hostrcb:    hostrcb struct
2133  * @cfg:                fabric path element struct
2134  *
2135  * Return value:
2136  *      none
2137  **/
2138 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2139                               struct ipr_hostrcb_config_element *cfg)
2140 {
2141         int i, j;
2142         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2143         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2144
2145         if (type == IPR_PATH_CFG_NOT_EXIST)
2146                 return;
2147
2148         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2149                 if (path_type_desc[i].type != type)
2150                         continue;
2151
2152                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2153                         if (path_status_desc[j].status != status)
2154                                 continue;
2155
2156                         if (type == IPR_PATH_CFG_IOA_PORT) {
2157                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2158                                              path_status_desc[j].desc, path_type_desc[i].desc,
2159                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2160                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2161                         } else {
2162                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2163                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2164                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2165                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2166                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2167                                 } else if (cfg->cascaded_expander == 0xff) {
2168                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2169                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2170                                                      path_type_desc[i].desc, cfg->phy,
2171                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2172                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2173                                 } else if (cfg->phy == 0xff) {
2174                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2175                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2176                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2177                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2178                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2179                                 } else {
2180                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2181                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2182                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2183                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2184                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2185                                 }
2186                         }
2187                         return;
2188                 }
2189         }
2190
2191         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2192                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2193                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2194                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2195 }
2196
2197 /**
2198  * ipr_log64_path_elem - Log a fabric path element.
2199  * @hostrcb:    hostrcb struct
2200  * @cfg:                fabric path element struct
2201  *
2202  * Return value:
2203  *      none
2204  **/
2205 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2206                                 struct ipr_hostrcb64_config_element *cfg)
2207 {
2208         int i, j;
2209         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2210         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2211         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2212         char buffer[IPR_MAX_RES_PATH_LENGTH];
2213
2214         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2215                 return;
2216
2217         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2218                 if (path_type_desc[i].type != type)
2219                         continue;
2220
2221                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2222                         if (path_status_desc[j].status != status)
2223                                 continue;
2224
2225                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2226                                      path_status_desc[j].desc, path_type_desc[i].desc,
2227                                      ipr_format_res_path(hostrcb->ioa_cfg,
2228                                         cfg->res_path, buffer, sizeof(buffer)),
2229                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2230                                         be32_to_cpu(cfg->wwid[0]),
2231                                         be32_to_cpu(cfg->wwid[1]));
2232                         return;
2233                 }
2234         }
2235         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2236                      "WWN=%08X%08X\n", cfg->type_status,
2237                      ipr_format_res_path(hostrcb->ioa_cfg,
2238                         cfg->res_path, buffer, sizeof(buffer)),
2239                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2240                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2241 }
2242
2243 /**
2244  * ipr_log_fabric_error - Log a fabric error.
2245  * @ioa_cfg:    ioa config struct
2246  * @hostrcb:    hostrcb struct
2247  *
2248  * Return value:
2249  *      none
2250  **/
2251 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2252                                  struct ipr_hostrcb *hostrcb)
2253 {
2254         struct ipr_hostrcb_type_20_error *error;
2255         struct ipr_hostrcb_fabric_desc *fabric;
2256         struct ipr_hostrcb_config_element *cfg;
2257         int i, add_len;
2258
2259         error = &hostrcb->hcam.u.error.u.type_20_error;
2260         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2261         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2262
2263         add_len = be32_to_cpu(hostrcb->hcam.length) -
2264                 (offsetof(struct ipr_hostrcb_error, u) +
2265                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2266
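             /*
              * Fabric descriptors are variable length; walk them using their
              * self-described lengths, logging each path and its elements,
              * then hex-dump whatever trailing data remains.
              */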
2267         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2268                 ipr_log_fabric_path(hostrcb, fabric);
2269                 for_each_fabric_cfg(fabric, cfg)
2270                         ipr_log_path_elem(hostrcb, cfg);
2271
2272                 add_len -= be16_to_cpu(fabric->length);
2273                 fabric = (struct ipr_hostrcb_fabric_desc *)
2274                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2275         }
2276
2277         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2278 }
2279
2280 /**
2281  * ipr_log_sis64_array_error - Log a sis64 array error.
2282  * @ioa_cfg:    ioa config struct
2283  * @hostrcb:    hostrcb struct
2284  *
2285  * Return value:
2286  *      none
2287  **/
2288 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2289                                       struct ipr_hostrcb *hostrcb)
2290 {
2291         int i, num_entries;
2292         struct ipr_hostrcb_type_24_error *error;
2293         struct ipr_hostrcb64_array_data_entry *array_entry;
2294         char buffer[IPR_MAX_RES_PATH_LENGTH];
2295         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2296
2297         error = &hostrcb->hcam.u.error64.u.type_24_error;
2298
2299         ipr_err_separator;
2300
2301         ipr_err("RAID %s Array Configuration: %s\n",
2302                 error->protection_level,
2303                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2304                         buffer, sizeof(buffer)));
2305
2306         ipr_err_separator;
2307
2308         array_entry = error->array_member;
2309         num_entries = min_t(u32, error->num_entries,
2310                             ARRAY_SIZE(error->array_member));
2311
2312         for (i = 0; i < num_entries; i++, array_entry++) {
2313
2314                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2315                         continue;
2316
2317                 if (error->exposed_mode_adn == i)
2318                         ipr_err("Exposed Array Member %d:\n", i);
2319                 else
2320                         ipr_err("Array Member %d:\n", i);
2321
2323                 ipr_log_ext_vpd(&array_entry->vpd);
2324                 ipr_err("Current Location: %s\n",
2325                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2326                                 buffer, sizeof(buffer)));
2327                 ipr_err("Expected Location: %s\n",
2328                          ipr_format_res_path(ioa_cfg,
2329                                 array_entry->expected_res_path,
2330                                 buffer, sizeof(buffer)));
2331
2332                 ipr_err_separator;
2333         }
2334 }
2335
2336 /**
2337  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2338  * @ioa_cfg:    ioa config struct
2339  * @hostrcb:    hostrcb struct
2340  *
2341  * Return value:
2342  *      none
2343  **/
2344 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2345                                        struct ipr_hostrcb *hostrcb)
2346 {
2347         struct ipr_hostrcb_type_30_error *error;
2348         struct ipr_hostrcb64_fabric_desc *fabric;
2349         struct ipr_hostrcb64_config_element *cfg;
2350         int i, add_len;
2351
2352         error = &hostrcb->hcam.u.error64.u.type_30_error;
2353
2354         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2355         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2356
2357         add_len = be32_to_cpu(hostrcb->hcam.length) -
2358                 (offsetof(struct ipr_hostrcb64_error, u) +
2359                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2360
2361         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2362                 ipr_log64_fabric_path(hostrcb, fabric);
2363                 for_each_fabric_cfg(fabric, cfg)
2364                         ipr_log64_path_elem(hostrcb, cfg);
2365
2366                 add_len -= be16_to_cpu(fabric->length);
2367                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2368                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2369         }
2370
2371         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2372 }
2373
2374 /**
2375  * ipr_log_generic_error - Log an adapter error.
2376  * @ioa_cfg:    ioa config struct
2377  * @hostrcb:    hostrcb struct
2378  *
2379  * Return value:
2380  *      none
2381  **/
2382 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2383                                   struct ipr_hostrcb *hostrcb)
2384 {
2385         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2386                          be32_to_cpu(hostrcb->hcam.length));
2387 }
2388
2389 /**
2390  * ipr_log_sis64_device_error - Log a device error.
2391  * @ioa_cfg:    ioa config struct
2392  * @hostrcb:    hostrcb struct
2393  *
2394  * Return value:
2395  *      none
2396  **/
2397 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2398                                          struct ipr_hostrcb *hostrcb)
2399 {
2400         struct ipr_hostrcb_type_21_error *error;
2401         char buffer[IPR_MAX_RES_PATH_LENGTH];
2402
2403         error = &hostrcb->hcam.u.error64.u.type_21_error;
2404
2405         ipr_err("-----Failing Device Information-----\n");
2406         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2407                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2408                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2409         ipr_err("Device Resource Path: %s\n",
2410                 __ipr_format_res_path(error->res_path,
2411                                       buffer, sizeof(buffer)));
2412         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2413         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2414         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2415         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2416         ipr_err("SCSI Sense Data:\n");
2417         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2418         ipr_err("SCSI Command Descriptor Block: \n");
2419         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2420
2421         ipr_err("Additional IOA Data:\n");
2422         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2423 }
2424
2425 /**
2426  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2427  * @ioasc:      IOASC
2428  *
2429  * This function will return the index into the ipr_error_table
2430  * for the specified IOASC. If the IOASC is not in the table,
2431  * 0 will be returned, which points to the entry used for unknown errors.
2432  *
2433  * Return value:
2434  *      index into the ipr_error_table
2435  **/
2436 static u32 ipr_get_error(u32 ioasc)
2437 {
2438         int i;
2439
2440         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2441                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2442                         return i;
2443
2444         return 0;
2445 }
2446
2447 /**
2448  * ipr_handle_log_data - Log an adapter error.
2449  * @ioa_cfg:    ioa config struct
2450  * @hostrcb:    hostrcb struct
2451  *
2452  * This function logs an adapter error to the system.
2453  *
2454  * Return value:
2455  *      none
2456  **/
2457 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2458                                 struct ipr_hostrcb *hostrcb)
2459 {
2460         u32 ioasc;
2461         int error_index;
2462         struct ipr_hostrcb_type_21_error *error;
2463
2464         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2465                 return;
2466
2467         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2468                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2469
2470         if (ioa_cfg->sis64)
2471                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2472         else
2473                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2474
2475         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2476             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2477                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2478                 scsi_report_bus_reset(ioa_cfg->host,
2479                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2480         }
2481
2482         error_index = ipr_get_error(ioasc);
2483
2484         if (!ipr_error_table[error_index].log_hcam)
2485                 return;
2486
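             /*
              * For hardware command failures with a type 21 overlay, byte 2
              * of the fixed-format sense data holds the sense key; suppress
              * expected ILLEGAL_REQUEST errors at the default log level.
              */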
2487         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2488             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2489                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2490
2491                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2492                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2493                                 return;
2494         }
2495
2496         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2497
2498         /* Set indication we have logged an error */
2499         ioa_cfg->errors_logged++;
2500
2501         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2502                 return;
2503         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2504                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2505
2506         switch (hostrcb->hcam.overlay_id) {
2507         case IPR_HOST_RCB_OVERLAY_ID_2:
2508                 ipr_log_cache_error(ioa_cfg, hostrcb);
2509                 break;
2510         case IPR_HOST_RCB_OVERLAY_ID_3:
2511                 ipr_log_config_error(ioa_cfg, hostrcb);
2512                 break;
2513         case IPR_HOST_RCB_OVERLAY_ID_4:
2514         case IPR_HOST_RCB_OVERLAY_ID_6:
2515                 ipr_log_array_error(ioa_cfg, hostrcb);
2516                 break;
2517         case IPR_HOST_RCB_OVERLAY_ID_7:
2518                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2519                 break;
2520         case IPR_HOST_RCB_OVERLAY_ID_12:
2521                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2522                 break;
2523         case IPR_HOST_RCB_OVERLAY_ID_13:
2524                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2525                 break;
2526         case IPR_HOST_RCB_OVERLAY_ID_14:
2527         case IPR_HOST_RCB_OVERLAY_ID_16:
2528                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2529                 break;
2530         case IPR_HOST_RCB_OVERLAY_ID_17:
2531                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2532                 break;
2533         case IPR_HOST_RCB_OVERLAY_ID_20:
2534                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2535                 break;
2536         case IPR_HOST_RCB_OVERLAY_ID_21:
2537                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2538                 break;
2539         case IPR_HOST_RCB_OVERLAY_ID_23:
2540                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2541                 break;
2542         case IPR_HOST_RCB_OVERLAY_ID_24:
2543         case IPR_HOST_RCB_OVERLAY_ID_26:
2544                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2545                 break;
2546         case IPR_HOST_RCB_OVERLAY_ID_30:
2547                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2548                 break;
2549         case IPR_HOST_RCB_OVERLAY_ID_1:
2550         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2551         default:
2552                 ipr_log_generic_error(ioa_cfg, hostrcb);
2553                 break;
2554         }
2555 }
2556
2557 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2558 {
2559         struct ipr_hostrcb *hostrcb;
2560
2561         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2562                                         struct ipr_hostrcb, queue);
2563
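             /*
              * If the free list is exhausted, reclaim the oldest buffer still
              * waiting to be reported to user space so the adapter is not
              * left without an HCAM buffer.
              */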
2564         if (unlikely(!hostrcb)) {
2565                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2566                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2567                                                 struct ipr_hostrcb, queue);
2568         }
2569
2570         list_del_init(&hostrcb->queue);
2571         return hostrcb;
2572 }
2573
2574 /**
2575  * ipr_process_error - Op done function for an adapter error log.
2576  * @ipr_cmd:    ipr command struct
2577  *
2578  * This function is the op done function for an error log host
2579  * controlled async message from the adapter. It will log the error and
2580  * send the HCAM back to the adapter.
2581  *
2582  * Return value:
2583  *      none
2584  **/
2585 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2586 {
2587         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2588         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2589         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2590         u32 fd_ioasc;
2591
2592         if (ioa_cfg->sis64)
2593                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2594         else
2595                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2596
2597         list_del_init(&hostrcb->queue);
2598         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2599
2600         if (!ioasc) {
2601                 ipr_handle_log_data(ioa_cfg, hostrcb);
2602                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2603                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2604         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2605                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2606                 dev_err(&ioa_cfg->pdev->dev,
2607                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2608         }
2609
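             /* Queue the buffer for the worker thread to report to user space,
              * then fetch a free one to send back to the adapter. */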
2610         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2611         schedule_work(&ioa_cfg->work_q);
2612         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2613
2614         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2615 }
2616
2617 /**
2618  * ipr_timeout -  An internally generated op has timed out.
2619  * @ipr_cmd:    ipr command struct
2620  *
2621  * This function blocks host requests and initiates an
2622  * adapter reset.
2623  *
2624  * Return value:
2625  *      none
2626  **/
2627 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2628 {
2629         unsigned long lock_flags = 0;
2630         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2631
2632         ENTER;
2633         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2634
2635         ioa_cfg->errors_logged++;
2636         dev_err(&ioa_cfg->pdev->dev,
2637                 "Adapter being reset due to command timeout.\n");
2638
2639         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2640                 ioa_cfg->sdt_state = GET_DUMP;
2641
2642         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2643                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2644
2645         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2646         LEAVE;
2647 }
2648
2649 /**
2650  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2651  * @ipr_cmd:    ipr command struct
2652  *
2653  * This function blocks host requests and initiates an
2654  * adapter reset.
2655  *
2656  * Return value:
2657  *      none
2658  **/
2659 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2660 {
2661         unsigned long lock_flags = 0;
2662         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2663
2664         ENTER;
2665         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2666
2667         ioa_cfg->errors_logged++;
2668         dev_err(&ioa_cfg->pdev->dev,
2669                 "Adapter timed out transitioning to operational.\n");
2670
2671         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
2672                 ioa_cfg->sdt_state = GET_DUMP;
2673
2674         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2675                 if (ipr_fastfail)
2676                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2677                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2678         }
2679
2680         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2681         LEAVE;
2682 }
2683
2684 /**
2685  * ipr_find_ses_entry - Find matching SES in SES table
2686  * @res:        resource entry struct of SES
2687  *
2688  * Return value:
2689  *      pointer to SES table entry / NULL on failure
2690  **/
2691 static const struct ipr_ses_table_entry *
2692 ipr_find_ses_entry(struct ipr_resource_entry *res)
2693 {
2694         int i, j, matches;
2695         struct ipr_std_inq_vpids *vpids;
2696         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2697
2698         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
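                /* An 'X' in the compare mask selects a product ID byte
                 * that must match; any other mask byte is a wildcard.
                 */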
2699                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2700                         if (ste->compare_product_id_byte[j] == 'X') {
2701                                 vpids = &res->std_inq_data.vpids;
2702                                 if (vpids->product_id[j] == ste->product_id[j])
2703                                         matches++;
2704                                 else
2705                                         break;
2706                         } else
2707                                 matches++;
2708                 }
2709
2710                 if (matches == IPR_PROD_ID_LEN)
2711                         return ste;
2712         }
2713
2714         return NULL;
2715 }
2716
2717 /**
2718  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2719  * @ioa_cfg:    ioa config struct
2720  * @bus:                SCSI bus
2721  * @bus_width:  bus width
2722  *
2723  * Return value:
2724  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2725  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2726  *      MB/sec is twice the clock rate in MHz (e.g. for a wide
2727  *      enabled bus, 160 MHz yields a max of 320 MB/sec).
2728  **/
2729 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2730 {
2731         struct ipr_resource_entry *res;
2732         const struct ipr_ses_table_entry *ste;
2733         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2734
2735         /* Loop through each config table entry in the config table buffer */
2736         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2737                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2738                         continue;
2739
2740                 if (bus != res->bus)
2741                         continue;
2742
2743                 if (!(ste = ipr_find_ses_entry(res)))
2744                         continue;
2745
2746                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2747         }
2748
2749         return max_xfer_rate;
2750 }
2751
2752 /**
2753  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2754  * @ioa_cfg:            ioa config struct
2755  * @max_delay:          max delay in micro-seconds to wait
2756  *
2757  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2758  *
2759  * Return value:
2760  *      0 on success / other on failure
2761  **/
2762 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2763 {
2764         volatile u32 pcii_reg;
2765         int delay = 1;
2766
2767         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2768         while (delay < max_delay) {
2769                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2770
2771                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2772                         return 0;
2773
2774                 /* udelay cannot be used if delay is more than a few milliseconds */
2775                 if ((delay / 1000) > MAX_UDELAY_MS)
2776                         mdelay(delay / 1000);
2777                 else
2778                         udelay(delay);
2779
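                /* Exponential backoff: double the wait each iteration */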
2780                 delay += delay;
2781         }
2782         return -EIO;
2783 }
2784
2785 /**
2786  * ipr_get_sis64_dump_data_section - Dump IOA memory
2787  * @ioa_cfg:                    ioa config struct
2788  * @start_addr:                 adapter address to dump
2789  * @dest:                       destination kernel buffer
2790  * @length_in_words:            length to dump in 4 byte words
2791  *
2792  * Return value:
2793  *      0 on success
2794  **/
2795 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2796                                            u32 start_addr,
2797                                            __be32 *dest, u32 length_in_words)
2798 {
2799         int i;
2800
2801         for (i = 0; i < length_in_words; i++) {
2802                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2803                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2804                 dest++;
2805         }
2806
2807         return 0;
2808 }
2809
2810 /**
2811  * ipr_get_ldump_data_section - Dump IOA memory
2812  * @ioa_cfg:                    ioa config struct
2813  * @start_addr:                 adapter address to dump
2814  * @dest:                       destination kernel buffer
2815  * @length_in_words:            length to dump in 4 byte words
2816  *
2817  * Return value:
2818  *      0 on success / -EIO on failure
2819  **/
2820 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2821                                       u32 start_addr,
2822                                       __be32 *dest, u32 length_in_words)
2823 {
2824         volatile u32 temp_pcii_reg;
2825         int i, delay = 0;
2826
2827         if (ioa_cfg->sis64)
2828                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2829                                                        dest, length_in_words);
2830
2831         /* Write IOA interrupt reg starting LDUMP state  */
2832         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2833                ioa_cfg->regs.set_uproc_interrupt_reg32);
2834
2835         /* Wait for IO debug acknowledge */
2836         if (ipr_wait_iodbg_ack(ioa_cfg,
2837                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2838                 dev_err(&ioa_cfg->pdev->dev,
2839                         "IOA dump long data transfer timeout\n");
2840                 return -EIO;
2841         }
2842
2843         /* Signal LDUMP interlocked - clear IO debug ack */
2844         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2845                ioa_cfg->regs.clr_interrupt_reg);
2846
2847         /* Write Mailbox with starting address */
2848         writel(start_addr, ioa_cfg->ioa_mailbox);
2849
2850         /* Signal address valid - clear IOA Reset alert */
2851         writel(IPR_UPROCI_RESET_ALERT,
2852                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2853
2854         for (i = 0; i < length_in_words; i++) {
2855                 /* Wait for IO debug acknowledge */
2856                 if (ipr_wait_iodbg_ack(ioa_cfg,
2857                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2858                         dev_err(&ioa_cfg->pdev->dev,
2859                                 "IOA dump short data transfer timeout\n");
2860                         return -EIO;
2861                 }
2862
2863                 /* Read data from mailbox and increment destination pointer */
2864                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2865                 dest++;
2866
2867                 /* For all but the last word of data, signal data received */
2868                 if (i < (length_in_words - 1)) {
2869                         /* Signal dump data received - Clear IO debug Ack */
2870                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2871                                ioa_cfg->regs.clr_interrupt_reg);
2872                 }
2873         }
2874
2875         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2876         writel(IPR_UPROCI_RESET_ALERT,
2877                ioa_cfg->regs.set_uproc_interrupt_reg32);
2878
2879         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2880                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2881
2882         /* Signal dump data received - Clear IO debug Ack */
2883         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2884                ioa_cfg->regs.clr_interrupt_reg);
2885
2886         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2887         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2888                 temp_pcii_reg =
2889                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2890
2891                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2892                         return 0;
2893
2894                 udelay(10);
2895                 delay += 10;
2896         }
2897
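        /* The dump data was already read successfully, so return success
         * even if the IOA never signaled LDUMP exit.
         */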
2898         return 0;
2899 }
2900
2901 #ifdef CONFIG_SCSI_IPR_DUMP
2902 /**
2903  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2904  * @ioa_cfg:            ioa config struct
2905  * @pci_address:        adapter address
2906  * @length:                     length of data to copy
2907  *
2908  * Copy data from PCI adapter to kernel buffer.
2909  * Note: length MUST be a 4 byte multiple
2910  * Return value:
2911  *      0 on success / other on failure
2912  **/
2913 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2914                         unsigned long pci_address, u32 length)
2915 {
2916         int bytes_copied = 0;
2917         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2918         __be32 *page;
2919         unsigned long lock_flags = 0;
2920         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2921
2922         if (ioa_cfg->sis64)
2923                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2924         else
2925                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2926
2927         while (bytes_copied < length &&
2928                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2929                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2930                     ioa_dump->page_offset == 0) {
2931                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2932
2933                         if (!page) {
2934                                 ipr_trace;
2935                                 return bytes_copied;
2936                         }
2937
2938                         ioa_dump->page_offset = 0;
2939                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2940                         ioa_dump->next_page_index++;
2941                 } else
2942                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2943
2944                 rem_len = length - bytes_copied;
2945                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2946                 cur_len = min(rem_len, rem_page_len);
2947
2948                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2949                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2950                         rc = -EIO;
2951                 } else {
2952                         rc = ipr_get_ldump_data_section(ioa_cfg,
2953                                                         pci_address + bytes_copied,
2954                                                         &page[ioa_dump->page_offset / 4],
2955                                                         (cur_len / sizeof(u32)));
2956                 }
2957                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2958
2959                 if (!rc) {
2960                         ioa_dump->page_offset += cur_len;
2961                         bytes_copied += cur_len;
2962                 } else {
2963                         ipr_trace;
2964                         break;
2965                 }
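                /* Yield the CPU between sections; the dump can take a while */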
2966                 schedule();
2967         }
2968
2969         return bytes_copied;
2970 }
2971
2972 /**
2973  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2974  * @hdr:        dump entry header struct
2975  *
2976  * Return value:
2977  *      nothing
2978  **/
2979 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2980 {
2981         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2982         hdr->num_elems = 1;
2983         hdr->offset = sizeof(*hdr);
2984         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2985 }
2986
2987 /**
2988  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2989  * @ioa_cfg:    ioa config struct
2990  * @driver_dump:        driver dump struct
2991  *
2992  * Return value:
2993  *      nothing
2994  **/
2995 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2996                                    struct ipr_driver_dump *driver_dump)
2997 {
2998         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2999
3000         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3001         driver_dump->ioa_type_entry.hdr.len =
3002                 sizeof(struct ipr_dump_ioa_type_entry) -
3003                 sizeof(struct ipr_dump_entry_header);
3004         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3005         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3006         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3007         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3008                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3009                 ucode_vpd->minor_release[1];
3010         driver_dump->hdr.num_entries++;
3011 }
3012
3013 /**
3014  * ipr_dump_version_data - Fill in the driver version in the dump.
3015  * @ioa_cfg:    ioa config struct
3016  * @driver_dump:        driver dump struct
3017  *
3018  * Return value:
3019  *      nothing
3020  **/
3021 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3022                                   struct ipr_driver_dump *driver_dump)
3023 {
3024         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3025         driver_dump->version_entry.hdr.len =
3026                 sizeof(struct ipr_dump_version_entry) -
3027                 sizeof(struct ipr_dump_entry_header);
3028         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3029         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3030         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3031         driver_dump->hdr.num_entries++;
3032 }
3033
3034 /**
3035  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3036  * @ioa_cfg:    ioa config struct
3037  * @driver_dump:        driver dump struct
3038  *
3039  * Return value:
3040  *      nothing
3041  **/
3042 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3043                                    struct ipr_driver_dump *driver_dump)
3044 {
3045         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3046         driver_dump->trace_entry.hdr.len =
3047                 sizeof(struct ipr_dump_trace_entry) -
3048                 sizeof(struct ipr_dump_entry_header);
3049         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3050         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3051         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3052         driver_dump->hdr.num_entries++;
3053 }
3054
3055 /**
3056  * ipr_dump_location_data - Fill in the IOA location in the dump.
3057  * @ioa_cfg:    ioa config struct
3058  * @driver_dump:        driver dump struct
3059  *
3060  * Return value:
3061  *      nothing
3062  **/
3063 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3064                                    struct ipr_driver_dump *driver_dump)
3065 {
3066         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3067         driver_dump->location_entry.hdr.len =
3068                 sizeof(struct ipr_dump_location_entry) -
3069                 sizeof(struct ipr_dump_entry_header);
3070         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3071         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3072         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3073         driver_dump->hdr.num_entries++;
3074 }
3075
3076 /**
3077  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3078  * @ioa_cfg:    ioa config struct
3079  * @dump:               dump struct
3080  *
3081  * Return value:
3082  *      nothing
3083  **/
3084 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3085 {
3086         unsigned long start_addr, sdt_word;
3087         unsigned long lock_flags = 0;
3088         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3089         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3090         u32 num_entries, max_num_entries, start_off, end_off;
3091         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3092         struct ipr_sdt *sdt;
3093         int valid = 1;
3094         int i;
3095
3096         ENTER;
3097
3098         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3099
3100         if (ioa_cfg->sdt_state != READ_DUMP) {
3101                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3102                 return;
3103         }
3104
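        /* On SIS64 adapters, delay before reading the mailbox, presumably
         * to give the IOA time to post the dump table address.
         */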
3105         if (ioa_cfg->sis64) {
3106                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3107                 ssleep(IPR_DUMP_DELAY_SECONDS);
3108                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3109         }
3110
3111         start_addr = readl(ioa_cfg->ioa_mailbox);
3112
3113         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3114                 dev_err(&ioa_cfg->pdev->dev,
3115                         "Invalid dump table format: %lx\n", start_addr);
3116                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3117                 return;
3118         }
3119
3120         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3121
3122         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3123
3124         /* Initialize the overall dump header */
3125         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3126         driver_dump->hdr.num_entries = 1;
3127         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3128         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3129         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3130         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3131
3132         ipr_dump_version_data(ioa_cfg, driver_dump);
3133         ipr_dump_location_data(ioa_cfg, driver_dump);
3134         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3135         ipr_dump_trace_data(ioa_cfg, driver_dump);
3136
3137         /* Update dump_header */
3138         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3139
3140         /* IOA Dump entry */
3141         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3142         ioa_dump->hdr.len = 0;
3143         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3144         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3145
3146         /* First entries in sdt are actually a list of dump addresses and
3147          * lengths to gather the real dump data.  sdt represents the pointer
3148          * to the ioa generated dump table.  Dump data will be extracted based
3149          * on entries in this table */
3150         sdt = &ioa_dump->sdt;
3151
3152         if (ioa_cfg->sis64) {
3153                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3154                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3155         } else {
3156                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3157                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3158         }
3159
3160         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3161                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3162         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3163                                         bytes_to_copy / sizeof(__be32));
3164
3165         /* Smart Dump table is ready to use and the first entry is valid */
3166         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3167             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3168                 dev_err(&ioa_cfg->pdev->dev,
3169                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3170                         rc, be32_to_cpu(sdt->hdr.state));
3171                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3172                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3173                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174                 return;
3175         }
3176
3177         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3178
3179         if (num_entries > max_num_entries)
3180                 num_entries = max_num_entries;
3181
3182         /* Update dump length to the actual data to be copied */
3183         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3184         if (ioa_cfg->sis64)
3185                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3186         else
3187                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3188
3189         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3190
3191         for (i = 0; i < num_entries; i++) {
3192                 if (ioa_dump->hdr.len > max_dump_size) {
3193                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3194                         break;
3195                 }
3196
3197                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3198                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3199                         if (ioa_cfg->sis64)
3200                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3201                         else {
3202                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3203                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3204
3205                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3206                                         bytes_to_copy = end_off - start_off;
3207                                 else
3208                                         valid = 0;
3209                         }
3210                         if (valid) {
3211                                 if (bytes_to_copy > max_dump_size) {
3212                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3213                                         continue;
3214                                 }
3215
3216                                 /* Copy data from adapter to driver buffers */
3217                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3218                                                             bytes_to_copy);
3219
3220                                 ioa_dump->hdr.len += bytes_copied;
3221
3222                                 if (bytes_copied != bytes_to_copy) {
3223                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3224                                         break;
3225                                 }
3226                         }
3227                 }
3228         }
3229
3230         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3231
3232         /* Update dump_header */
3233         driver_dump->hdr.len += ioa_dump->hdr.len;
3234         wmb();
3235         ioa_cfg->sdt_state = DUMP_OBTAINED;
3236         LEAVE;
3237 }
3238
3239 #else
3240 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3241 #endif
3242
3243 /**
3244  * ipr_release_dump - Free adapter dump memory
3245  * @kref:       kref struct
3246  *
3247  * Return value:
3248  *      nothing
3249  **/
3250 static void ipr_release_dump(struct kref *kref)
3251 {
3252         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3253         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3254         unsigned long lock_flags = 0;
3255         int i;
3256
3257         ENTER;
3258         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3259         ioa_cfg->dump = NULL;
3260         ioa_cfg->sdt_state = INACTIVE;
3261         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3262
3263         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3264                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3265
3266         vfree(dump->ioa_dump.ioa_data);
3267         kfree(dump);
3268         LEAVE;
3269 }
3270
3271 /**
3272  * ipr_worker_thread - Worker thread
3273  * @work:               work struct embedded in the ioa config struct
3274  *
3275  * Called at task level from a work thread. This function takes care
3276  * of adding and removing devices from the mid-layer as configuration
3277  * changes are detected by the adapter.
3278  *
3279  * Return value:
3280  *      nothing
3281  **/
3282 static void ipr_worker_thread(struct work_struct *work)
3283 {
3284         unsigned long lock_flags;
3285         struct ipr_resource_entry *res;
3286         struct scsi_device *sdev;
3287         struct ipr_dump *dump;
3288         struct ipr_ioa_cfg *ioa_cfg =
3289                 container_of(work, struct ipr_ioa_cfg, work_q);
3290         u8 bus, target, lun;
3291         int did_work;
3292
3293         ENTER;
3294         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295
3296         if (ioa_cfg->sdt_state == READ_DUMP) {
3297                 dump = ioa_cfg->dump;
3298                 if (!dump) {
3299                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3300                         return;
3301                 }
3302                 kref_get(&dump->kref);
3303                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3304                 ipr_get_ioa_dump(ioa_cfg, dump);
3305                 kref_put(&dump->kref, ipr_release_dump);
3306
3307                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3308                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3309                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3310                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3311                 return;
3312         }
3313
3314         if (!ioa_cfg->scan_enabled) {
3315                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3316                 return;
3317         }
3318
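        /* scsi_remove_device/scsi_add_device can sleep, so the host lock is
         * dropped around each midlayer call and the resource list is
         * rescanned from the top, since it may change while unlocked.
         */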
3319 restart:
3320         do {
3321                 did_work = 0;
3322                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3323                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3324                         return;
3325                 }
3326
3327                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3328                         if (res->del_from_ml && res->sdev) {
3329                                 did_work = 1;
3330                                 sdev = res->sdev;
3331                                 if (!scsi_device_get(sdev)) {
3332                                         if (!res->add_to_ml)
3333                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3334                                         else
3335                                                 res->del_from_ml = 0;
3336                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3337                                         scsi_remove_device(sdev);
3338                                         scsi_device_put(sdev);
3339                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340                                 }
3341                                 break;
3342                         }
3343                 }
3344         } while (did_work);
3345
3346         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3347                 if (res->add_to_ml) {
3348                         bus = res->bus;
3349                         target = res->target;
3350                         lun = res->lun;
3351                         res->add_to_ml = 0;
3352                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3353                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3354                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3355                         goto restart;
3356                 }
3357         }
3358
3359         ioa_cfg->scan_done = 1;
3360         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3361         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3362         LEAVE;
3363 }
3364
3365 #ifdef CONFIG_SCSI_IPR_TRACE
3366 /**
3367  * ipr_read_trace - Dump the adapter trace
3368  * @filp:               open sysfs file
3369  * @kobj:               kobject struct
3370  * @bin_attr:           bin_attribute struct
3371  * @buf:                buffer
3372  * @off:                offset
3373  * @count:              buffer size
3374  *
3375  * Return value:
3376  *      number of bytes printed to buffer
3377  **/
3378 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3379                               struct bin_attribute *bin_attr,
3380                               char *buf, loff_t off, size_t count)
3381 {
3382         struct device *dev = container_of(kobj, struct device, kobj);
3383         struct Scsi_Host *shost = class_to_shost(dev);
3384         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3385         unsigned long lock_flags = 0;
3386         ssize_t ret;
3387
3388         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3389         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3390                                 IPR_TRACE_SIZE);
3391         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3392
3393         return ret;
3394 }
3395
3396 static struct bin_attribute ipr_trace_attr = {
3397         .attr = {
3398                 .name = "trace",
3399                 .mode = S_IRUGO,
3400         },
3401         .size = 0,
3402         .read = ipr_read_trace,
3403 };
3404 #endif
3405
3406 /**
3407  * ipr_show_fw_version - Show the firmware version
3408  * @dev:        class device struct
3409  * @buf:        buffer
3410  *
3411  * Return value:
3412  *      number of bytes printed to buffer
3413  **/
3414 static ssize_t ipr_show_fw_version(struct device *dev,
3415                                    struct device_attribute *attr, char *buf)
3416 {
3417         struct Scsi_Host *shost = class_to_shost(dev);
3418         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3419         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3420         unsigned long lock_flags = 0;
3421         int len;
3422
3423         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3424         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3425                        ucode_vpd->major_release, ucode_vpd->card_type,
3426                        ucode_vpd->minor_release[0],
3427                        ucode_vpd->minor_release[1]);
3428         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3429         return len;
3430 }
3431
3432 static struct device_attribute ipr_fw_version_attr = {
3433         .attr = {
3434                 .name =         "fw_version",
3435                 .mode =         S_IRUGO,
3436         },
3437         .show = ipr_show_fw_version,
3438 };
3439
3440 /**
3441  * ipr_show_log_level - Show the adapter's error logging level
3442  * @dev:        class device struct
3443  * @buf:        buffer
3444  *
3445  * Return value:
3446  *      number of bytes printed to buffer
3447  **/
3448 static ssize_t ipr_show_log_level(struct device *dev,
3449                                    struct device_attribute *attr, char *buf)
3450 {
3451         struct Scsi_Host *shost = class_to_shost(dev);
3452         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3453         unsigned long lock_flags = 0;
3454         int len;
3455
3456         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3457         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3458         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3459         return len;
3460 }
3461
3462 /**
3463  * ipr_store_log_level - Change the adapter's error logging level
3464  * @dev:        class device struct
3465  * @buf:        buffer
3466  *
3467  * Return value:
3468  *      number of bytes consumed from buffer
3469  **/
3470 static ssize_t ipr_store_log_level(struct device *dev,
3471                                    struct device_attribute *attr,
3472                                    const char *buf, size_t count)
3473 {
3474         struct Scsi_Host *shost = class_to_shost(dev);
3475         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3476         unsigned long lock_flags = 0;
3477
3478         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3479         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3480         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3481         return strlen(buf);
3482 }
3483
3484 static struct device_attribute ipr_log_level_attr = {
3485         .attr = {
3486                 .name =         "log_level",
3487                 .mode =         S_IRUGO | S_IWUSR,
3488         },
3489         .show = ipr_show_log_level,
3490         .store = ipr_store_log_level
3491 };
3492
3493 /**
3494  * ipr_store_diagnostics - IOA Diagnostics interface
3495  * @dev:        device struct
3496  * @buf:        buffer
3497  * @count:      buffer size
3498  *
3499  * This function will reset the adapter and wait a reasonable
3500  * amount of time for any errors that the adapter might log.
3501  *
3502  * Return value:
3503  *      count on success / other on failure
3504  **/
3505 static ssize_t ipr_store_diagnostics(struct device *dev,
3506                                      struct device_attribute *attr,
3507                                      const char *buf, size_t count)
3508 {
3509         struct Scsi_Host *shost = class_to_shost(dev);
3510         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511         unsigned long lock_flags = 0;
3512         int rc = count;
3513
3514         if (!capable(CAP_SYS_ADMIN))
3515                 return -EACCES;
3516
3517         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3518         while (ioa_cfg->in_reset_reload) {
3519                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3520                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3521                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3522         }
3523
3524         ioa_cfg->errors_logged = 0;
3525         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3526
3527         if (ioa_cfg->in_reset_reload) {
3528                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3529                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3530
3531                 /* Wait for a second for any errors to be logged */
3532                 msleep(1000);
3533         } else {
3534                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3535                 return -EIO;
3536         }
3537
3538         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3539         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3540                 rc = -EIO;
3541         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3542
3543         return rc;
3544 }
3545
3546 static struct device_attribute ipr_diagnostics_attr = {
3547         .attr = {
3548                 .name =         "run_diagnostics",
3549                 .mode =         S_IWUSR,
3550         },
3551         .store = ipr_store_diagnostics
3552 };
3553
3554 /**
3555  * ipr_show_adapter_state - Show the adapter's state
3556  * @dev:        device struct
3557  * @buf:        buffer
3558  *
3559  * Return value:
3560  *      number of bytes printed to buffer
3561  **/
3562 static ssize_t ipr_show_adapter_state(struct device *dev,
3563                                       struct device_attribute *attr, char *buf)
3564 {
3565         struct Scsi_Host *shost = class_to_shost(dev);
3566         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3567         unsigned long lock_flags = 0;
3568         int len;
3569
3570         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3571         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3572                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3573         else
3574                 len = snprintf(buf, PAGE_SIZE, "online\n");
3575         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3576         return len;
3577 }
3578
3579 /**
3580  * ipr_store_adapter_state - Change adapter state
3581  * @dev:        device struct
3582  * @buf:        buffer
3583  * @count:      buffer size
3584  *
3585  * This function will change the adapter's state.
3586  *
3587  * Return value:
3588  *      count on success / other on failure
3589  **/
3590 static ssize_t ipr_store_adapter_state(struct device *dev,
3591                                        struct device_attribute *attr,
3592                                        const char *buf, size_t count)
3593 {
3594         struct Scsi_Host *shost = class_to_shost(dev);
3595         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3596         unsigned long lock_flags;
3597         int result = count, i;
3598
3599         if (!capable(CAP_SYS_ADMIN))
3600                 return -EACCES;
3601
3602         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3603         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3604             !strncmp(buf, "online", 6)) {
3605                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3606                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3607                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3608                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3609                 }
3610                 wmb();
3611                 ioa_cfg->reset_retries = 0;
3612                 ioa_cfg->in_ioa_bringdown = 0;
3613                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3614         }
3615         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3617
3618         return result;
3619 }
3620
3621 static struct device_attribute ipr_ioa_state_attr = {
3622         .attr = {
3623                 .name =         "online_state",
3624                 .mode =         S_IRUGO | S_IWUSR,
3625         },
3626         .show = ipr_show_adapter_state,
3627         .store = ipr_store_adapter_state
3628 };
3629
3630 /**
3631  * ipr_store_reset_adapter - Reset the adapter
3632  * @dev:        device struct
3633  * @buf:        buffer
3634  * @count:      buffer size
3635  *
3636  * This function will reset the adapter.
3637  *
3638  * Return value:
3639  *      count on success / other on failure
3640  **/
3641 static ssize_t ipr_store_reset_adapter(struct device *dev,
3642                                        struct device_attribute *attr,
3643                                        const char *buf, size_t count)
3644 {
3645         struct Scsi_Host *shost = class_to_shost(dev);
3646         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3647         unsigned long lock_flags;
3648         int result = count;
3649
3650         if (!capable(CAP_SYS_ADMIN))
3651                 return -EACCES;
3652
3653         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3654         if (!ioa_cfg->in_reset_reload)
3655                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3656         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3657         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3658
3659         return result;
3660 }
3661
3662 static struct device_attribute ipr_ioa_reset_attr = {
3663         .attr = {
3664                 .name =         "reset_host",
3665                 .mode =         S_IWUSR,
3666         },
3667         .store = ipr_store_reset_adapter
3668 };
3669
3670 static int ipr_iopoll(struct irq_poll *iop, int budget);
3671 /**
3672  * ipr_show_iopoll_weight - Show ipr polling mode
3673  * @dev:        class device struct
3674  * @buf:        buffer
3675  *
3676  * Return value:
3677  *      number of bytes printed to buffer
3678  **/
3679 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3680                                    struct device_attribute *attr, char *buf)
3681 {
3682         struct Scsi_Host *shost = class_to_shost(dev);
3683         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3684         unsigned long lock_flags = 0;
3685         int len;
3686
3687         spin_lock_irqsave(shost->host_lock, lock_flags);
3688         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3689         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3690
3691         return len;
3692 }
3693
3694 /**
3695  * ipr_store_iopoll_weight - Change the adapter's polling mode
3696  * @dev:        class device struct
3697  * @buf:        buffer
3698  *
3699  * Return value:
3700  *      number of bytes consumed from buffer
3701  **/
3702 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3703                                         struct device_attribute *attr,
3704                                         const char *buf, size_t count)
3705 {
3706         struct Scsi_Host *shost = class_to_shost(dev);
3707         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3708         unsigned long user_iopoll_weight;
3709         unsigned long lock_flags = 0;
3710         int i;
3711
3712         if (!ioa_cfg->sis64) {
3713                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3714                 return -EINVAL;
3715         }
3716         if (kstrtoul(buf, 10, &user_iopoll_weight))
3717                 return -EINVAL;
3718
3719         if (user_iopoll_weight > 256) {
3720                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
3721                 return -EINVAL;
3722         }
3723
3724         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3725                 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
3726                 return strlen(buf);
3727         }
3728
3729         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3730                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3731                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3732         }
3733
3734         spin_lock_irqsave(shost->host_lock, lock_flags);
3735         ioa_cfg->iopoll_weight = user_iopoll_weight;
3736         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3737                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3738                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3739                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3740                 }
3741         }
3742         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3743
3744         return strlen(buf);
3745 }
3746
3747 static struct device_attribute ipr_iopoll_weight_attr = {
3748         .attr = {
3749                 .name =         "iopoll_weight",
3750                 .mode =         S_IRUGO | S_IWUSR,
3751         },
3752         .show = ipr_show_iopoll_weight,
3753         .store = ipr_store_iopoll_weight
3754 };
3755
3756 /**
3757  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3758  * @buf_len:            buffer length
3759  *
3760  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3761  * list to use for microcode download
3762  *
3763  * Return value:
3764  *      pointer to sglist / NULL on failure
3765  **/
3766 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3767 {
3768         int sg_size, order, bsize_elem, num_elem, i, j;
3769         struct ipr_sglist *sglist;
3770         struct scatterlist *scatterlist;
3771         struct page *page;
3772
3773         /* Get the minimum size per scatter/gather element */
3774         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3775
3776         /* Get the actual size per element */
3777         order = get_order(sg_size);
3778
3779         /* Determine the actual number of bytes per element */
3780         bsize_elem = PAGE_SIZE * (1 << order);
3781
3782         /* Determine the actual number of sg entries needed */
3783         if (buf_len % bsize_elem)
3784                 num_elem = (buf_len / bsize_elem) + 1;
3785         else
3786                 num_elem = buf_len / bsize_elem;
3787
3788         /* Allocate a scatter/gather list for the DMA */
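        /* struct ipr_sglist already includes one scatterlist element,
         * hence the num_elem - 1 below.
         */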
3789         sglist = kzalloc(sizeof(struct ipr_sglist) +
3790                          (sizeof(struct scatterlist) * (num_elem - 1)),
3791                          GFP_KERNEL);
3792
3793         if (sglist == NULL) {
3794                 ipr_trace;
3795                 return NULL;
3796         }
3797
3798         scatterlist = sglist->scatterlist;
3799         sg_init_table(scatterlist, num_elem);
3800
3801         sglist->order = order;
3802         sglist->num_sg = num_elem;
3803
3804         /* Allocate a bunch of sg elements */
3805         for (i = 0; i < num_elem; i++) {
3806                 page = alloc_pages(GFP_KERNEL, order);
3807                 if (!page) {
3808                         ipr_trace;
3809
3810                         /* Free up what we already allocated */
3811                         for (j = i - 1; j >= 0; j--)
3812                                 __free_pages(sg_page(&scatterlist[j]), order);
3813                         kfree(sglist);
3814                         return NULL;
3815                 }
3816
3817                 sg_set_page(&scatterlist[i], page, 0, 0);
3818         }
3819
3820         return sglist;
3821 }
3822
3823 /**
3824  * ipr_free_ucode_buffer - Frees a microcode download buffer
3825  * @sglist:             scatter/gather list pointer
3826  *
3827  * Free a DMA'able ucode download buffer previously allocated with
3828  * ipr_alloc_ucode_buffer
3829  *
3830  * Return value:
3831  *      nothing
3832  **/
3833 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3834 {
3835         int i;
3836
3837         for (i = 0; i < sglist->num_sg; i++)
3838                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3839
3840         kfree(sglist);
3841 }
3842
3843 /**
3844  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3845  * @sglist:             scatter/gather list pointer
3846  * @buffer:             buffer pointer
3847  * @len:                buffer length
3848  *
3849  * Copy a microcode image from a user buffer into a buffer allocated by
3850  * ipr_alloc_ucode_buffer
3851  *
3852  * Return value:
3853  *      0 on success / other on failure
3854  **/
3855 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3856                                  u8 *buffer, u32 len)
3857 {
3858         int bsize_elem, i, result = 0;
3859         struct scatterlist *scatterlist;
3860         void *kaddr;
3861
3862         /* Determine the actual number of bytes per element */
3863         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3864
3865         scatterlist = sglist->scatterlist;
3866
3867         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3868                 struct page *page = sg_page(&scatterlist[i]);
3869
3870                 kaddr = kmap(page);
3871                 memcpy(kaddr, buffer, bsize_elem);
3872                 kunmap(page);
3873
3874                 scatterlist[i].length = bsize_elem;
3875
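                /* result is never set non-zero above; this check appears to
                 * be a holdover from an earlier implementation that could fail.
                 */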
3876                 if (result != 0) {
3877                         ipr_trace;
3878                         return result;
3879                 }
3880         }
3881
3882         if (len % bsize_elem) {
3883                 struct page *page = sg_page(&scatterlist[i]);
3884
3885                 kaddr = kmap(page);
3886                 memcpy(kaddr, buffer, len % bsize_elem);
3887                 kunmap(page);
3888
3889                 scatterlist[i].length = len % bsize_elem;
3890         }
3891
3892         sglist->buffer_len = len;
3893         return result;
3894 }
3895
3896 /**
3897  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3898  * @ipr_cmd:            ipr command struct
3899  * @sglist:             scatter/gather list
3900  *
3901  * Builds a microcode download IOA data list (IOADL).
3902  *
3903  **/
3904 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3905                                     struct ipr_sglist *sglist)
3906 {
3907         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3908         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3909         struct scatterlist *scatterlist = sglist->scatterlist;
3910         int i;
3911
3912         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3913         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3914         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3915
3916         ioarcb->ioadl_len =
3917                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3918         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3919                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3920                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3921                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3922         }
3923
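        /* Mark the final descriptor as the end of the list */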
3924         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3925 }
3926
3927 /**
3928  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3929  * @ipr_cmd:    ipr command struct
3930  * @sglist:             scatter/gather list
3931  *
3932  * Builds a microcode download IOA data list (IOADL).
3933  *
3934  **/
3935 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3936                                   struct ipr_sglist *sglist)
3937 {
3938         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3939         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3940         struct scatterlist *scatterlist = sglist->scatterlist;
3941         int i;
3942
3943         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3944         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3945         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3946
3947         ioarcb->ioadl_len =
3948                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3949
3950         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3951                 ioadl[i].flags_and_data_len =
3952                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3953                 ioadl[i].address =
3954                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3955         }
3956
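        /* Mark the final descriptor as the end of the list */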
3957         ioadl[i-1].flags_and_data_len |=
3958                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3959 }
3960
3961 /**
3962  * ipr_update_ioa_ucode - Update IOA's microcode
3963  * @ioa_cfg:    ioa config struct
3964  * @sglist:             scatter/gather list
3965  *
3966  * Initiate an adapter reset to update the IOA's microcode
3967  *
3968  * Return value:
3969  *      0 on success / -EIO on failure
3970  **/
3971 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3972                                 struct ipr_sglist *sglist)
3973 {
3974         unsigned long lock_flags;
3975
3976         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3977         while (ioa_cfg->in_reset_reload) {
3978                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3979                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3980                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3981         }
3982
3983         if (ioa_cfg->ucode_sglist) {
3984                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3985                 dev_err(&ioa_cfg->pdev->dev,
3986                         "Microcode download already in progress\n");
3987                 return -EIO;
3988         }
3989
3990         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3991                                         sglist->scatterlist, sglist->num_sg,
3992                                         DMA_TO_DEVICE);
3993
3994         if (!sglist->num_dma_sg) {
3995                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3996                 dev_err(&ioa_cfg->pdev->dev,
3997                         "Failed to map microcode download buffer!\n");
3998                 return -EIO;
3999         }
4000
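        /* The reset job picks up ucode_sglist and performs the actual
         * download during the adapter reset sequence.
         */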
4001         ioa_cfg->ucode_sglist = sglist;
4002         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4003         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4004         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4005
4006         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4007         ioa_cfg->ucode_sglist = NULL;
4008         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4009         return 0;
4010 }
4011
4012 /**
4013  * ipr_store_update_fw - Update the firmware on the adapter
4014  * @dev:        device struct
4015  * @buf:        buffer
4016  * @count:      buffer size
4017  *
4018  * This function will update the firmware on the adapter.
4019  *
4020  * Return value:
4021  *      count on success / other on failure
4022  **/
4023 static ssize_t ipr_store_update_fw(struct device *dev,
4024                                    struct device_attribute *attr,
4025                                    const char *buf, size_t count)
4026 {
4027         struct Scsi_Host *shost = class_to_shost(dev);
4028         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4029         struct ipr_ucode_image_header *image_hdr;
4030         const struct firmware *fw_entry;
4031         struct ipr_sglist *sglist;
4032         char fname[100];
4033         u8 *src;
4034         char *endline;
4035         int result, dnld_size;
4036
4037         if (!capable(CAP_SYS_ADMIN))
4038                 return -EACCES;
4039
4040         snprintf(fname, sizeof(fname), "%s", buf);
4041
4042         endline = strchr(fname, '\n');
4043         if (endline)
4044                 *endline = '\0';
4045
4046         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4047                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4048                 return -EIO;
4049         }
4050
4051         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4052
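        /* The downloadable microcode immediately follows the image header */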
4053         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4054         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4055         sglist = ipr_alloc_ucode_buffer(dnld_size);
4056
4057         if (!sglist) {
4058                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4059                 release_firmware(fw_entry);
4060                 return -ENOMEM;
4061         }
4062
4063         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4064
4065         if (result) {
4066                 dev_err(&ioa_cfg->pdev->dev,
4067                         "Microcode buffer copy to DMA buffer failed\n");
4068                 goto out;
4069         }
4070
4071         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4072
4073         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4074
4075         if (!result)
4076                 result = count;
4077 out:
4078         ipr_free_ucode_buffer(sglist);
4079         release_firmware(fw_entry);
4080         return result;
4081 }
4082
4083 static struct device_attribute ipr_update_fw_attr = {
4084         .attr = {
4085                 .name =         "update_fw",
4086                 .mode =         S_IWUSR,
4087         },
4088         .store = ipr_store_update_fw
4089 };
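
/*
 * Usage sketch (host number and file name below are illustrative):
 * writing a firmware file name to this attribute makes the driver
 * fetch the image via request_firmware() and download it:
 *
 *      echo my_ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks across the adapter reset that activates the new
 * microcode, which is why it can take many minutes to return.
 */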
4090
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:        class device struct
 * @attr:       device attribute structure
 * @buf:        buffer
 *
 * Return value:
 *      number of bytes printed to buffer
 **/
4099 static ssize_t ipr_show_fw_type(struct device *dev,
4100                                 struct device_attribute *attr, char *buf)
4101 {
4102         struct Scsi_Host *shost = class_to_shost(dev);
4103         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4104         unsigned long lock_flags = 0;
4105         int len;
4106
4107         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4108         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4109         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4110         return len;
4111 }
4112
4113 static struct device_attribute ipr_ioa_fw_type_attr = {
4114         .attr = {
4115                 .name =         "fw_type",
4116                 .mode =         S_IRUGO,
4117         },
4118         .show = ipr_show_fw_type
4119 };
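
/*
 * fw_type simply reads back ioa_cfg->sis64, so userspace sees "0" on
 * a legacy 32-bit SIS adapter and "1" on a SIS-64 adapter.
 */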
4120
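/**
 * ipr_read_async_err_log - Return the oldest queued async error HCAM
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer
 * @off:        offset
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes read / 0 if no error is queued
 **/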
4121 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4122                                 struct bin_attribute *bin_attr, char *buf,
4123                                 loff_t off, size_t count)
4124 {
4125         struct device *cdev = container_of(kobj, struct device, kobj);
4126         struct Scsi_Host *shost = class_to_shost(cdev);
4127         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4128         struct ipr_hostrcb *hostrcb;
4129         unsigned long lock_flags = 0;
4130         int ret;
4131
4132         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4133         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4134                                         struct ipr_hostrcb, queue);
4135         if (!hostrcb) {
4136                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4137                 return 0;
4138         }
4139         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4140                                 sizeof(hostrcb->hcam));
4141         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4142         return ret;
4143 }
4144
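/**
 * ipr_next_async_err_log - Reclaim the oldest HCAM and advance the log
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer (contents ignored)
 * @off:        offset
 * @count:      buffer size
 *
 * Return value:
 *      count
 **/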
4145 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4146                                 struct bin_attribute *bin_attr, char *buf,
4147                                 loff_t off, size_t count)
4148 {
4149         struct device *cdev = container_of(kobj, struct device, kobj);
4150         struct Scsi_Host *shost = class_to_shost(cdev);
4151         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4152         struct ipr_hostrcb *hostrcb;
4153         unsigned long lock_flags = 0;
4154
4155         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4156         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4157                                         struct ipr_hostrcb, queue);
4158         if (!hostrcb) {
4159                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4160                 return count;
4161         }
4162
4163         /* Reclaim hostrcb before exit */
4164         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4165         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4166         return count;
4167 }
4168
4169 static struct bin_attribute ipr_ioa_async_err_log = {
4170         .attr = {
4171                 .name =         "async_err_log",
4172                 .mode =         S_IRUGO | S_IWUSR,
4173         },
4174         .size = 0,
4175         .read = ipr_read_async_err_log,
4176         .write = ipr_next_async_err_log
4177 };
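
/*
 * async_err_log protocol: a read returns the oldest HCAM still on
 * hostrcb_report_q (or 0 bytes when the queue is empty), and a write
 * of anything moves that entry back to hostrcb_free_q so the next
 * read sees the following error. A consumer loop might look like
 * this (paths elided/illustrative):
 *
 *      dd if=.../async_err_log bs=4096 count=1 > hcam.bin
 *      echo 1 > .../async_err_log
 */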
4178
4179 static struct device_attribute *ipr_ioa_attrs[] = {
4180         &ipr_fw_version_attr,
4181         &ipr_log_level_attr,
4182         &ipr_diagnostics_attr,
4183         &ipr_ioa_state_attr,
4184         &ipr_ioa_reset_attr,
4185         &ipr_update_fw_attr,
4186         &ipr_ioa_fw_type_attr,
4187         &ipr_iopoll_weight_attr,
4188         NULL,
4189 };
4190
4191 #ifdef CONFIG_SCSI_IPR_DUMP
4192 /**
4193  * ipr_read_dump - Dump the adapter
4194  * @filp:               open sysfs file
4195  * @kobj:               kobject struct
4196  * @bin_attr:           bin_attribute struct
4197  * @buf:                buffer
4198  * @off:                offset
4199  * @count:              buffer size
4200  *
4201  * Return value:
4202  *      number of bytes printed to buffer
4203  **/
4204 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4205                              struct bin_attribute *bin_attr,
4206                              char *buf, loff_t off, size_t count)
4207 {
4208         struct device *cdev = container_of(kobj, struct device, kobj);
4209         struct Scsi_Host *shost = class_to_shost(cdev);
4210         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4211         struct ipr_dump *dump;
4212         unsigned long lock_flags = 0;
4213         char *src;
4214         int len, sdt_end;
4215         size_t rc = count;
4216
4217         if (!capable(CAP_SYS_ADMIN))
4218                 return -EACCES;
4219
4220         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4221         dump = ioa_cfg->dump;
4222
4223         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4224                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4225                 return 0;
4226         }
4227         kref_get(&dump->kref);
4228         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4229
4230         if (off > dump->driver_dump.hdr.len) {
4231                 kref_put(&dump->kref, ipr_release_dump);
4232                 return 0;
4233         }
4234
4235         if (off + count > dump->driver_dump.hdr.len) {
4236                 count = dump->driver_dump.hdr.len - off;
4237                 rc = count;
4238         }
4239
4240         if (count && off < sizeof(dump->driver_dump)) {
4241                 if (off + count > sizeof(dump->driver_dump))
4242                         len = sizeof(dump->driver_dump) - off;
4243                 else
4244                         len = count;
4245                 src = (u8 *)&dump->driver_dump + off;
4246                 memcpy(buf, src, len);
4247                 buf += len;
4248                 off += len;
4249                 count -= len;
4250         }
4251
4252         off -= sizeof(dump->driver_dump);
4253
4254         if (ioa_cfg->sis64)
4255                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4256                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4257                            sizeof(struct ipr_sdt_entry));
4258         else
4259                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4260                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4261
4262         if (count && off < sdt_end) {
4263                 if (off + count > sdt_end)
4264                         len = sdt_end - off;
4265                 else
4266                         len = count;
4267                 src = (u8 *)&dump->ioa_dump + off;
4268                 memcpy(buf, src, len);
4269                 buf += len;
4270                 off += len;
4271                 count -= len;
4272         }
4273
4274         off -= sdt_end;
4275
4276         while (count) {
4277                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4278                         len = PAGE_ALIGN(off) - off;
4279                 else
4280                         len = count;
4281                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4282                 src += off & ~PAGE_MASK;
4283                 memcpy(buf, src, len);
4284                 buf += len;
4285                 off += len;
4286                 count -= len;
4287         }
4288
4289         kref_put(&dump->kref, ipr_release_dump);
4290         return rc;
4291 }
4292
4293 /**
4294  * ipr_alloc_dump - Prepare for adapter dump
4295  * @ioa_cfg:    ioa config struct
4296  *
4297  * Return value:
4298  *      0 on success / other on failure
4299  **/
4300 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4301 {
4302         struct ipr_dump *dump;
4303         __be32 **ioa_data;
4304         unsigned long lock_flags = 0;
4305
4306         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4307
4308         if (!dump) {
4309                 ipr_err("Dump memory allocation failed\n");
4310                 return -ENOMEM;
4311         }
4312
4313         if (ioa_cfg->sis64)
4314                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4315         else
4316                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4317
4318         if (!ioa_data) {
4319                 ipr_err("Dump memory allocation failed\n");
4320                 kfree(dump);
4321                 return -ENOMEM;
4322         }
4323
4324         dump->ioa_dump.ioa_data = ioa_data;
4325
4326         kref_init(&dump->kref);
4327         dump->ioa_cfg = ioa_cfg;
4328
4329         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4330
4331         if (INACTIVE != ioa_cfg->sdt_state) {
4332                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4333                 vfree(dump->ioa_dump.ioa_data);
4334                 kfree(dump);
4335                 return 0;
4336         }
4337
4338         ioa_cfg->dump = dump;
4339         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4340         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4341                 ioa_cfg->dump_taken = 1;
4342                 schedule_work(&ioa_cfg->work_q);
4343         }
4344         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4345
4346         return 0;
4347 }
4348
4349 /**
4350  * ipr_free_dump - Free adapter dump memory
4351  * @ioa_cfg:    ioa config struct
4352  *
4353  * Return value:
4354  *      0 on success / other on failure
4355  **/
4356 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4357 {
4358         struct ipr_dump *dump;
4359         unsigned long lock_flags = 0;
4360
4361         ENTER;
4362
4363         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4364         dump = ioa_cfg->dump;
4365         if (!dump) {
4366                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4367                 return 0;
4368         }
4369
4370         ioa_cfg->dump = NULL;
4371         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4372
4373         kref_put(&dump->kref, ipr_release_dump);
4374
4375         LEAVE;
4376         return 0;
4377 }
4378
/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:               open sysfs file
 * @kobj:               kobject struct
 * @bin_attr:           bin_attribute struct
 * @buf:                buffer
 * @off:                offset
 * @count:              buffer size
 *
 * Return value:
 *      count on success / other on failure
 **/
4391 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4392                               struct bin_attribute *bin_attr,
4393                               char *buf, loff_t off, size_t count)
4394 {
4395         struct device *cdev = container_of(kobj, struct device, kobj);
4396         struct Scsi_Host *shost = class_to_shost(cdev);
4397         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4398         int rc;
4399
4400         if (!capable(CAP_SYS_ADMIN))
4401                 return -EACCES;
4402
4403         if (buf[0] == '1')
4404                 rc = ipr_alloc_dump(ioa_cfg);
4405         else if (buf[0] == '0')
4406                 rc = ipr_free_dump(ioa_cfg);
4407         else
4408                 return -EINVAL;
4409
4410         if (rc)
4411                 return rc;
4412         else
4413                 return count;
4414 }
4415
4416 static struct bin_attribute ipr_dump_attr = {
4417         .attr = {
4418                 .name = "dump",
4419                 .mode = S_IRUSR | S_IWUSR,
4420         },
4421         .size = 0,
4422         .read = ipr_read_dump,
4423         .write = ipr_write_dump
4424 };
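
/*
 * Dump protocol as implemented above: writing '1' to "dump" allocates
 * the dump buffers and arms WAIT_FOR_DUMP, writing '0' frees them,
 * and reads stream out the driver dump header, the SDT, and then the
 * captured IOA pages, in that order.
 */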
4425 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4427 #endif
4428
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:       scsi device struct
 * @qdepth:     depth to set
 *
 * Return value:
 *      actual depth set
 **/
4438 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4439 {
4440         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4441         struct ipr_resource_entry *res;
4442         unsigned long lock_flags = 0;
4443
4444         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4445         res = (struct ipr_resource_entry *)sdev->hostdata;
4446
4447         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4448                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4449         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4450
4451         scsi_change_queue_depth(sdev, qdepth);
4452         return sdev->queue_depth;
4453 }
4454
4455 /**
4456  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4457  * @dev:        device struct
4458  * @attr:       device attribute structure
4459  * @buf:        buffer
4460  *
4461  * Return value:
4462  *      number of bytes printed to buffer
4463  **/
4464 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4465 {
4466         struct scsi_device *sdev = to_scsi_device(dev);
4467         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4468         struct ipr_resource_entry *res;
4469         unsigned long lock_flags = 0;
4470         ssize_t len = -ENXIO;
4471
4472         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4473         res = (struct ipr_resource_entry *)sdev->hostdata;
4474         if (res)
4475                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4476         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4477         return len;
4478 }
4479
4480 static struct device_attribute ipr_adapter_handle_attr = {
4481         .attr = {
4482                 .name =         "adapter_handle",
4483                 .mode =         S_IRUSR,
4484         },
4485         .show = ipr_show_adapter_handle
4486 };
4487
4488 /**
4489  * ipr_show_resource_path - Show the resource path or the resource address for
4490  *                          this device.
4491  * @dev:        device struct
4492  * @attr:       device attribute structure
4493  * @buf:        buffer
4494  *
4495  * Return value:
4496  *      number of bytes printed to buffer
4497  **/
4498 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4499 {
4500         struct scsi_device *sdev = to_scsi_device(dev);
4501         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4502         struct ipr_resource_entry *res;
4503         unsigned long lock_flags = 0;
4504         ssize_t len = -ENXIO;
4505         char buffer[IPR_MAX_RES_PATH_LENGTH];
4506
4507         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4508         res = (struct ipr_resource_entry *)sdev->hostdata;
4509         if (res && ioa_cfg->sis64)
4510                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4511                                __ipr_format_res_path(res->res_path, buffer,
4512                                                      sizeof(buffer)));
4513         else if (res)
4514                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4515                                res->bus, res->target, res->lun);
4516
4517         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4518         return len;
4519 }
4520
4521 static struct device_attribute ipr_resource_path_attr = {
4522         .attr = {
4523                 .name =         "resource_path",
4524                 .mode =         S_IRUGO,
4525         },
4526         .show = ipr_show_resource_path
4527 };
4528
4529 /**
4530  * ipr_show_device_id - Show the device_id for this device.
4531  * @dev:        device struct
4532  * @attr:       device attribute structure
4533  * @buf:        buffer
4534  *
4535  * Return value:
4536  *      number of bytes printed to buffer
4537  **/
4538 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4539 {
4540         struct scsi_device *sdev = to_scsi_device(dev);
4541         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4542         struct ipr_resource_entry *res;
4543         unsigned long lock_flags = 0;
4544         ssize_t len = -ENXIO;
4545
4546         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4547         res = (struct ipr_resource_entry *)sdev->hostdata;
4548         if (res && ioa_cfg->sis64)
4549                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4550         else if (res)
4551                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4552
4553         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4554         return len;
4555 }
4556
4557 static struct device_attribute ipr_device_id_attr = {
4558         .attr = {
4559                 .name =         "device_id",
4560                 .mode =         S_IRUGO,
4561         },
4562         .show = ipr_show_device_id
4563 };
4564
4565 /**
4566  * ipr_show_resource_type - Show the resource type for this device.
4567  * @dev:        device struct
4568  * @attr:       device attribute structure
4569  * @buf:        buffer
4570  *
4571  * Return value:
4572  *      number of bytes printed to buffer
4573  **/
4574 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4575 {
4576         struct scsi_device *sdev = to_scsi_device(dev);
4577         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4578         struct ipr_resource_entry *res;
4579         unsigned long lock_flags = 0;
4580         ssize_t len = -ENXIO;
4581
4582         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4583         res = (struct ipr_resource_entry *)sdev->hostdata;
4584
4585         if (res)
4586                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4587
4588         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4589         return len;
4590 }
4591
4592 static struct device_attribute ipr_resource_type_attr = {
4593         .attr = {
4594                 .name =         "resource_type",
4595                 .mode =         S_IRUGO,
4596         },
4597         .show = ipr_show_resource_type
4598 };
4599
/**
 * ipr_show_raw_mode - Show the device's raw mode setting
 * @dev:        class device struct
 * @attr:       device attribute structure
 * @buf:        buffer
 *
 * Return value:
 *      number of bytes printed to buffer
 **/
4608 static ssize_t ipr_show_raw_mode(struct device *dev,
4609                                  struct device_attribute *attr, char *buf)
4610 {
4611         struct scsi_device *sdev = to_scsi_device(dev);
4612         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4613         struct ipr_resource_entry *res;
4614         unsigned long lock_flags = 0;
4615         ssize_t len;
4616
4617         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4618         res = (struct ipr_resource_entry *)sdev->hostdata;
4619         if (res)
4620                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4621         else
4622                 len = -ENXIO;
4623         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4624         return len;
4625 }
4626
/**
 * ipr_store_raw_mode - Change the device's raw mode setting
 * @dev:        class device struct
 * @attr:       device attribute structure
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes consumed on success / other on failure
 **/
4635 static ssize_t ipr_store_raw_mode(struct device *dev,
4636                                   struct device_attribute *attr,
4637                                   const char *buf, size_t count)
4638 {
4639         struct scsi_device *sdev = to_scsi_device(dev);
4640         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4641         struct ipr_resource_entry *res;
4642         unsigned long lock_flags = 0;
4643         ssize_t len;
4644
4645         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4646         res = (struct ipr_resource_entry *)sdev->hostdata;
4647         if (res) {
4648                 if (ipr_is_af_dasd_device(res)) {
4649                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4650                         len = strlen(buf);
4651                         if (res->sdev)
4652                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4653                                         res->raw_mode ? "enabled" : "disabled");
4654                 } else
4655                         len = -EINVAL;
4656         } else
4657                 len = -ENXIO;
4658         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4659         return len;
4660 }
4661
4662 static struct device_attribute ipr_raw_mode_attr = {
4663         .attr = {
4664                 .name =         "raw_mode",
4665                 .mode =         S_IRUGO | S_IWUSR,
4666         },
4667         .show = ipr_show_raw_mode,
4668         .store = ipr_store_raw_mode
4669 };
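
/*
 * raw_mode only applies to AF DASD resources; the store above fails
 * with -EINVAL for anything else. Enabling it from userspace might
 * look like this (device path illustrative):
 *
 *      echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
 */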
4670
4671 static struct device_attribute *ipr_dev_attrs[] = {
4672         &ipr_adapter_handle_attr,
4673         &ipr_resource_path_attr,
4674         &ipr_device_id_attr,
4675         &ipr_resource_type_attr,
4676         &ipr_raw_mode_attr,
4677         NULL,
4678 };
4679
4680 /**
4681  * ipr_biosparam - Return the HSC mapping
4682  * @sdev:                       scsi device struct
4683  * @block_device:       block device pointer
4684  * @capacity:           capacity of the device
4685  * @parm:                       Array containing returned HSC values.
4686  *
4687  * This function generates the HSC parms that fdisk uses.
4688  * We want to make sure we return something that places partitions
4689  * on 4k boundaries for best performance with the IOA.
4690  *
4691  * Return value:
4692  *      0 on success
4693  **/
4694 static int ipr_biosparam(struct scsi_device *sdev,
4695                          struct block_device *block_device,
4696                          sector_t capacity, int *parm)
4697 {
4698         int heads, sectors;
4699         sector_t cylinders;
4700
4701         heads = 128;
4702         sectors = 32;
4703
4704         cylinders = capacity;
4705         sector_div(cylinders, (128 * 32));
4706
4707         /* return result */
4708         parm[0] = heads;
4709         parm[1] = sectors;
4710         parm[2] = cylinders;
4711
4712         return 0;
4713 }
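
/*
 * Worked example for the geometry above: with 128 heads and 32
 * sectors/track there are 128 * 32 = 4096 sectors (2 MiB) per
 * cylinder, so a 41943040-sector (20 GiB) disk reports
 * 41943040 / 4096 = 10240 cylinders, and fdisk's cylinder-aligned
 * partitions land on 4k multiples.
 */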
4714
4715 /**
4716  * ipr_find_starget - Find target based on bus/target.
4717  * @starget:    scsi target struct
4718  *
4719  * Return value:
4720  *      resource entry pointer if found / NULL if not found
4721  **/
4722 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4723 {
4724         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4725         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4726         struct ipr_resource_entry *res;
4727
4728         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4729                 if ((res->bus == starget->channel) &&
4730                     (res->target == starget->id)) {
4731                         return res;
4732                 }
4733         }
4734
4735         return NULL;
4736 }
4737
4738 static struct ata_port_info sata_port_info;
4739
4740 /**
4741  * ipr_target_alloc - Prepare for commands to a SCSI target
4742  * @starget:    scsi target struct
4743  *
4744  * If the device is a SATA device, this function allocates an
4745  * ATA port with libata, else it does nothing.
4746  *
4747  * Return value:
4748  *      0 on success / non-0 on failure
4749  **/
4750 static int ipr_target_alloc(struct scsi_target *starget)
4751 {
4752         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4753         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4754         struct ipr_sata_port *sata_port;
4755         struct ata_port *ap;
4756         struct ipr_resource_entry *res;
4757         unsigned long lock_flags;
4758
4759         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4760         res = ipr_find_starget(starget);
4761         starget->hostdata = NULL;
4762
4763         if (res && ipr_is_gata(res)) {
4764                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4765                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4766                 if (!sata_port)
4767                         return -ENOMEM;
4768
4769                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4770                 if (ap) {
4771                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4772                         sata_port->ioa_cfg = ioa_cfg;
4773                         sata_port->ap = ap;
4774                         sata_port->res = res;
4775
4776                         res->sata_port = sata_port;
4777                         ap->private_data = sata_port;
4778                         starget->hostdata = sata_port;
4779                 } else {
4780                         kfree(sata_port);
4781                         return -ENOMEM;
4782                 }
4783         }
4784         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4785
4786         return 0;
4787 }
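
/*
 * Note on the locking above: the host lock is dropped around the
 * GFP_KERNEL allocation and the libata port allocation, since both
 * may sleep, and is retaken before the resource entry and target are
 * linked to the new port.
 */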
4788
4789 /**
4790  * ipr_target_destroy - Destroy a SCSI target
4791  * @starget:    scsi target struct
4792  *
4793  * If the device was a SATA device, this function frees the libata
4794  * ATA port, else it does nothing.
4795  *
4796  **/
4797 static void ipr_target_destroy(struct scsi_target *starget)
4798 {
4799         struct ipr_sata_port *sata_port = starget->hostdata;
4800         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4801         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4802
4803         if (ioa_cfg->sis64) {
4804                 if (!ipr_find_starget(starget)) {
4805                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4806                                 clear_bit(starget->id, ioa_cfg->array_ids);
4807                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4808                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4809                         else if (starget->channel == 0)
4810                                 clear_bit(starget->id, ioa_cfg->target_ids);
4811                 }
4812         }
4813
4814         if (sata_port) {
4815                 starget->hostdata = NULL;
4816                 ata_sas_port_destroy(sata_port->ap);
4817                 kfree(sata_port);
4818         }
4819 }
4820
4821 /**
4822  * ipr_find_sdev - Find device based on bus/target/lun.
4823  * @sdev:       scsi device struct
4824  *
4825  * Return value:
4826  *      resource entry pointer if found / NULL if not found
4827  **/
4828 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4829 {
4830         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4831         struct ipr_resource_entry *res;
4832
4833         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4834                 if ((res->bus == sdev->channel) &&
4835                     (res->target == sdev->id) &&
4836                     (res->lun == sdev->lun))
4837                         return res;
4838         }
4839
4840         return NULL;
4841 }
4842
4843 /**
4844  * ipr_slave_destroy - Unconfigure a SCSI device
4845  * @sdev:       scsi device struct
4846  *
4847  * Return value:
4848  *      nothing
4849  **/
4850 static void ipr_slave_destroy(struct scsi_device *sdev)
4851 {
4852         struct ipr_resource_entry *res;
4853         struct ipr_ioa_cfg *ioa_cfg;
4854         unsigned long lock_flags = 0;
4855
4856         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4857
4858         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4859         res = (struct ipr_resource_entry *) sdev->hostdata;
4860         if (res) {
4861                 if (res->sata_port)
4862                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4863                 sdev->hostdata = NULL;
4864                 res->sdev = NULL;
4865                 res->sata_port = NULL;
4866         }
4867         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4868 }
4869
4870 /**
4871  * ipr_slave_configure - Configure a SCSI device
4872  * @sdev:       scsi device struct
4873  *
4874  * This function configures the specified scsi device.
4875  *
4876  * Return value:
4877  *      0 on success
4878  **/
4879 static int ipr_slave_configure(struct scsi_device *sdev)
4880 {
4881         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4882         struct ipr_resource_entry *res;
4883         struct ata_port *ap = NULL;
4884         unsigned long lock_flags = 0;
4885         char buffer[IPR_MAX_RES_PATH_LENGTH];
4886
4887         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4888         res = sdev->hostdata;
4889         if (res) {
4890                 if (ipr_is_af_dasd_device(res))
4891                         sdev->type = TYPE_RAID;
4892                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4893                         sdev->scsi_level = 4;
4894                         sdev->no_uld_attach = 1;
4895                 }
4896                 if (ipr_is_vset_device(res)) {
4897                         sdev->scsi_level = SCSI_SPC_3;
4898                         blk_queue_rq_timeout(sdev->request_queue,
4899                                              IPR_VSET_RW_TIMEOUT);
4900                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4901                 }
4902                 if (ipr_is_gata(res) && res->sata_port)
4903                         ap = res->sata_port->ap;
4904                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4905
4906                 if (ap) {
4907                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4908                         ata_sas_slave_configure(sdev, ap);
4909                 }
4910
4911                 if (ioa_cfg->sis64)
4912                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4913                                     ipr_format_res_path(ioa_cfg,
4914                                 res->res_path, buffer, sizeof(buffer)));
4915                 return 0;
4916         }
4917         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918         return 0;
4919 }
4920
4921 /**
4922  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4923  * @sdev:       scsi device struct
4924  *
4925  * This function initializes an ATA port so that future commands
4926  * sent through queuecommand will work.
4927  *
4928  * Return value:
4929  *      0 on success
4930  **/
4931 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4932 {
4933         struct ipr_sata_port *sata_port = NULL;
4934         int rc = -ENXIO;
4935
4936         ENTER;
4937         if (sdev->sdev_target)
4938                 sata_port = sdev->sdev_target->hostdata;
4939         if (sata_port) {
4940                 rc = ata_sas_port_init(sata_port->ap);
4941                 if (rc == 0)
4942                         rc = ata_sas_sync_probe(sata_port->ap);
4943         }
4944
4945         if (rc)
4946                 ipr_slave_destroy(sdev);
4947
4948         LEAVE;
4949         return rc;
4950 }
4951
4952 /**
4953  * ipr_slave_alloc - Prepare for commands to a device.
4954  * @sdev:       scsi device struct
4955  *
4956  * This function saves a pointer to the resource entry
4957  * in the scsi device struct if the device exists. We
4958  * can then use this pointer in ipr_queuecommand when
4959  * handling new commands.
4960  *
4961  * Return value:
4962  *      0 on success / -ENXIO if device does not exist
4963  **/
4964 static int ipr_slave_alloc(struct scsi_device *sdev)
4965 {
4966         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4967         struct ipr_resource_entry *res;
4968         unsigned long lock_flags;
4969         int rc = -ENXIO;
4970
4971         sdev->hostdata = NULL;
4972
4973         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4974
4975         res = ipr_find_sdev(sdev);
4976         if (res) {
4977                 res->sdev = sdev;
4978                 res->add_to_ml = 0;
4979                 res->in_erp = 0;
4980                 sdev->hostdata = res;
4981                 if (!ipr_is_naca_model(res))
4982                         res->needs_sync_complete = 1;
4983                 rc = 0;
4984                 if (ipr_is_gata(res)) {
4985                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4986                         return ipr_ata_slave_alloc(sdev);
4987                 }
4988         }
4989
4990         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4991
4992         return rc;
4993 }
4994
4995 /**
4996  * ipr_match_lun - Match function for specified LUN
4997  * @ipr_cmd:    ipr command struct
4998  * @device:             device to match (sdev)
4999  *
5000  * Returns:
5001  *      1 if command matches sdev / 0 if command does not match sdev
5002  **/
5003 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5004 {
5005         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5006                 return 1;
5007         return 0;
5008 }
5009
/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:    ioa config struct
 * @device:     device to match (sdev)
 * @match:      match function to use
 *
 * Returns:
 *      SUCCESS / FAILED
 **/
5019 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5020                             int (*match)(struct ipr_cmnd *, void *))
5021 {
5022         struct ipr_cmnd *ipr_cmd;
5023         int wait;
5024         unsigned long flags;
5025         struct ipr_hrr_queue *hrrq;
5026         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5027         DECLARE_COMPLETION_ONSTACK(comp);
5028
5029         ENTER;
5030         do {
5031                 wait = 0;
5032
5033                 for_each_hrrq(hrrq, ioa_cfg) {
5034                         spin_lock_irqsave(hrrq->lock, flags);
5035                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5036                                 if (match(ipr_cmd, device)) {
5037                                         ipr_cmd->eh_comp = &comp;
5038                                         wait++;
5039                                 }
5040                         }
5041                         spin_unlock_irqrestore(hrrq->lock, flags);
5042                 }
5043
5044                 if (wait) {
5045                         timeout = wait_for_completion_timeout(&comp, timeout);
5046
5047                         if (!timeout) {
5048                                 wait = 0;
5049
5050                                 for_each_hrrq(hrrq, ioa_cfg) {
5051                                         spin_lock_irqsave(hrrq->lock, flags);
5052                                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5053                                                 if (match(ipr_cmd, device)) {
5054                                                         ipr_cmd->eh_comp = NULL;
5055                                                         wait++;
5056                                                 }
5057                                         }
5058                                         spin_unlock_irqrestore(hrrq->lock, flags);
5059                                 }
5060
5061                                 if (wait)
5062                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5063                                 LEAVE;
5064                                 return wait ? FAILED : SUCCESS;
5065                         }
5066                 }
5067         } while (wait);
5068
5069         LEAVE;
5070         return SUCCESS;
5071 }
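
/*
 * Note on the loop above: the first pass attaches the on-stack
 * completion to every matching pending command and sleeps; on a
 * timeout, the second pass detaches those pointers again so a
 * late-finishing command cannot complete stack memory that is about
 * to go out of scope.
 */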
5072
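/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/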
5073 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5074 {
5075         struct ipr_ioa_cfg *ioa_cfg;
5076         unsigned long lock_flags = 0;
5077         int rc = SUCCESS;
5078
5079         ENTER;
5080         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5081         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5082
5083         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5084                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5085                 dev_err(&ioa_cfg->pdev->dev,
5086                         "Adapter being reset as a result of error recovery.\n");
5087
5088                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5089                         ioa_cfg->sdt_state = GET_DUMP;
5090         }
5091
5092         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5093         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5094         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5095
        /* If we got hit with a host reset while we were already resetting
         * the adapter for some reason, and that reset failed, report this
         * host reset as failed too.
         */
5098         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5099                 ipr_trace;
5100                 rc = FAILED;
5101         }
5102
5103         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5104         LEAVE;
5105         return rc;
5106 }
5107
5108 /**
5109  * ipr_device_reset - Reset the device
5110  * @ioa_cfg:    ioa config struct
5111  * @res:                resource entry struct
5112  *
5113  * This function issues a device reset to the affected device.
5114  * If the device is a SCSI device, a LUN reset will be sent
5115  * to the device first. If that does not work, a target reset
5116  * will be sent. If the device is a SATA device, a PHY reset will
5117  * be sent.
5118  *
5119  * Return value:
5120  *      0 on success / non-zero on failure
5121  **/
5122 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5123                             struct ipr_resource_entry *res)
5124 {
5125         struct ipr_cmnd *ipr_cmd;
5126         struct ipr_ioarcb *ioarcb;
5127         struct ipr_cmd_pkt *cmd_pkt;
5128         struct ipr_ioarcb_ata_regs *regs;
5129         u32 ioasc;
5130
5131         ENTER;
5132         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5133         ioarcb = &ipr_cmd->ioarcb;
5134         cmd_pkt = &ioarcb->cmd_pkt;
5135
5136         if (ipr_cmd->ioa_cfg->sis64) {
5137                 regs = &ipr_cmd->i.ata_ioadl.regs;
5138                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5139         } else
5140                 regs = &ioarcb->u.add_data.u.regs;
5141
5142         ioarcb->res_handle = res->res_handle;
5143         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5144         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5145         if (ipr_is_gata(res)) {
5146                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5147                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5148                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5149         }
5150
5151         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5152         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5153         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5154         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5155                 if (ipr_cmd->ioa_cfg->sis64)
5156                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5157                                sizeof(struct ipr_ioasa_gata));
5158                 else
5159                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5160                                sizeof(struct ipr_ioasa_gata));
5161         }
5162
5163         LEAVE;
5164         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5165 }
5166
/**
 * ipr_sata_reset - Reset the SATA port
 * @link:       SATA link to reset
 * @classes:    class of the attached device
 * @deadline:   unused
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
5177 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5178                                 unsigned long deadline)
5179 {
5180         struct ipr_sata_port *sata_port = link->ap->private_data;
5181         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5182         struct ipr_resource_entry *res;
5183         unsigned long lock_flags = 0;
5184         int rc = -ENXIO;
5185
5186         ENTER;
5187         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5188         while (ioa_cfg->in_reset_reload) {
5189                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5190                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5191                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5192         }
5193
5194         res = sata_port->res;
5195         if (res) {
5196                 rc = ipr_device_reset(ioa_cfg, res);
5197                 *classes = res->ata_class;
5198         }
5199
5200         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5201         LEAVE;
5202         return rc;
5203 }
5204
/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:   scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *      SUCCESS / FAILED
 **/
5216 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5217 {
5218         struct ipr_cmnd *ipr_cmd;
5219         struct ipr_ioa_cfg *ioa_cfg;
5220         struct ipr_resource_entry *res;
5221         struct ata_port *ap;
5222         int rc = 0;
5223         struct ipr_hrr_queue *hrrq;
5224
5225         ENTER;
5226         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5227         res = scsi_cmd->device->hostdata;
5228
5229         if (!res)
5230                 return FAILED;
5231
5232         /*
5233          * If we are currently going through reset/reload, return failed. This will force the
5234          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5235          * reset to complete
5236          */
5237         if (ioa_cfg->in_reset_reload)
5238                 return FAILED;
5239         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5240                 return FAILED;
5241
5242         for_each_hrrq(hrrq, ioa_cfg) {
5243                 spin_lock(&hrrq->_lock);
5244                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5245                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5246                                 if (ipr_cmd->scsi_cmd)
5247                                         ipr_cmd->done = ipr_scsi_eh_done;
5248                                 if (ipr_cmd->qc)
5249                                         ipr_cmd->done = ipr_sata_eh_done;
5250                                 if (ipr_cmd->qc &&
5251                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5252                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5253                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5254                                 }
5255                         }
5256                 }
5257                 spin_unlock(&hrrq->_lock);
5258         }
5259         res->resetting_device = 1;
5260         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5261
5262         if (ipr_is_gata(res) && res->sata_port) {
5263                 ap = res->sata_port->ap;
5264                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5265                 ata_std_error_handler(ap);
5266                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5267
5268                 for_each_hrrq(hrrq, ioa_cfg) {
5269                         spin_lock(&hrrq->_lock);
5270                         list_for_each_entry(ipr_cmd,
5271                                             &hrrq->hrrq_pending_q, queue) {
5272                                 if (ipr_cmd->ioarcb.res_handle ==
5273                                     res->res_handle) {
5274                                         rc = -EIO;
5275                                         break;
5276                                 }
5277                         }
5278                         spin_unlock(&hrrq->_lock);
5279                 }
5280         } else
5281                 rc = ipr_device_reset(ioa_cfg, res);
5282         res->resetting_device = 0;
5283         res->reset_occurred = 1;
5284
5285         LEAVE;
5286         return rc ? FAILED : SUCCESS;
5287 }
5288
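/**
 * ipr_eh_dev_reset - Reset the device
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/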
5289 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5290 {
5291         int rc;
5292         struct ipr_ioa_cfg *ioa_cfg;
5293
5294         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5295
5296         spin_lock_irq(cmd->device->host->host_lock);
5297         rc = __ipr_eh_dev_reset(cmd);
5298         spin_unlock_irq(cmd->device->host->host_lock);
5299
5300         if (rc == SUCCESS)
5301                 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5302
5303         return rc;
5304 }
5305
5306 /**
5307  * ipr_bus_reset_done - Op done function for bus reset.
5308  * @ipr_cmd:    ipr command struct
5309  *
5310  * This function is the op done function for a bus reset
5311  *
5312  * Return value:
5313  *      none
5314  **/
5315 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5316 {
5317         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5318         struct ipr_resource_entry *res;
5319
5320         ENTER;
5321         if (!ioa_cfg->sis64)
5322                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5323                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5324                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5325                                 break;
5326                         }
5327                 }
5328
5329         /*
5330          * If abort has not completed, indicate the reset has, else call the
5331          * abort's done function to wake the sleeping eh thread
5332          */
5333         if (ipr_cmd->sibling->sibling)
5334                 ipr_cmd->sibling->sibling = NULL;
5335         else
5336                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5337
5338         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5339         LEAVE;
5340 }
5341
5342 /**
5343  * ipr_abort_timeout - An abort task has timed out
5344  * @ipr_cmd:    ipr command struct
5345  *
5346  * This function handles when an abort task times out. If this
5347  * happens we issue a bus reset since we have resources tied
5348  * up that must be freed before returning to the midlayer.
5349  *
5350  * Return value:
5351  *      none
5352  **/
5353 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5354 {
5355         struct ipr_cmnd *reset_cmd;
5356         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5357         struct ipr_cmd_pkt *cmd_pkt;
5358         unsigned long lock_flags = 0;
5359
5360         ENTER;
5361         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5362         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5363                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5364                 return;
5365         }
5366
5367         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5368         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5369         ipr_cmd->sibling = reset_cmd;
5370         reset_cmd->sibling = ipr_cmd;
5371         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5372         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5373         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5374         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5375         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5376
5377         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5378         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5379         LEAVE;
5380 }
5381
5382 /**
5383  * ipr_cancel_op - Cancel specified op
5384  * @scsi_cmd:   scsi command struct
5385  *
5386  * This function cancels specified op.
5387  *
5388  * Return value:
5389  *      SUCCESS / FAILED
5390  **/
5391 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5392 {
5393         struct ipr_cmnd *ipr_cmd;
5394         struct ipr_ioa_cfg *ioa_cfg;
5395         struct ipr_resource_entry *res;
5396         struct ipr_cmd_pkt *cmd_pkt;
5397         u32 ioasc, int_reg;
5398         int op_found = 0;
5399         struct ipr_hrr_queue *hrrq;
5400
5401         ENTER;
5402         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5403         res = scsi_cmd->device->hostdata;
5404
5405         /* If we are currently going through reset/reload, return failed.
5406          * This will force the mid-layer to call ipr_eh_host_reset,
5407          * which will then go to sleep and wait for the reset to complete
5408          */
5409         if (ioa_cfg->in_reset_reload ||
5410             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5411                 return FAILED;
5412         if (!res)
5413                 return FAILED;
5414
5415         /*
5416          * If we are aborting a timed out op, chances are that the timeout was caused
5417          * by a still not detected EEH error. In such cases, reading a register will
5418          * trigger the EEH recovery infrastructure.
5419          */
5420         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5421
5422         if (!ipr_is_gscsi(res))
5423                 return FAILED;
5424
5425         for_each_hrrq(hrrq, ioa_cfg) {
5426                 spin_lock(&hrrq->_lock);
5427                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5428                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5429                                 ipr_cmd->done = ipr_scsi_eh_done;
5430                                 op_found = 1;
5431                                 break;
5432                         }
5433                 }
5434                 spin_unlock(&hrrq->_lock);
5435         }
5436
5437         if (!op_found)
5438                 return SUCCESS;
5439
5440         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5441         ipr_cmd->ioarcb.res_handle = res->res_handle;
5442         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5443         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5444         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5445         ipr_cmd->u.sdev = scsi_cmd->device;
5446
5447         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5448                     scsi_cmd->cmnd[0]);
5449         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5450         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5451
        /*
         * If the abort task timed out and we sent a bus reset, we will get
         * one of the following responses to the abort
         */
5456         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5457                 ioasc = 0;
5458                 ipr_trace;
5459         }
5460
5461         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5462         if (!ipr_is_naca_model(res))
5463                 res->needs_sync_complete = 1;
5464
5465         LEAVE;
5466         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5467 }
5468
/**
 * ipr_scan_finished - Report whether the adapter device scan is complete
 * @shost:          scsi host struct
 * @elapsed_time:   elapsed scan time, in jiffies
 *
 * Return value:
 *      0 if scan in progress / 1 if scan is complete
 **/
5476 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5477 {
5478         unsigned long lock_flags;
5479         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5480         int rc = 0;
5481
5482         spin_lock_irqsave(shost->host_lock, lock_flags);
5483         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5484                 rc = 1;
5485         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5486                 rc = 1;
5487         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5488         return rc;
5489 }
5490
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:   scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/
5498 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5499 {
5500         unsigned long flags;
5501         int rc;
5502         struct ipr_ioa_cfg *ioa_cfg;
5503
5504         ENTER;
5505
5506         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5507
5508         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5509         rc = ipr_cancel_op(scsi_cmd);
5510         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5511
5512         if (rc == SUCCESS)
5513                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5514         LEAVE;
5515         return rc;
5516 }
5517
5518 /**
5519  * ipr_handle_other_interrupt - Handle "other" interrupts
5520  * @ioa_cfg:    ioa config struct
5521  * @int_reg:    interrupt register
5522  *
5523  * Return value:
5524  *      IRQ_NONE / IRQ_HANDLED
5525  **/
5526 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5527                                               u32 int_reg)
5528 {
5529         irqreturn_t rc = IRQ_HANDLED;
5530         u32 int_mask_reg;
5531
5532         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5533         int_reg &= ~int_mask_reg;
5534
5535         /* If an interrupt on the adapter did not occur, ignore it.
5536          * Or in the case of SIS 64, check for a stage change interrupt.
5537          */
5538         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5539                 if (ioa_cfg->sis64) {
5540                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5541                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5542                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5543
5544                                 /* clear stage change */
5545                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5546                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5547                                 list_del(&ioa_cfg->reset_cmd->queue);
5548                                 del_timer(&ioa_cfg->reset_cmd->timer);
5549                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5550                                 return IRQ_HANDLED;
5551                         }
5552                 }
5553
5554                 return IRQ_NONE;
5555         }
5556
5557         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5558                 /* Mask the interrupt */
5559                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5560                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5561
5562                 list_del(&ioa_cfg->reset_cmd->queue);
5563                 del_timer(&ioa_cfg->reset_cmd->timer);
5564                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5565         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5566                 if (ioa_cfg->clear_isr) {
5567                         if (ipr_debug && printk_ratelimit())
5568                                 dev_err(&ioa_cfg->pdev->dev,
5569                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5570                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5571                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5572                         return IRQ_NONE;
5573                 }
5574         } else {
5575                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5576                         ioa_cfg->ioa_unit_checked = 1;
5577                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5578                         dev_err(&ioa_cfg->pdev->dev,
5579                                 "No Host RRQ. 0x%08X\n", int_reg);
5580                 else
5581                         dev_err(&ioa_cfg->pdev->dev,
5582                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5583
5584                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5585                         ioa_cfg->sdt_state = GET_DUMP;
5586
5587                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5588                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5589         }
5590
5591         return rc;
5592 }
5593
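/* Illustrative sketch (not driver code): the read-and-mask step at the
 * top of ipr_handle_other_interrupt() is the usual pattern for a status
 * register with a companion mask register: read the pending bits,
 * discard the masked-off ones, then interpret what remains. Names are
 * hypothetical.
 */
#if 0
static u32 toy_pending_interrupts(void __iomem *status_reg,
				  void __iomem *mask_reg)
{
	u32 pending = readl(status_reg);

	/* a set mask bit means that source is disabled, so ignore it */
	return pending & ~readl(mask_reg);
}
#endif
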
5594 /**
5595  * ipr_isr_eh - Interrupt service routine error handler
5596  * @ioa_cfg:    ioa config struct
5597  * @msg:        message to log
5598  * @number:     value logged along with the message
5598  *
5599  * Return value:
5600  *      none
5601  **/
5602 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5603 {
5604         ioa_cfg->errors_logged++;
5605         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5606
5607         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5608                 ioa_cfg->sdt_state = GET_DUMP;
5609
5610         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5611 }
5612
5613 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5614                                                 struct list_head *doneq)
5615 {
5616         u32 ioasc;
5617         u16 cmd_index;
5618         struct ipr_cmnd *ipr_cmd;
5619         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5620         int num_hrrq = 0;
5621
5622         /* If interrupts are disabled, ignore the interrupt */
5623         if (!hrr_queue->allow_interrupts)
5624                 return 0;
5625
5626         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5627                hrr_queue->toggle_bit) {
5628
5629                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5630                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5631                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5632
5633                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5634                              cmd_index < hrr_queue->min_cmd_id)) {
5635                         ipr_isr_eh(ioa_cfg,
5636                                 "Invalid response handle from IOA: ",
5637                                 cmd_index);
5638                         break;
5639                 }
5640
5641                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5642                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5643
5644                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5645
5646                 list_move_tail(&ipr_cmd->queue, doneq);
5647
5648                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5649                         hrr_queue->hrrq_curr++;
5650                 } else {
5651                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5652                         hrr_queue->toggle_bit ^= 1u;
5653                 }
5654                 num_hrrq++;
5655                 if (budget > 0 && num_hrrq >= budget)
5656                         break;
5657         }
5658
5659         return num_hrrq;
5660 }
5661
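/* Illustrative sketch (not driver code): the HRRQ consumed above is a
 * circular ring of 32-bit response entries. Instead of a producer
 * index, each entry carries a toggle bit that the adapter inverts on
 * every pass around the ring; the host consumes entries while the bit
 * matches its expected value and flips that expectation when it wraps.
 * The names below are hypothetical.
 */
#if 0
struct toy_hrrq {
	__be32 *curr;		/* next entry to consume */
	__be32 *start;		/* first entry of the ring */
	__be32 *end;		/* last entry of the ring */
	u32 toggle_bit;		/* toggle value expected in fresh entries */
};

static int toy_consume(struct toy_hrrq *q)
{
	int consumed = 0;

	while ((be32_to_cpu(*q->curr) & IPR_HRRQ_TOGGLE_BIT) == q->toggle_bit) {
		/* ... handle the response entry here ... */
		if (q->curr < q->end) {
			q->curr++;
		} else {
			/* wrapped: expect the inverted toggle next pass */
			q->curr = q->start;
			q->toggle_bit ^= 1u;
		}
		consumed++;
	}
	return consumed;
}
#endif
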
5662 static int ipr_iopoll(struct irq_poll *iop, int budget)
5663 {
5664         struct ipr_ioa_cfg *ioa_cfg;
5665         struct ipr_hrr_queue *hrrq;
5666         struct ipr_cmnd *ipr_cmd, *temp;
5667         unsigned long hrrq_flags;
5668         int completed_ops;
5669         LIST_HEAD(doneq);
5670
5671         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5672         ioa_cfg = hrrq->ioa_cfg;
5673
5674         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5675         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5676
5677         if (completed_ops < budget)
5678                 irq_poll_complete(iop);
5679         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5680
5681         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5682                 list_del(&ipr_cmd->queue);
5683                 del_timer(&ipr_cmd->timer);
5684                 ipr_cmd->fast_done(ipr_cmd);
5685         }
5686
5687         return completed_ops;
5688 }
5689
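/* Illustrative sketch (not driver code): ipr_iopoll() above follows the
 * standard irq_poll contract: consume at most @budget completions, and
 * call irq_poll_complete() only when the queue drained below the
 * budget, which re-arms interrupt-driven completion. A hypothetical
 * poller has the same shape; toy_consume_up_to() is assumed, not real.
 */
#if 0
static int toy_poll(struct irq_poll *iop, int budget)
{
	int done = toy_consume_up_to(iop, budget);	/* hypothetical */

	if (done < budget)
		irq_poll_complete(iop);	/* drained: back to interrupts */
	return done;
}
#endif
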
5690 /**
5691  * ipr_isr - Interrupt service routine
5692  * @irq:        irq number
5693  * @devp:       pointer to hrr queue struct
5694  *
5695  * Return value:
5696  *      IRQ_NONE / IRQ_HANDLED
5697  **/
5698 static irqreturn_t ipr_isr(int irq, void *devp)
5699 {
5700         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5701         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5702         unsigned long hrrq_flags = 0;
5703         u32 int_reg = 0;
5704         int num_hrrq = 0;
5705         int irq_none = 0;
5706         struct ipr_cmnd *ipr_cmd, *temp;
5707         irqreturn_t rc = IRQ_NONE;
5708         LIST_HEAD(doneq);
5709
5710         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5711         /* If interrupts are disabled, ignore the interrupt */
5712         if (!hrrq->allow_interrupts) {
5713                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5714                 return IRQ_NONE;
5715         }
5716
5717         while (1) {
5718                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5719                         rc =  IRQ_HANDLED;
5720
5721                         if (!ioa_cfg->clear_isr)
5722                                 break;
5723
5724                         /* Clear the PCI interrupt */
5725                         num_hrrq = 0;
5726                         do {
5727                                 writel(IPR_PCII_HRRQ_UPDATED,
5728                                      ioa_cfg->regs.clr_interrupt_reg32);
5729                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5730                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5731                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5732
5733                 } else if (rc == IRQ_NONE && irq_none == 0) {
5734                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5735                         irq_none++;
5736                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5737                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5738                         ipr_isr_eh(ioa_cfg,
5739                                 "Error clearing HRRQ: ", num_hrrq);
5740                         rc = IRQ_HANDLED;
5741                         break;
5742                 } else
5743                         break;
5744         }
5745
5746         if (unlikely(rc == IRQ_NONE))
5747                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5748
5749         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5750         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5751                 list_del(&ipr_cmd->queue);
5752                 del_timer(&ipr_cmd->timer);
5753                 ipr_cmd->fast_done(ipr_cmd);
5754         }
5755         return rc;
5756 }
5757
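/* Illustrative sketch (not driver code): on adapters that need explicit
 * clearing (clear_isr), ipr_isr() above acknowledges HRRQ_UPDATED and
 * re-reads the status register, bounded by IPR_MAX_HRRQ_RETRIES so a
 * stuck bit cannot hang the ISR; the read-back also flushes the posted
 * write. A generic bounded acknowledge loop, with hypothetical names:
 */
#if 0
static int toy_ack_until_clear(void __iomem *clr_reg,
			       void __iomem *sense_reg,
			       u32 bit, int max_retries)
{
	int tries = 0;
	u32 status;

	do {
		writel(bit, clr_reg);
		status = readl(sense_reg);	/* flush and verify */
	} while ((status & bit) && tries++ < max_retries);

	return !(status & bit);	/* 1 if the bit really cleared */
}
#endif
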
5758 /**
5759  * ipr_isr_mhrrq - Interrupt service routine
5760  * @irq:        irq number
5761  * @devp:       pointer to hrr queue struct
5762  *
5763  * Return value:
5764  *      IRQ_NONE / IRQ_HANDLED
5765  **/
5766 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5767 {
5768         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5769         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5770         unsigned long hrrq_flags = 0;
5771         struct ipr_cmnd *ipr_cmd, *temp;
5772         irqreturn_t rc = IRQ_NONE;
5773         LIST_HEAD(doneq);
5774
5775         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5776
5777         /* If interrupts are disabled, ignore the interrupt */
5778         if (!hrrq->allow_interrupts) {
5779                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5780                 return IRQ_NONE;
5781         }
5782
5783         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5784                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5785                        hrrq->toggle_bit) {
5786                         irq_poll_sched(&hrrq->iopoll);
5787                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5788                         return IRQ_HANDLED;
5789                 }
5790         } else {
5791                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5792                         hrrq->toggle_bit)
5793
5794                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5795                                 rc =  IRQ_HANDLED;
5796         }
5797
5798         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5799
5800         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5801                 list_del(&ipr_cmd->queue);
5802                 del_timer(&ipr_cmd->timer);
5803                 ipr_cmd->fast_done(ipr_cmd);
5804         }
5805         return rc;
5806 }
5807
5808 /**
5809  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5810  * @ioa_cfg:    ioa config struct
5811  * @ipr_cmd:    ipr command struct
5812  *
5813  * Return value:
5814  *      0 on success / -1 on failure
5815  **/
5816 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5817                              struct ipr_cmnd *ipr_cmd)
5818 {
5819         int i, nseg;
5820         struct scatterlist *sg;
5821         u32 length;
5822         u32 ioadl_flags = 0;
5823         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5824         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5825         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5826
5827         length = scsi_bufflen(scsi_cmd);
5828         if (!length)
5829                 return 0;
5830
5831         nseg = scsi_dma_map(scsi_cmd);
5832         if (nseg < 0) {
5833                 if (printk_ratelimit())
5834                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5835                 return -1;
5836         }
5837
5838         ipr_cmd->dma_use_sg = nseg;
5839
5840         ioarcb->data_transfer_length = cpu_to_be32(length);
5841         ioarcb->ioadl_len =
5842                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5843
5844         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5845                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5846                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5847         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5848                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5849
5850         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5851                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5852                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5853                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5854         }
5855
5856         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5857         return 0;
5858 }
5859
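/* Illustrative sketch (not driver code): each IOADL64 element built
 * above is a fixed-size descriptor (flags, byte count, 64-bit DMA
 * address), and the chain is terminated by OR-ing IPR_IOADL_FLAGS_LAST
 * into the final element rather than by a length field. The fill loop
 * over a hypothetical pre-mapped segment array looks like this:
 */
#if 0
struct toy_seg {
	dma_addr_t addr;
	u32 len;
};

static void toy_fill_ioadl64(struct ipr_ioadl64_desc *ioadl64,
			     const struct toy_seg *seg, int nseg, u32 flags)
{
	int i;

	for (i = 0; i < nseg; i++) {
		ioadl64[i].flags = cpu_to_be32(flags);
		ioadl64[i].data_len = cpu_to_be32(seg[i].len);
		ioadl64[i].address = cpu_to_be64(seg[i].addr);
	}
	/* the adapter stops at the descriptor carrying the LAST flag */
	ioadl64[nseg - 1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
#endif
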
5860 /**
5861  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5862  * @ioa_cfg:    ioa config struct
5863  * @ipr_cmd:    ipr command struct
5864  *
5865  * Return value:
5866  *      0 on success / -1 on failure
5867  **/
5868 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5869                            struct ipr_cmnd *ipr_cmd)
5870 {
5871         int i, nseg;
5872         struct scatterlist *sg;
5873         u32 length;
5874         u32 ioadl_flags = 0;
5875         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5876         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5877         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5878
5879         length = scsi_bufflen(scsi_cmd);
5880         if (!length)
5881                 return 0;
5882
5883         nseg = scsi_dma_map(scsi_cmd);
5884         if (nseg < 0) {
5885                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5886                 return -1;
5887         }
5888
5889         ipr_cmd->dma_use_sg = nseg;
5890
5891         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5892                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5893                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5894                 ioarcb->data_transfer_length = cpu_to_be32(length);
5895                 ioarcb->ioadl_len =
5896                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5897         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5898                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5899                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5900                 ioarcb->read_ioadl_len =
5901                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5902         }
5903
5904         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5905                 ioadl = ioarcb->u.add_data.u.ioadl;
5906                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5907                                     offsetof(struct ipr_ioarcb, u.add_data));
5908                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5909         }
5910
5911         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5912                 ioadl[i].flags_and_data_len =
5913                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5914                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5915         }
5916
5917         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5918         return 0;
5919 }
5920
5921 /**
5922  * ipr_erp_done - Process completion of ERP for a device
5923  * @ipr_cmd:            ipr command struct
5924  *
5925  * This function copies the sense buffer into the scsi_cmd
5926  * struct and invokes the scsi_done function.
5927  *
5928  * Return value:
5929  *      nothing
5930  **/
5931 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5932 {
5933         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5934         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5935         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5936
5937         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5938                 scsi_cmd->result |= (DID_ERROR << 16);
5939                 scmd_printk(KERN_ERR, scsi_cmd,
5940                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5941         } else {
5942                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5943                        SCSI_SENSE_BUFFERSIZE);
5944         }
5945
5946         if (res) {
5947                 if (!ipr_is_naca_model(res))
5948                         res->needs_sync_complete = 1;
5949                 res->in_erp = 0;
5950         }
5951         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5952         scsi_cmd->scsi_done(scsi_cmd);
5953         if (ipr_cmd->eh_comp)
5954                 complete(ipr_cmd->eh_comp);
5955         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5956 }
5957
5958 /**
5959  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5960  * @ipr_cmd:    ipr command struct
5961  *
5962  * Return value:
5963  *      none
5964  **/
5965 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5966 {
5967         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5968         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5969         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5970
5971         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5972         ioarcb->data_transfer_length = 0;
5973         ioarcb->read_data_transfer_length = 0;
5974         ioarcb->ioadl_len = 0;
5975         ioarcb->read_ioadl_len = 0;
5976         ioasa->hdr.ioasc = 0;
5977         ioasa->hdr.residual_data_len = 0;
5978
5979         if (ipr_cmd->ioa_cfg->sis64)
5980                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5981                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5982         else {
5983                 ioarcb->write_ioadl_addr =
5984                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5985                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5986         }
5987 }
5988
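/* Illustrative sketch (not driver code): the address arithmetic above
 * works because the whole struct ipr_cmnd sits in one DMA-coherent
 * allocation, so the bus address of any embedded member is the block's
 * bus address plus offsetof(). A minimal sketch of the idiom, with a
 * hypothetical structure:
 */
#if 0
struct toy_cmd {
	u32 header;
	u64 table[16];		/* embedded descriptor table */
};

static dma_addr_t toy_table_bus_addr(dma_addr_t cmd_bus_addr)
{
	/* valid only while the member shares the coherent mapping */
	return cmd_bus_addr + offsetof(struct toy_cmd, table);
}
#endif
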
5989 /**
5990  * ipr_erp_request_sense - Send request sense to a device
5991  * @ipr_cmd:    ipr command struct
5992  *
5993  * This function sends a request sense to a device as a result
5994  * of a check condition.
5995  *
5996  * Return value:
5997  *      nothing
5998  **/
5999 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6000 {
6001         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6002         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6003
6004         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6005                 ipr_erp_done(ipr_cmd);
6006                 return;
6007         }
6008
6009         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6010
6011         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6012         cmd_pkt->cdb[0] = REQUEST_SENSE;
6013         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6014         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6015         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6016         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6017
6018         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6019                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6020
6021         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6022                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6023 }
6024
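/* Illustrative sketch (not driver code): REQUEST SENSE uses a 6-byte
 * CDB in which byte 0 is the opcode (0x03) and byte 4 is the allocation
 * length, which is why only cdb[0] and cdb[4] are set above. The same
 * CDB built into a plain buffer:
 */
#if 0
static void toy_build_request_sense_cdb(u8 cdb[6], u8 alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = REQUEST_SENSE;		/* opcode 0x03 */
	cdb[4] = alloc_len;		/* e.g. SCSI_SENSE_BUFFERSIZE */
}
#endif
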
6025 /**
6026  * ipr_erp_cancel_all - Send cancel all to a device
6027  * @ipr_cmd:    ipr command struct
6028  *
6029  * This function sends a cancel all to a device to clear the
6030  * queue. If we are running TCQ on the device, QERR is set to 1,
6031  * which means all outstanding ops have been dropped on the floor.
6032  * Cancel all will return them to us.
6033  *
6034  * Return value:
6035  *      nothing
6036  **/
6037 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6038 {
6039         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6040         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6041         struct ipr_cmd_pkt *cmd_pkt;
6042
6043         res->in_erp = 1;
6044
6045         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6046
6047         if (!scsi_cmd->device->simple_tags) {
6048                 ipr_erp_request_sense(ipr_cmd);
6049                 return;
6050         }
6051
6052         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6053         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6054         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6055
6056         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6057                    IPR_CANCEL_ALL_TIMEOUT);
6058 }
6059
6060 /**
6061  * ipr_dump_ioasa - Dump contents of IOASA
6062  * @ioa_cfg:    ioa config struct
6063  * @ipr_cmd:    ipr command struct
6064  * @res:                resource entry struct
6065  *
6066  * This function is invoked by the interrupt handler when ops
6067  * fail. It will log the IOASA if appropriate. Only called
6068  * for GPDD ops.
6069  *
6070  * Return value:
6071  *      none
6072  **/
6073 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6074                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6075 {
6076         int i;
6077         u16 data_len;
6078         u32 ioasc, fd_ioasc;
6079         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6080         __be32 *ioasa_data = (__be32 *)ioasa;
6081         int error_index;
6082
6083         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6084         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6085
6086         if (0 == ioasc)
6087                 return;
6088
6089         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6090                 return;
6091
6092         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6093                 error_index = ipr_get_error(fd_ioasc);
6094         else
6095                 error_index = ipr_get_error(ioasc);
6096
6097         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6098                 /* Don't log an error if the IOA already logged one */
6099                 if (ioasa->hdr.ilid != 0)
6100                         return;
6101
6102                 if (!ipr_is_gscsi(res))
6103                         return;
6104
6105                 if (ipr_error_table[error_index].log_ioasa == 0)
6106                         return;
6107         }
6108
6109         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6110
6111         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6112         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6113                 data_len = sizeof(struct ipr_ioasa64);
6114         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6115                 data_len = sizeof(struct ipr_ioasa);
6116
6117         ipr_err("IOASA Dump:\n");
6118
6119         for (i = 0; i < data_len / 4; i += 4) {
6120                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6121                         be32_to_cpu(ioasa_data[i]),
6122                         be32_to_cpu(ioasa_data[i+1]),
6123                         be32_to_cpu(ioasa_data[i+2]),
6124                         be32_to_cpu(ioasa_data[i+3]));
6125         }
6126 }
6127
6128 /**
6129  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6130  * @ipr_cmd:    ipr command struct
6132  *
6133  * Return value:
6134  *      none
6135  **/
6136 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6137 {
6138         u32 failing_lba;
6139         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6140         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6141         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6142         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6143
6144         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6145
6146         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6147                 return;
6148
6149         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6150
6151         if (ipr_is_vset_device(res) &&
6152             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6153             ioasa->u.vset.failing_lba_hi != 0) {
6154                 sense_buf[0] = 0x72;
6155                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6156                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6157                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6158
6159                 sense_buf[7] = 12;
6160                 sense_buf[8] = 0;
6161                 sense_buf[9] = 0x0A;
6162                 sense_buf[10] = 0x80;
6163
6164                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6165
6166                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6167                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6168                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6169                 sense_buf[15] = failing_lba & 0x000000ff;
6170
6171                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6172
6173                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6174                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6175                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6176                 sense_buf[19] = failing_lba & 0x000000ff;
6177         } else {
6178                 sense_buf[0] = 0x70;
6179                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6180                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6181                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6182
6183                 /* Illegal request */
6184                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6185                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6186                         sense_buf[7] = 10;      /* additional length */
6187
6188                         /* IOARCB was in error */
6189                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6190                                 sense_buf[15] = 0xC0;
6191                         else    /* Parameter data was invalid */
6192                                 sense_buf[15] = 0x80;
6193
6194                         sense_buf[16] =
6195                             ((IPR_FIELD_POINTER_MASK &
6196                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6197                         sense_buf[17] =
6198                             (IPR_FIELD_POINTER_MASK &
6199                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6200                 } else {
6201                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6202                                 if (ipr_is_vset_device(res))
6203                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6204                                 else
6205                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6206
6207                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6208                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6209                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6210                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6211                                 sense_buf[6] = failing_lba & 0x000000ff;
6212                         }
6213
6214                         sense_buf[7] = 6;       /* additional length */
6215                 }
6216         }
6217 }
6218
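/* Illustrative sketch (not driver code): when the failing LBA fits in
 * 32 bits, ipr_gen_sense() above uses the fixed sense format (response
 * code 0x70), storing the LBA big-endian in the INFORMATION field
 * (bytes 3-6) and setting bit 7 of byte 0 to mark it valid; an LBA
 * wider than 32 bits forces descriptor format (0x72). The fixed-format
 * packing in isolation:
 */
#if 0
static void toy_set_fixed_sense_info(u8 *sense_buf, u32 lba)
{
	sense_buf[0] |= 0x80;		/* INFORMATION field valid */
	sense_buf[3] = (lba >> 24) & 0xff;
	sense_buf[4] = (lba >> 16) & 0xff;
	sense_buf[5] = (lba >> 8) & 0xff;
	sense_buf[6] = lba & 0xff;
}
#endif
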
6219 /**
6220  * ipr_get_autosense - Copy autosense data to sense buffer
6221  * @ipr_cmd:    ipr command struct
6222  *
6223  * This function copies the autosense buffer to the buffer
6224  * in the scsi_cmd, if there is autosense available.
6225  *
6226  * Return value:
6227  *      1 if autosense was available / 0 if not
6228  **/
6229 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6230 {
6231         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6232         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6233
6234         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6235                 return 0;
6236
6237         if (ipr_cmd->ioa_cfg->sis64)
6238                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6239                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6240                            SCSI_SENSE_BUFFERSIZE));
6241         else
6242                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6243                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6244                            SCSI_SENSE_BUFFERSIZE));
6245         return 1;
6246 }
6247
6248 /**
6249  * ipr_erp_start - Process an error response for a SCSI op
6250  * @ioa_cfg:    ioa config struct
6251  * @ipr_cmd:    ipr command struct
6252  *
6253  * This function determines whether or not to initiate ERP
6254  * on the affected device.
6255  *
6256  * Return value:
6257  *      nothing
6258  **/
6259 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6260                               struct ipr_cmnd *ipr_cmd)
6261 {
6262         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6263         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6264         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6265         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6266
6267         if (!res) {
6268                 ipr_scsi_eh_done(ipr_cmd);
6269                 return;
6270         }
6271
6272         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6273                 ipr_gen_sense(ipr_cmd);
6274
6275         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6276
6277         switch (masked_ioasc) {
6278         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6279                 if (ipr_is_naca_model(res))
6280                         scsi_cmd->result |= (DID_ABORT << 16);
6281                 else
6282                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6283                 break;
6284         case IPR_IOASC_IR_RESOURCE_HANDLE:
6285         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6286                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6287                 break;
6288         case IPR_IOASC_HW_SEL_TIMEOUT:
6289                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6290                 if (!ipr_is_naca_model(res))
6291                         res->needs_sync_complete = 1;
6292                 break;
6293         case IPR_IOASC_SYNC_REQUIRED:
6294                 if (!res->in_erp)
6295                         res->needs_sync_complete = 1;
6296                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6297                 break;
6298         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6299         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6300                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6301                 break;
6302         case IPR_IOASC_BUS_WAS_RESET:
6303         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6304                 /*
6305                  * Report the bus reset and ask for a retry. The device
6306                  * will give CC/UA the next command.
6307                  */
6308                 if (!res->resetting_device)
6309                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6310                 scsi_cmd->result |= (DID_ERROR << 16);
6311                 if (!ipr_is_naca_model(res))
6312                         res->needs_sync_complete = 1;
6313                 break;
6314         case IPR_IOASC_HW_DEV_BUS_STATUS:
6315                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6316                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6317                         if (!ipr_get_autosense(ipr_cmd)) {
6318                                 if (!ipr_is_naca_model(res)) {
6319                                         ipr_erp_cancel_all(ipr_cmd);
6320                                         return;
6321                                 }
6322                         }
6323                 }
6324                 if (!ipr_is_naca_model(res))
6325                         res->needs_sync_complete = 1;
6326                 break;
6327         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6328                 break;
6329         case IPR_IOASC_IR_NON_OPTIMIZED:
6330                 if (res->raw_mode) {
6331                         res->raw_mode = 0;
6332                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6333                 } else
6334                         scsi_cmd->result |= (DID_ERROR << 16);
6335                 break;
6336         default:
6337                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6338                         scsi_cmd->result |= (DID_ERROR << 16);
6339                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6340                         res->needs_sync_complete = 1;
6341                 break;
6342         }
6343
6344         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6345         scsi_cmd->scsi_done(scsi_cmd);
6346         if (ipr_cmd->eh_comp)
6347                 complete(ipr_cmd->eh_comp);
6348         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6349 }
6350
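/* Illustrative sketch (not driver code): the (DID_xxx << 16) idiom used
 * throughout ipr_erp_start() places the host-byte completion code in
 * bits 16-23 of scsi_cmnd->result, while the low byte carries the SCSI
 * status. Composing and decomposing the field, with hypothetical
 * helpers:
 */
#if 0
static u32 toy_result(u8 hbyte, u8 scsi_status)
{
	return (hbyte << 16) | scsi_status;
}

static u8 toy_host_byte(u32 result)
{
	return (result >> 16) & 0xff;
}
#endif
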
6351 /**
6352  * ipr_scsi_done - mid-layer done function
6353  * @ipr_cmd:    ipr command struct
6354  *
6355  * This function is invoked by the interrupt handler for
6356  * ops generated by the SCSI mid-layer
6357  *
6358  * Return value:
6359  *      none
6360  **/
6361 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6362 {
6363         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6364         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6365         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6366         unsigned long lock_flags;
6367
6368         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6369
6370         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6371                 scsi_dma_unmap(scsi_cmd);
6372
6373                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6374                 scsi_cmd->scsi_done(scsi_cmd);
6375                 if (ipr_cmd->eh_comp)
6376                         complete(ipr_cmd->eh_comp);
6377                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6378                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6379         } else {
6380                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6381                 spin_lock(&ipr_cmd->hrrq->_lock);
6382                 ipr_erp_start(ioa_cfg, ipr_cmd);
6383                 spin_unlock(&ipr_cmd->hrrq->_lock);
6384                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6385         }
6386 }
6387
6388 /**
6389  * ipr_queuecommand - Queue a mid-layer request
6390  * @shost:              scsi host struct
6391  * @scsi_cmd:   scsi command struct
6392  *
6393  * This function queues a request generated by the mid-layer.
6394  *
6395  * Return value:
6396  *      0 on success
6397  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6398  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6399  **/
6400 static int ipr_queuecommand(struct Scsi_Host *shost,
6401                             struct scsi_cmnd *scsi_cmd)
6402 {
6403         struct ipr_ioa_cfg *ioa_cfg;
6404         struct ipr_resource_entry *res;
6405         struct ipr_ioarcb *ioarcb;
6406         struct ipr_cmnd *ipr_cmd;
6407         unsigned long hrrq_flags, lock_flags;
6408         int rc;
6409         struct ipr_hrr_queue *hrrq;
6410         int hrrq_id;
6411
6412         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6413
6414         scsi_cmd->result = (DID_OK << 16);
6415         res = scsi_cmd->device->hostdata;
6416
6417         if (ipr_is_gata(res) && res->sata_port) {
6418                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6419                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6420                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6421                 return rc;
6422         }
6423
6424         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6425         hrrq = &ioa_cfg->hrrq[hrrq_id];
6426
6427         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6428         /*
6429          * We are currently blocking all devices due to a host reset.
6430          * We have told the host to stop giving us new requests, but
6431          * ERP ops don't count. FIXME
6432          */
6433         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6434                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6435                 return SCSI_MLQUEUE_HOST_BUSY;
6436         }
6437
6438         /*
6439          * FIXME - Create scsi_set_host_offline interface
6440          *  and the ioa_is_dead check can be removed
6441          */
6442         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6443                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6444                 goto err_nodev;
6445         }
6446
6447         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6448         if (ipr_cmd == NULL) {
6449                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6450                 return SCSI_MLQUEUE_HOST_BUSY;
6451         }
6452         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6453
6454         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6455         ioarcb = &ipr_cmd->ioarcb;
6456
6457         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6458         ipr_cmd->scsi_cmd = scsi_cmd;
6459         ipr_cmd->done = ipr_scsi_eh_done;
6460
6461         if (ipr_is_gscsi(res)) {
6462                 if (scsi_cmd->underflow == 0)
6463                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6464
6465                 if (res->reset_occurred) {
6466                         res->reset_occurred = 0;
6467                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6468                 }
6469         }
6470
6471         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6472                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6473
6474                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6475                 if (scsi_cmd->flags & SCMD_TAGGED)
6476                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6477                 else
6478                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6479         }
6480
6481         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6482             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6483                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6484         }
6485         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6486                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6487
6488                 if (scsi_cmd->underflow == 0)
6489                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6490         }
6491
6492         if (ioa_cfg->sis64)
6493                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6494         else
6495                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6496
6497         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6498         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6499                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6500                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6501                 if (!rc)
6502                         scsi_dma_unmap(scsi_cmd);
6503                 return SCSI_MLQUEUE_HOST_BUSY;
6504         }
6505
6506         if (unlikely(hrrq->ioa_is_dead)) {
6507                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6508                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6509                 scsi_dma_unmap(scsi_cmd);
6510                 goto err_nodev;
6511         }
6512
6513         ioarcb->res_handle = res->res_handle;
6514         if (res->needs_sync_complete) {
6515                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6516                 res->needs_sync_complete = 0;
6517         }
6518         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6519         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6520         ipr_send_command(ipr_cmd);
6521         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6522         return 0;
6523
6524 err_nodev:
6525         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6526         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6527         scsi_cmd->result = (DID_NO_CONNECT << 16);
6528         scsi_cmd->scsi_done(scsi_cmd);
6529         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6530         return 0;
6531 }
6532
6533 /**
6534  * ipr_ioctl - IOCTL handler
6535  * @sdev:       scsi device struct
6536  * @cmd:        IOCTL cmd
6537  * @arg:        IOCTL arg
6538  *
6539  * Return value:
6540  *      0 on success / other on failure
6541  **/
6542 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6543 {
6544         struct ipr_resource_entry *res;
6545
6546         res = (struct ipr_resource_entry *)sdev->hostdata;
6547         if (res && ipr_is_gata(res)) {
6548                 if (cmd == HDIO_GET_IDENTITY)
6549                         return -ENOTTY;
6550                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6551         }
6552
6553         return -EINVAL;
6554 }
6555
6556 /**
6557  * ipr_ioa_info - Get information about the card/driver
6558  * @host:       scsi host struct
6559  *
6560  * Return value:
6561  *      pointer to buffer with description string
6562  **/
6563 static const char *ipr_ioa_info(struct Scsi_Host *host)
6564 {
6565         static char buffer[512];
6566         struct ipr_ioa_cfg *ioa_cfg;
6567         unsigned long lock_flags = 0;
6568
6569         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6570
6571         spin_lock_irqsave(host->host_lock, lock_flags);
6572         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6573         spin_unlock_irqrestore(host->host_lock, lock_flags);
6574
6575         return buffer;
6576 }
6577
6578 static struct scsi_host_template driver_template = {
6579         .module = THIS_MODULE,
6580         .name = "IPR",
6581         .info = ipr_ioa_info,
6582         .ioctl = ipr_ioctl,
6583         .queuecommand = ipr_queuecommand,
6584         .eh_abort_handler = ipr_eh_abort,
6585         .eh_device_reset_handler = ipr_eh_dev_reset,
6586         .eh_host_reset_handler = ipr_eh_host_reset,
6587         .slave_alloc = ipr_slave_alloc,
6588         .slave_configure = ipr_slave_configure,
6589         .slave_destroy = ipr_slave_destroy,
6590         .scan_finished = ipr_scan_finished,
6591         .target_alloc = ipr_target_alloc,
6592         .target_destroy = ipr_target_destroy,
6593         .change_queue_depth = ipr_change_queue_depth,
6594         .bios_param = ipr_biosparam,
6595         .can_queue = IPR_MAX_COMMANDS,
6596         .this_id = -1,
6597         .sg_tablesize = IPR_MAX_SGLIST,
6598         .max_sectors = IPR_IOA_MAX_SECTORS,
6599         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6600         .use_clustering = ENABLE_CLUSTERING,
6601         .shost_attrs = ipr_ioa_attrs,
6602         .sdev_attrs = ipr_dev_attrs,
6603         .proc_name = IPR_NAME,
6604 };
6605
6606 /**
6607  * ipr_ata_phy_reset - libata phy_reset handler
6608  * @ap:         ata port to reset
6609  *
6610  **/
6611 static void ipr_ata_phy_reset(struct ata_port *ap)
6612 {
6613         unsigned long flags;
6614         struct ipr_sata_port *sata_port = ap->private_data;
6615         struct ipr_resource_entry *res = sata_port->res;
6616         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6617         int rc;
6618
6619         ENTER;
6620         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6621         while (ioa_cfg->in_reset_reload) {
6622                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6623                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6624                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6625         }
6626
6627         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6628                 goto out_unlock;
6629
6630         rc = ipr_device_reset(ioa_cfg, res);
6631
6632         if (rc) {
6633                 ap->link.device[0].class = ATA_DEV_NONE;
6634                 goto out_unlock;
6635         }
6636
6637         ap->link.device[0].class = res->ata_class;
6638         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6639                 ap->link.device[0].class = ATA_DEV_NONE;
6640
6641 out_unlock:
6642         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6643         LEAVE;
6644 }
6645
6646 /**
6647  * ipr_ata_post_internal - Cleanup after an internal command
6648  * @qc: ATA queued command
6649  *
6650  * Return value:
6651  *      none
6652  **/
6653 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6654 {
6655         struct ipr_sata_port *sata_port = qc->ap->private_data;
6656         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6657         struct ipr_cmnd *ipr_cmd;
6658         struct ipr_hrr_queue *hrrq;
6659         unsigned long flags;
6660
6661         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6662         while (ioa_cfg->in_reset_reload) {
6663                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6664                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6665                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6666         }
6667
6668         for_each_hrrq(hrrq, ioa_cfg) {
6669                 spin_lock(&hrrq->_lock);
6670                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6671                         if (ipr_cmd->qc == qc) {
6672                                 ipr_device_reset(ioa_cfg, sata_port->res);
6673                                 break;
6674                         }
6675                 }
6676                 spin_unlock(&hrrq->_lock);
6677         }
6678         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6679 }
6680
6681 /**
6682  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6683  * @regs:       destination
6684  * @tf: source ATA taskfile
6685  *
6686  * Return value:
6687  *      none
6688  **/
6689 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6690                              struct ata_taskfile *tf)
6691 {
6692         regs->feature = tf->feature;
6693         regs->nsect = tf->nsect;
6694         regs->lbal = tf->lbal;
6695         regs->lbam = tf->lbam;
6696         regs->lbah = tf->lbah;
6697         regs->device = tf->device;
6698         regs->command = tf->command;
6699         regs->hob_feature = tf->hob_feature;
6700         regs->hob_nsect = tf->hob_nsect;
6701         regs->hob_lbal = tf->hob_lbal;
6702         regs->hob_lbam = tf->hob_lbam;
6703         regs->hob_lbah = tf->hob_lbah;
6704         regs->ctl = tf->ctl;
6705 }
6706
6707 /**
6708  * ipr_sata_done - done function for SATA commands
6709  * @ipr_cmd:    ipr command struct
6710  *
6711  * This function is invoked by the interrupt handler for
6712  * ops generated by the SCSI mid-layer to SATA devices
6713  *
6714  * Return value:
6715  *      none
6716  **/
6717 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6718 {
6719         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6720         struct ata_queued_cmd *qc = ipr_cmd->qc;
6721         struct ipr_sata_port *sata_port = qc->ap->private_data;
6722         struct ipr_resource_entry *res = sata_port->res;
6723         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6724
6725         spin_lock(&ipr_cmd->hrrq->_lock);
6726         if (ipr_cmd->ioa_cfg->sis64)
6727                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6728                        sizeof(struct ipr_ioasa_gata));
6729         else
6730                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6731                        sizeof(struct ipr_ioasa_gata));
6732         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6733
6734         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6735                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6736
6737         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6738                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6739         else
6740                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6741         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6742         spin_unlock(&ipr_cmd->hrrq->_lock);
6743         ata_qc_complete(qc);
6744 }
6745
6746 /**
6747  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6748  * @ipr_cmd:    ipr command struct
6749  * @qc:         ATA queued command
6750  *
6751  **/
6752 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6753                                   struct ata_queued_cmd *qc)
6754 {
6755         u32 ioadl_flags = 0;
6756         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6757         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6758         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6759         int len = qc->nbytes;
6760         struct scatterlist *sg;
6761         unsigned int si;
6762         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6763
6764         if (len == 0)
6765                 return;
6766
6767         if (qc->dma_dir == DMA_TO_DEVICE) {
6768                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6769                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6770         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6771                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6772
6773         ioarcb->data_transfer_length = cpu_to_be32(len);
6774         ioarcb->ioadl_len =
6775                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6776         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6777                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6778
6779         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6780                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6781                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6782                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6783
6784                 last_ioadl64 = ioadl64;
6785                 ioadl64++;
6786         }
6787
6788         if (likely(last_ioadl64))
6789                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6790 }
6791
6792 /**
6793  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6794  * @ipr_cmd:    ipr command struct
6795  * @qc:         ATA queued command
6796  *
6797  **/
6798 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6799                                 struct ata_queued_cmd *qc)
6800 {
6801         u32 ioadl_flags = 0;
6802         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6803         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6804         struct ipr_ioadl_desc *last_ioadl = NULL;
6805         int len = qc->nbytes;
6806         struct scatterlist *sg;
6807         unsigned int si;
6808
6809         if (len == 0)
6810                 return;
6811
6812         if (qc->dma_dir == DMA_TO_DEVICE) {
6813                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6814                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6815                 ioarcb->data_transfer_length = cpu_to_be32(len);
6816                 ioarcb->ioadl_len =
6817                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6818         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6819                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6820                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6821                 ioarcb->read_ioadl_len =
6822                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6823         }
6824
6825         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6826                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6827                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6828
6829                 last_ioadl = ioadl;
6830                 ioadl++;
6831         }
6832
6833         if (likely(last_ioadl))
6834                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6835 }
6836
6837 /**
6838  * ipr_qc_defer - Get a free ipr_cmd
6839  * @qc: queued command
6840  *
6841  * Return value:
6842  *      0 if success / ATA_DEFER_LINK if the command must be deferred
6843  **/
6844 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6845 {
6846         struct ata_port *ap = qc->ap;
6847         struct ipr_sata_port *sata_port = ap->private_data;
6848         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6849         struct ipr_cmnd *ipr_cmd;
6850         struct ipr_hrr_queue *hrrq;
6851         int hrrq_id;
6852
6853         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6854         hrrq = &ioa_cfg->hrrq[hrrq_id];
6855
6856         qc->lldd_task = NULL;
6857         spin_lock(&hrrq->_lock);
6858         if (unlikely(hrrq->ioa_is_dead)) {
6859                 spin_unlock(&hrrq->_lock);
6860                 return 0;
6861         }
6862
6863         if (unlikely(!hrrq->allow_cmds)) {
6864                 spin_unlock(&hrrq->_lock);
6865                 return ATA_DEFER_LINK;
6866         }
6867
6868         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6869         if (ipr_cmd == NULL) {
6870                 spin_unlock(&hrrq->_lock);
6871                 return ATA_DEFER_LINK;
6872         }
6873
6874         qc->lldd_task = ipr_cmd;
6875         spin_unlock(&hrrq->_lock);
6876         return 0;
6877 }
6878
6879 /**
6880  * ipr_qc_issue - Issue a SATA qc to a device
6881  * @qc: queued command
6882  *
6883  * Return value:
6884  *      0 if success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
6885  **/
6886 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6887 {
6888         struct ata_port *ap = qc->ap;
6889         struct ipr_sata_port *sata_port = ap->private_data;
6890         struct ipr_resource_entry *res = sata_port->res;
6891         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6892         struct ipr_cmnd *ipr_cmd;
6893         struct ipr_ioarcb *ioarcb;
6894         struct ipr_ioarcb_ata_regs *regs;
6895
6896         if (qc->lldd_task == NULL)
6897                 ipr_qc_defer(qc);
6898
6899         ipr_cmd = qc->lldd_task;
6900         if (ipr_cmd == NULL)
6901                 return AC_ERR_SYSTEM;
6902
6903         qc->lldd_task = NULL;
6904         spin_lock(&ipr_cmd->hrrq->_lock);
6905         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6906                         ipr_cmd->hrrq->ioa_is_dead)) {
6907                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6908                 spin_unlock(&ipr_cmd->hrrq->_lock);
6909                 return AC_ERR_SYSTEM;
6910         }
6911
6912         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6913         ioarcb = &ipr_cmd->ioarcb;
6914
6915         if (ioa_cfg->sis64) {
6916                 regs = &ipr_cmd->i.ata_ioadl.regs;
6917                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6918         } else
6919                 regs = &ioarcb->u.add_data.u.regs;
6920
6921         memset(regs, 0, sizeof(*regs));
6922         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6923
6924         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6925         ipr_cmd->qc = qc;
6926         ipr_cmd->done = ipr_sata_done;
6927         ipr_cmd->ioarcb.res_handle = res->res_handle;
6928         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6929         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6930         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6931         ipr_cmd->dma_use_sg = qc->n_elem;
6932
6933         if (ioa_cfg->sis64)
6934                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6935         else
6936                 ipr_build_ata_ioadl(ipr_cmd, qc);
6937
6938         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6939         ipr_copy_sata_tf(regs, &qc->tf);
6940         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6941         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6942
6943         switch (qc->tf.protocol) {
6944         case ATA_PROT_NODATA:
6945         case ATA_PROT_PIO:
6946                 break;
6947
6948         case ATA_PROT_DMA:
6949                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6950                 break;
6951
6952         case ATAPI_PROT_PIO:
6953         case ATAPI_PROT_NODATA:
6954                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6955                 break;
6956
6957         case ATAPI_PROT_DMA:
6958                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6959                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6960                 break;
6961
6962         default:
6963                 WARN_ON(1);
6964                 spin_unlock(&ipr_cmd->hrrq->_lock);
6965                 return AC_ERR_INVALID;
6966         }
6967
6968         ipr_send_command(ipr_cmd);
6969         spin_unlock(&ipr_cmd->hrrq->_lock);
6970
6971         return 0;
6972 }
6973
6974 /**
6975  * ipr_qc_fill_rtf - Read result TF
6976  * @qc: ATA queued command
6977  *
6978  * Return value:
6979  *      true
6980  **/
6981 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6982 {
6983         struct ipr_sata_port *sata_port = qc->ap->private_data;
6984         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6985         struct ata_taskfile *tf = &qc->result_tf;
6986
6987         tf->feature = g->error;
6988         tf->nsect = g->nsect;
6989         tf->lbal = g->lbal;
6990         tf->lbam = g->lbam;
6991         tf->lbah = g->lbah;
6992         tf->device = g->device;
6993         tf->command = g->status;
6994         tf->hob_nsect = g->hob_nsect;
6995         tf->hob_lbal = g->hob_lbal;
6996         tf->hob_lbam = g->hob_lbam;
6997         tf->hob_lbah = g->hob_lbah;
6998
6999         return true;
7000 }
7001
7002 static struct ata_port_operations ipr_sata_ops = {
7003         .phy_reset = ipr_ata_phy_reset,
7004         .hardreset = ipr_sata_reset,
7005         .post_internal_cmd = ipr_ata_post_internal,
7006         .qc_prep = ata_noop_qc_prep,
7007         .qc_defer = ipr_qc_defer,
7008         .qc_issue = ipr_qc_issue,
7009         .qc_fill_rtf = ipr_qc_fill_rtf,
7010         .port_start = ata_sas_port_start,
7011         .port_stop = ata_sas_port_stop
7012 };
7013
7014 static struct ata_port_info sata_port_info = {
7015         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7016                           ATA_FLAG_SAS_HOST,
7017         .pio_mask       = ATA_PIO4_ONLY,
7018         .mwdma_mask     = ATA_MWDMA2,
7019         .udma_mask      = ATA_UDMA6,
7020         .port_ops       = &ipr_sata_ops
7021 };
7022
7023 #ifdef CONFIG_PPC_PSERIES
7024 static const u16 ipr_blocked_processors[] = {
7025         PVR_NORTHSTAR,
7026         PVR_PULSAR,
7027         PVR_POWER4,
7028         PVR_ICESTAR,
7029         PVR_SSTAR,
7030         PVR_POWER4p,
7031         PVR_630,
7032         PVR_630p
7033 };
7034
7035 /**
7036  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7037  * @ioa_cfg:    ioa cfg struct
7038  *
7039  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7040  * certain pSeries hardware. This function determines if the given
7041  * adapter is in one of these configurations or not.
7042  *
7043  * Return value:
7044  *      1 if adapter is not supported / 0 if adapter is supported
7045  **/
7046 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7047 {
7048         int i;
7049
7050         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7051                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7052                         if (pvr_version_is(ipr_blocked_processors[i]))
7053                                 return 1;
7054                 }
7055         }
7056         return 0;
7057 }
7058 #else
7059 #define ipr_invalid_adapter(ioa_cfg) 0
7060 #endif
7061
7062 /**
7063  * ipr_ioa_bringdown_done - IOA bring down completion.
7064  * @ipr_cmd:    ipr command struct
7065  *
7066  * This function processes the completion of an adapter bring down.
7067  * It wakes any reset sleepers.
7068  *
7069  * Return value:
7070  *      IPR_RC_JOB_RETURN
7071  **/
7072 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7073 {
7074         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7075         int i;
7076
7077         ENTER;
7078         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7079                 ipr_trace;
7080                 spin_unlock_irq(ioa_cfg->host->host_lock);
7081                 scsi_unblock_requests(ioa_cfg->host);
7082                 spin_lock_irq(ioa_cfg->host->host_lock);
7083         }
7084
7085         ioa_cfg->in_reset_reload = 0;
7086         ioa_cfg->reset_retries = 0;
7087         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7088                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7089                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7090                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7091         }
7092         wmb();
7093
7094         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7095         wake_up_all(&ioa_cfg->reset_wait_q);
7096         LEAVE;
7097
7098         return IPR_RC_JOB_RETURN;
7099 }
7100
7101 /**
7102  * ipr_ioa_reset_done - IOA reset completion.
7103  * @ipr_cmd:    ipr command struct
7104  *
7105  * This function processes the completion of an adapter reset.
7106  * It schedules any necessary mid-layer add/removes and
7107  * wakes any reset sleepers.
7108  *
7109  * Return value:
7110  *      IPR_RC_JOB_RETURN
7111  **/
7112 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7113 {
7114         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7115         struct ipr_resource_entry *res;
7116         int j;
7117
7118         ENTER;
7119         ioa_cfg->in_reset_reload = 0;
7120         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7121                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7122                 ioa_cfg->hrrq[j].allow_cmds = 1;
7123                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7124         }
7125         wmb();
7126         ioa_cfg->reset_cmd = NULL;
7127         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7128
7129         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7130                 if (res->add_to_ml || res->del_from_ml) {
7131                         ipr_trace;
7132                         break;
7133                 }
7134         }
7135         schedule_work(&ioa_cfg->work_q);
7136
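        /*
         * Re-register all HCAMs with the adapter: the first
         * IPR_NUM_LOG_HCAMS buffers carry error log data, the rest
         * carry configuration change notifications.
         */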
7137         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7138                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7139                 if (j < IPR_NUM_LOG_HCAMS)
7140                         ipr_send_hcam(ioa_cfg,
7141                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7142                                 ioa_cfg->hostrcb[j]);
7143                 else
7144                         ipr_send_hcam(ioa_cfg,
7145                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7146                                 ioa_cfg->hostrcb[j]);
7147         }
7148
7149         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7150         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7151
7152         ioa_cfg->reset_retries = 0;
7153         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7154         wake_up_all(&ioa_cfg->reset_wait_q);
7155
7156         spin_unlock(ioa_cfg->host->host_lock);
7157         scsi_unblock_requests(ioa_cfg->host);
7158         spin_lock(ioa_cfg->host->host_lock);
7159
7160         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7161                 scsi_block_requests(ioa_cfg->host);
7162
7163         schedule_work(&ioa_cfg->work_q);
7164         LEAVE;
7165         return IPR_RC_JOB_RETURN;
7166 }
7167
7168 /**
7169  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7170  * @supported_dev:      supported device struct
7171  * @vpids:                      vendor product id struct
7172  *
7173  * Return value:
7174  *      none
7175  **/
7176 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7177                                  struct ipr_std_inq_vpids *vpids)
7178 {
7179         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7180         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7181         supported_dev->num_records = 1;
7182         supported_dev->data_length =
7183                 cpu_to_be16(sizeof(struct ipr_supported_device));
7184         supported_dev->reserved = 0;
7185 }
7186
7187 /**
7188  * ipr_set_supported_devs - Send Set Supported Devices for a device
7189  * @ipr_cmd:    ipr command struct
7190  *
7191  * This function sends a Set Supported Devices to the adapter
7192  *
7193  * Return value:
7194  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7195  **/
7196 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7197 {
7198         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7199         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7200         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7201         struct ipr_resource_entry *res = ipr_cmd->u.res;
7202
7203         ipr_cmd->job_step = ipr_ioa_reset_done;
7204
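        /*
         * Resume scanning at ipr_cmd->u.res, where the previous pass
         * stopped. Non-SIS64 adapters re-enter this job step once per
         * SCSI disk; SIS64 adapters cover all devices with one command.
         */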
7205         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7206                 if (!ipr_is_scsi_disk(res))
7207                         continue;
7208
7209                 ipr_cmd->u.res = res;
7210                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7211
7212                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7213                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7214                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7215
7216                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7217                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7218                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7219                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7220
7221                 ipr_init_ioadl(ipr_cmd,
7222                                ioa_cfg->vpd_cbs_dma +
7223                                  offsetof(struct ipr_misc_cbs, supp_dev),
7224                                sizeof(struct ipr_supported_device),
7225                                IPR_IOADL_FLAGS_WRITE_LAST);
7226
7227                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7228                            IPR_SET_SUP_DEVICE_TIMEOUT);
7229
7230                 if (!ioa_cfg->sis64)
7231                         ipr_cmd->job_step = ipr_set_supported_devs;
7232                 LEAVE;
7233                 return IPR_RC_JOB_RETURN;
7234         }
7235
7236         LEAVE;
7237         return IPR_RC_JOB_CONTINUE;
7238 }
7239
7240 /**
7241  * ipr_get_mode_page - Locate specified mode page
7242  * @mode_pages: mode page buffer
7243  * @page_code:  page code to find
7244  * @len:                minimum required length for mode page
7245  *
7246  * Return value:
7247  *      pointer to mode page / NULL on failure
7248  **/
7249 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7250                                u32 page_code, u32 len)
7251 {
7252         struct ipr_mode_page_hdr *mode_hdr;
7253         u32 page_length;
7254         u32 length;
7255
7256         if (!mode_pages || (mode_pages->hdr.length == 0))
7257                 return NULL;
7258
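        /*
         * hdr.length does not include itself, so the total mode data is
         * length + 1 bytes. Subtract the 4-byte mode parameter header
         * and any block descriptors to get the size of the pages area.
         */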
7259         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7260         mode_hdr = (struct ipr_mode_page_hdr *)
7261                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7262
7263         while (length) {
7264                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7265                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7266                                 return mode_hdr;
7267                         break;
7268                 } else {
7269                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7270                                        mode_hdr->page_length);
7271                         length -= page_length;
7272                         mode_hdr = (struct ipr_mode_page_hdr *)
7273                                 ((unsigned long)mode_hdr + page_length);
7274                 }
7275         }
7276         return NULL;
7277 }
7278
7279 /**
7280  * ipr_check_term_power - Check for term power errors
7281  * @ioa_cfg:    ioa config struct
7282  * @mode_pages: IOAFP mode pages buffer
7283  *
7284  * Check the IOAFP's mode page 28 for term power errors
7285  *
7286  * Return value:
7287  *      nothing
7288  **/
7289 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7290                                  struct ipr_mode_pages *mode_pages)
7291 {
7292         int i;
7293         int entry_length;
7294         struct ipr_dev_bus_entry *bus;
7295         struct ipr_mode_page28 *mode_page;
7296
7297         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7298                                       sizeof(struct ipr_mode_page28));
7299
        /* ipr_get_mode_page() returns NULL if the page is not present */
        if (!mode_page)
                return;

7300         entry_length = mode_page->entry_length;
7301
7302         bus = mode_page->bus;
7303
7304         for (i = 0; i < mode_page->num_entries; i++) {
7305                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7306                         dev_err(&ioa_cfg->pdev->dev,
7307                                 "Term power is absent on scsi bus %d\n",
7308                                 bus->res_addr.bus);
7309                 }
7310
7311                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7312         }
7313 }
7314
7315 /**
7316  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7317  * @ioa_cfg:    ioa config struct
7318  *
7319  * Looks through the config table checking for SES devices. If
7320  * the SES device is in the SES table indicating a maximum SCSI
7321  * bus speed, the speed is limited for the bus.
7322  *
7323  * Return value:
7324  *      none
7325  **/
7326 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7327 {
7328         u32 max_xfer_rate;
7329         int i;
7330
7331         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7332                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7333                                                        ioa_cfg->bus_attr[i].bus_width);
7334
7335                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7336                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7337         }
7338 }
7339
7340 /**
7341  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7342  * @ioa_cfg:    ioa config struct
7343  * @mode_pages: mode page 28 buffer
7344  *
7345  * Updates mode page 28 based on driver configuration
7346  *
7347  * Return value:
7348  *      none
7349  **/
7350 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7351                                           struct ipr_mode_pages *mode_pages)
7352 {
7353         int i, entry_length;
7354         struct ipr_dev_bus_entry *bus;
7355         struct ipr_bus_attributes *bus_attr;
7356         struct ipr_mode_page28 *mode_page;
7357
7358         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7359                                       sizeof(struct ipr_mode_page28));
7360
        /* ipr_get_mode_page() returns NULL if the page is not present */
        if (!mode_page)
                return;

7361         entry_length = mode_page->entry_length;
7362
7363         /* Loop for each device bus entry */
7364         for (i = 0, bus = mode_page->bus;
7365              i < mode_page->num_entries;
7366              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7367                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7368                         dev_err(&ioa_cfg->pdev->dev,
7369                                 "Invalid resource address reported: 0x%08X\n",
7370                                 IPR_GET_PHYS_LOC(bus->res_addr));
7371                         continue;
7372                 }
7373
7374                 bus_attr = &ioa_cfg->bus_attr[i];
7375                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7376                 bus->bus_width = bus_attr->bus_width;
7377                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7378                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7379                 if (bus_attr->qas_enabled)
7380                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7381                 else
7382                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7383         }
7384 }
7385
7386 /**
7387  * ipr_build_mode_select - Build a mode select command
7388  * @ipr_cmd:    ipr command struct
7389  * @res_handle: resource handle to send command to
7390  * @parm:               Byte 1 of Mode Select command
7391  * @dma_addr:   DMA buffer address
7392  * @xfer_len:   data transfer length
7393  *
7394  * Return value:
7395  *      none
7396  **/
7397 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7398                                   __be32 res_handle, u8 parm,
7399                                   dma_addr_t dma_addr, u8 xfer_len)
7400 {
7401         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7402
7403         ioarcb->res_handle = res_handle;
7404         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7405         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7406         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7407         ioarcb->cmd_pkt.cdb[1] = parm;
7408         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7409
7410         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7411 }
7412
7413 /**
7414  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7415  * @ipr_cmd:    ipr command struct
7416  *
7417  * This function sets up the SCSI bus attributes and sends
7418  * a Mode Select for Page 28 to activate them.
7419  *
7420  * Return value:
7421  *      IPR_RC_JOB_RETURN
7422  **/
7423 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7424 {
7425         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7426         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7427         int length;
7428
7429         ENTER;
7430         ipr_scsi_bus_speed_limit(ioa_cfg);
7431         ipr_check_term_power(ioa_cfg, mode_pages);
7432         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
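        /*
         * The mode data length field is reserved in a MODE SELECT
         * parameter list: save the transfer length, then zero the field
         * before the buffer is sent back to the adapter.
         */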
7433         length = mode_pages->hdr.length + 1;
7434         mode_pages->hdr.length = 0;
7435
7436         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7437                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7438                               length);
7439
7440         ipr_cmd->job_step = ipr_set_supported_devs;
7441         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7442                                     struct ipr_resource_entry, queue);
7443         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7444
7445         LEAVE;
7446         return IPR_RC_JOB_RETURN;
7447 }
7448
7449 /**
7450  * ipr_build_mode_sense - Builds a mode sense command
7451  * @ipr_cmd:    ipr command struct
7452  * @res_handle:         resource handle to send command to
7453  * @parm:               Byte 2 of mode sense command
7454  * @dma_addr:   DMA address of mode sense buffer
7455  * @xfer_len:   Size of DMA buffer
7456  *
7457  * Return value:
7458  *      none
7459  **/
7460 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7461                                  __be32 res_handle,
7462                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7463 {
7464         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7465
7466         ioarcb->res_handle = res_handle;
7467         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7468         ioarcb->cmd_pkt.cdb[2] = parm;
7469         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7470         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7471
7472         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7473 }
7474
7475 /**
7476  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7477  * @ipr_cmd:    ipr command struct
7478  *
7479  * This function handles the failure of an IOA bringup command.
7480  *
7481  * Return value:
7482  *      IPR_RC_JOB_RETURN
7483  **/
7484 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7485 {
7486         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7487         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7488
7489         dev_err(&ioa_cfg->pdev->dev,
7490                 "0x%02X failed with IOASC: 0x%08X\n",
7491                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7492
7493         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7494         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7495         return IPR_RC_JOB_RETURN;
7496 }
7497
7498 /**
7499  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7500  * @ipr_cmd:    ipr command struct
7501  *
7502  * This function handles the failure of a Mode Sense to the IOAFP.
7503  * Some adapters do not handle all mode pages.
7504  *
7505  * Return value:
7506  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7507  **/
7508 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7509 {
7510         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7511         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7512
7513         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7514                 ipr_cmd->job_step = ipr_set_supported_devs;
7515                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7516                                             struct ipr_resource_entry, queue);
7517                 return IPR_RC_JOB_CONTINUE;
7518         }
7519
7520         return ipr_reset_cmd_failed(ipr_cmd);
7521 }
7522
7523 /**
7524  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7525  * @ipr_cmd:    ipr command struct
7526  *
7527  * This function sends a Page 28 mode sense to the IOA to
7528  * retrieve SCSI bus attributes.
7529  *
7530  * Return value:
7531  *      IPR_RC_JOB_RETURN
7532  **/
7533 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7534 {
7535         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7536
7537         ENTER;
7538         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7539                              0x28, ioa_cfg->vpd_cbs_dma +
7540                              offsetof(struct ipr_misc_cbs, mode_pages),
7541                              sizeof(struct ipr_mode_pages));
7542
7543         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7544         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7545
7546         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7547
7548         LEAVE;
7549         return IPR_RC_JOB_RETURN;
7550 }
7551
7552 /**
7553  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7554  * @ipr_cmd:    ipr command struct
7555  *
7556  * This function enables dual IOA RAID support if possible.
7557  *
7558  * Return value:
7559  *      IPR_RC_JOB_RETURN
7560  **/
7561 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7562 {
7563         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7564         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7565         struct ipr_mode_page24 *mode_page;
7566         int length;
7567
7568         ENTER;
7569         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7570                                       sizeof(struct ipr_mode_page24));
7571
7572         if (mode_page)
7573                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7574
7575         length = mode_pages->hdr.length + 1;
7576         mode_pages->hdr.length = 0;
7577
7578         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7579                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7580                               length);
7581
7582         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7583         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7584
7585         LEAVE;
7586         return IPR_RC_JOB_RETURN;
7587 }
7588
7589 /**
7590  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7591  * @ipr_cmd:    ipr command struct
7592  *
7593  * This function handles the failure of a Mode Sense to the IOAFP.
7594  * Some adapters do not handle all mode pages.
7595  *
7596  * Return value:
7597  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7598  **/
7599 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7600 {
7601         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7602
7603         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7604                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7605                 return IPR_RC_JOB_CONTINUE;
7606         }
7607
7608         return ipr_reset_cmd_failed(ipr_cmd);
7609 }
7610
7611 /**
7612  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7613  * @ipr_cmd:    ipr command struct
7614  *
7615  * This function sends a mode sense to the IOA to retrieve
7616  * the IOA Advanced Function Control mode page.
7617  *
7618  * Return value:
7619  *      IPR_RC_JOB_RETURN
7620  **/
7621 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7622 {
7623         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7624
7625         ENTER;
7626         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7627                              0x24, ioa_cfg->vpd_cbs_dma +
7628                              offsetof(struct ipr_misc_cbs, mode_pages),
7629                              sizeof(struct ipr_mode_pages));
7630
7631         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7632         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7633
7634         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7635
7636         LEAVE;
7637         return IPR_RC_JOB_RETURN;
7638 }
7639
7640 /**
7641  * ipr_init_res_table - Initialize the resource table
7642  * @ipr_cmd:    ipr command struct
7643  *
7644  * This function looks through the existing resource table, comparing
7645  * it with the config table. This function will take care of old/new
7646  * devices and schedule adding/removing them from the mid-layer
7647  * as appropriate.
7648  *
7649  * Return value:
7650  *      IPR_RC_JOB_CONTINUE
7651  **/
7652 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7653 {
7654         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7655         struct ipr_resource_entry *res, *temp;
7656         struct ipr_config_table_entry_wrapper cfgtew;
7657         int entries, found, flag, i;
7658         LIST_HEAD(old_res);
7659
7660         ENTER;
7661         if (ioa_cfg->sis64)
7662                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7663         else
7664                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7665
7666         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7667                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7668
7669         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7670                 list_move_tail(&res->queue, &old_res);
7671
7672         if (ioa_cfg->sis64)
7673                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7674         else
7675                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7676
7677         for (i = 0; i < entries; i++) {
7678                 if (ioa_cfg->sis64)
7679                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7680                 else
7681                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7682                 found = 0;
7683
7684                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7685                         if (ipr_is_same_device(res, &cfgtew)) {
7686                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7687                                 found = 1;
7688                                 break;
7689                         }
7690                 }
7691
7692                 if (!found) {
7693                         if (list_empty(&ioa_cfg->free_res_q)) {
7694                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7695                                 break;
7696                         }
7697
7698                         found = 1;
7699                         res = list_entry(ioa_cfg->free_res_q.next,
7700                                          struct ipr_resource_entry, queue);
7701                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7702                         ipr_init_res_entry(res, &cfgtew);
7703                         res->add_to_ml = 1;
7704                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7705                         res->sdev->allow_restart = 1;
7706
7707                 if (found)
7708                         ipr_update_res_entry(res, &cfgtew);
7709         }
7710
7711         list_for_each_entry_safe(res, temp, &old_res, queue) {
7712                 if (res->sdev) {
7713                         res->del_from_ml = 1;
7714                         res->res_handle = IPR_INVALID_RES_HANDLE;
7715                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7716                 }
7717         }
7718
7719         list_for_each_entry_safe(res, temp, &old_res, queue) {
7720                 ipr_clear_res_target(res);
7721                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7722         }
7723
7724         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7725                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7726         else
7727                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7728
7729         LEAVE;
7730         return IPR_RC_JOB_CONTINUE;
7731 }
7732
7733 /**
7734  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7735  * @ipr_cmd:    ipr command struct
7736  *
7737  * This function sends a Query IOA Configuration command
7738  * to the adapter to retrieve the IOA configuration table.
7739  *
7740  * Return value:
7741  *      IPR_RC_JOB_RETURN
7742  **/
7743 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7744 {
7745         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7746         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7747         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7748         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7749
7750         ENTER;
7751         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7752                 ioa_cfg->dual_raid = 1;
7753         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7754                  ucode_vpd->major_release, ucode_vpd->card_type,
7755                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7756         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7757         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7758
7759         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7760         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7761         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7762         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7763
7764         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7765                        IPR_IOADL_FLAGS_READ_LAST);
7766
7767         ipr_cmd->job_step = ipr_init_res_table;
7768
7769         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7770
7771         LEAVE;
7772         return IPR_RC_JOB_RETURN;
7773 }
7774
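/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA Service Action
 * @ipr_cmd:    ipr command struct
 *
 * Adapters that do not support the requested service action fail it with
 * an invalid request type IOASC; that is treated as benign and the reset
 * job continues. Any other IOASC is handled as a reset command failure.
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/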
7775 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7776 {
7777         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7778
7779         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7780                 return IPR_RC_JOB_CONTINUE;
7781
7782         return ipr_reset_cmd_failed(ipr_cmd);
7783 }
7784
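/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/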
7785 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7786                                          __be32 res_handle, u8 sa_code)
7787 {
7788         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7789
7790         ioarcb->res_handle = res_handle;
7791         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7792         ioarcb->cmd_pkt.cdb[1] = sa_code;
7793         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7794 }
7795
7796 /**
7797  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7798  * @ipr_cmd:    ipr command struct
7799  *
7800  * Return value:
7801  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7802  **/
7803 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7804 {
7805         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7806         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7807         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7808
7809         ENTER;
7810
7811         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7812
7813         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7814                 ipr_build_ioa_service_action(ipr_cmd,
7815                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7816                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7817
7818                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7819
7820                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7821                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7822                            IPR_SET_SUP_DEVICE_TIMEOUT);
7823
7824                 LEAVE;
7825                 return IPR_RC_JOB_RETURN;
7826         }
7827
7828         LEAVE;
7829         return IPR_RC_JOB_CONTINUE;
7830 }
7831
7832 /**
7833  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7834  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page:       inquiry page code
 * @dma_addr:   DMA address of the response buffer
 * @xfer_len:   size of the response buffer
7835  *
7836  * This utility function sends an inquiry to the adapter.
7837  *
7838  * Return value:
7839  *      none
7840  **/
7841 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7842                               dma_addr_t dma_addr, u8 xfer_len)
7843 {
7844         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7845
7846         ENTER;
7847         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7848         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7849
7850         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7851         ioarcb->cmd_pkt.cdb[1] = flags;
7852         ioarcb->cmd_pkt.cdb[2] = page;
7853         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7854
7855         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7856
7857         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7858         LEAVE;
7859 }
7860
7861 /**
7862  * ipr_inquiry_page_supported - Is the given inquiry page supported
7863  * @page0:              inquiry page 0 buffer
7864  * @page:               page code.
7865  *
7866  * This function determines if the specified inquiry page is supported.
7867  *
7868  * Return value:
7869  *      1 if page is supported / 0 if not
7870  **/
7871 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7872 {
7873         int i;
7874
7875         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7876                 if (page0->page[i] == page)
7877                         return 1;
7878
7879         return 0;
7880 }
7881
7882 /**
7883  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7884  * @ipr_cmd:    ipr command struct
7885  *
7886  * This function sends a Page 0xC4 inquiry to the adapter
7887  * to retrieve software VPD information.
7888  *
7889  * Return value:
7890  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7891  **/
7892 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7893 {
7894         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7895         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7896         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7897
7898         ENTER;
7899         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
7900         memset(pageC4, 0, sizeof(*pageC4));
7901
7902         if (ipr_inquiry_page_supported(page0, 0xC4)) {
7903                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7904                                   (ioa_cfg->vpd_cbs_dma
7905                                    + offsetof(struct ipr_misc_cbs,
7906                                               pageC4_data)),
7907                                   sizeof(struct ipr_inquiry_pageC4));
7908                 return IPR_RC_JOB_RETURN;
7909         }
7910
7911         LEAVE;
7912         return IPR_RC_JOB_CONTINUE;
7913 }
7914
7915 /**
7916  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7917  * @ipr_cmd:    ipr command struct
7918  *
7919  * This function sends a Page 0xD0 inquiry to the adapter
7920  * to retrieve adapter capabilities.
7921  *
7922  * Return value:
7923  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7924  **/
7925 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7926 {
7927         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7928         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7929         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7930
7931         ENTER;
7932         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
7933         memset(cap, 0, sizeof(*cap));
7934
7935         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7936                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7937                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7938                                   sizeof(struct ipr_inquiry_cap));
7939                 return IPR_RC_JOB_RETURN;
7940         }
7941
7942         LEAVE;
7943         return IPR_RC_JOB_CONTINUE;
7944 }
7945
7946 /**
7947  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7948  * @ipr_cmd:    ipr command struct
7949  *
7950  * This function sends a Page 3 inquiry to the adapter
7951  * to retrieve software VPD information.
7952  *
7953  * Return value:
7954  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7955  **/
7956 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7957 {
7958         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7959
7960         ENTER;
7961
7962         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7963
7964         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7965                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7966                           sizeof(struct ipr_inquiry_page3));
7967
7968         LEAVE;
7969         return IPR_RC_JOB_RETURN;
7970 }
7971
7972 /**
7973  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7974  * @ipr_cmd:    ipr command struct
7975  *
7976  * This function sends a Page 0 inquiry to the adapter
7977  * to retrieve supported inquiry pages.
7978  *
7979  * Return value:
7980  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7981  **/
7982 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7983 {
7984         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7985         char type[5];
7986
7987         ENTER;
7988
7989         /* Grab the type out of the VPD and store it away */
7990         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7991         type[4] = '\0';
7992         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7993
7994         if (ipr_invalid_adapter(ioa_cfg)) {
7995                 dev_err(&ioa_cfg->pdev->dev,
7996                         "Adapter not supported in this hardware configuration.\n");
7997
7998                 if (!ipr_testmode) {
7999                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8000                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8001                         list_add_tail(&ipr_cmd->queue,
8002                                         &ioa_cfg->hrrq->hrrq_free_q);
8003                         return IPR_RC_JOB_RETURN;
8004                 }
8005         }
8006
8007         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8008
8009         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8010                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8011                           sizeof(struct ipr_inquiry_page0));
8012
8013         LEAVE;
8014         return IPR_RC_JOB_RETURN;
8015 }
8016
8017 /**
8018  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8019  * @ipr_cmd:    ipr command struct
8020  *
8021  * This function sends a standard inquiry to the adapter.
8022  *
8023  * Return value:
8024  *      IPR_RC_JOB_RETURN
8025  **/
8026 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8027 {
8028         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8029
8030         ENTER;
8031         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8032
8033         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8034                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8035                           sizeof(struct ipr_ioa_vpd));
8036
8037         LEAVE;
8038         return IPR_RC_JOB_RETURN;
8039 }
8040
8041 /**
8042  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8043  * @ipr_cmd:    ipr command struct
8044  *
8045  * This function sends an Identify Host Request Response Queue
8046  * command to establish the HRRQ with the adapter.
8047  *
8048  * Return value:
8049  *      IPR_RC_JOB_RETURN
8050  **/
8051 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8052 {
8053         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8054         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8055         struct ipr_hrr_queue *hrrq;
8056
8057         ENTER;
8058         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8059         if (ioa_cfg->identify_hrrq_index == 0)
8060                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8061
8062         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8063                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8064
8065                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8066                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8067
8068                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8069                 if (ioa_cfg->sis64)
8070                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8071
8072                 if (ioa_cfg->nvectors == 1)
8073                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8074                 else
8075                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8076
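                /*
                 * CDB bytes 2-5 carry bits 31:0 of the HRRQ DMA address
                 * (bytes 10-13 carry bits 63:32 on SIS64 below), and
                 * bytes 7-8 carry the queue size in bytes.
                 */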
8077                 ioarcb->cmd_pkt.cdb[2] =
8078                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8079                 ioarcb->cmd_pkt.cdb[3] =
8080                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8081                 ioarcb->cmd_pkt.cdb[4] =
8082                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8083                 ioarcb->cmd_pkt.cdb[5] =
8084                         ((u64) hrrq->host_rrq_dma) & 0xff;
8085                 ioarcb->cmd_pkt.cdb[7] =
8086                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8087                 ioarcb->cmd_pkt.cdb[8] =
8088                         (sizeof(u32) * hrrq->size) & 0xff;
8089
8090                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8091                         ioarcb->cmd_pkt.cdb[9] =
8092                                         ioa_cfg->identify_hrrq_index;
8093
8094                 if (ioa_cfg->sis64) {
8095                         ioarcb->cmd_pkt.cdb[10] =
8096                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8097                         ioarcb->cmd_pkt.cdb[11] =
8098                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8099                         ioarcb->cmd_pkt.cdb[12] =
8100                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8101                         ioarcb->cmd_pkt.cdb[13] =
8102                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8103                 }
8104
8105                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8106                         ioarcb->cmd_pkt.cdb[14] =
8107                                         ioa_cfg->identify_hrrq_index;
8108
8109                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8110                            IPR_INTERNAL_TIMEOUT);
8111
8112                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8113                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8114
8115                 LEAVE;
8116                 return IPR_RC_JOB_RETURN;
8117         }
8118
8119         LEAVE;
8120         return IPR_RC_JOB_CONTINUE;
8121 }
8122
8123 /**
8124  * ipr_reset_timer_done - Adapter reset timer function
8125  * @ipr_cmd:    ipr command struct
8126  *
8127  * Description: This function is used in adapter reset processing
8128  * for timing events. If the reset_cmd pointer in the IOA
8129  * config struct no longer points to this command, we are doing
8130  * nested resets and fail_all_ops will take care of freeing the
8131  * command block.
8132  *
8133  * Return value:
8134  *      none
8135  **/
8136 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8137 {
8138         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8139         unsigned long lock_flags = 0;
8140
8141         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8142
8143         if (ioa_cfg->reset_cmd == ipr_cmd) {
8144                 list_del(&ipr_cmd->queue);
8145                 ipr_cmd->done(ipr_cmd);
8146         }
8147
8148         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8149 }
8150
8151 /**
8152  * ipr_reset_start_timer - Start a timer for adapter reset job
8153  * @ipr_cmd:    ipr command struct
8154  * @timeout:    timeout value
8155  *
8156  * Description: This function is used in adapter reset processing
8157  * for timing events. If the reset_cmd pointer in the IOA
8158  * config struct no longer points to this command, we are doing
8159  * nested resets and fail_all_ops will take care of freeing the
8160  * command block.
8161  *
8162  * Return value:
8163  *      none
8164  **/
8165 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8166                                   unsigned long timeout)
8167 {
8168
8169         ENTER;
8170         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8171         ipr_cmd->done = ipr_reset_ioa_job;
8172
8173         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8174         ipr_cmd->timer.expires = jiffies + timeout;
8175         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8176         add_timer(&ipr_cmd->timer);
8177 }
8178
8179 /**
8180  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8181  * @ioa_cfg:    ioa cfg struct
8182  *
8183  * Return value:
8184  *      nothing
8185  **/
8186 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8187 {
8188         struct ipr_hrr_queue *hrrq;
8189
8190         for_each_hrrq(hrrq, ioa_cfg) {
8191                 spin_lock(&hrrq->_lock);
8192                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8193
8194                 /* Initialize Host RRQ pointers */
8195                 hrrq->hrrq_start = hrrq->host_rrq;
8196                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8197                 hrrq->hrrq_curr = hrrq->hrrq_start;
8198                 hrrq->toggle_bit = 1;
8199                 spin_unlock(&hrrq->_lock);
8200         }
8201         wmb();
8202
8203         ioa_cfg->identify_hrrq_index = 0;
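        /*
         * When multiple HRRQs are in use, queue 0 (IPR_INIT_HRRQ) is
         * reserved for driver-internal commands, so round-robin
         * dispatch of normal I/O starts at index 1.
         */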
8204         if (ioa_cfg->hrrq_num == 1)
8205                 atomic_set(&ioa_cfg->hrrq_index, 0);
8206         else
8207                 atomic_set(&ioa_cfg->hrrq_index, 1);
8208
8209         /* Zero out config table */
8210         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8211 }
8212
8213 /**
8214  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8215  * @ipr_cmd:    ipr command struct
8216  *
8217  * Return value:
8218  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8219  **/
8220 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8221 {
8222         unsigned long stage, stage_time;
8223         u32 feedback;
8224         volatile u32 int_reg;
8225         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8226         u64 maskval = 0;
8227
8228         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8229         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8230         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8231
8232         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8233
8234         /* sanity check the stage_time value */
8235         if (stage_time == 0)
8236                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8237         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8238                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8239         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8240                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8241
8242         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8243                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8244                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8245                 stage_time = ioa_cfg->transop_timeout;
8246                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8247         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8248                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8249                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8250                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8251                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8252                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8253                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8254                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8255                         return IPR_RC_JOB_CONTINUE;
8256                 }
8257         }
8258
8259         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8260         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8261         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8262         ipr_cmd->done = ipr_reset_ioa_job;
8263         add_timer(&ipr_cmd->timer);
8264
8265         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8266
8267         return IPR_RC_JOB_RETURN;
8268 }
8269
8270 /**
8271  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8272  * @ipr_cmd:    ipr command struct
8273  *
8274  * This function reinitializes some control blocks and
8275  * enables destructive diagnostics on the adapter.
8276  *
8277  * Return value:
8278  *      IPR_RC_JOB_RETURN
8279  **/
8280 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8281 {
8282         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8283         volatile u32 int_reg;
8284         volatile u64 maskval;
8285         int i;
8286
8287         ENTER;
8288         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8289         ipr_init_ioa_mem(ioa_cfg);
8290
8291         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8292                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8293                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8294                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8295         }
8296         wmb();
8297         if (ioa_cfg->sis64) {
8298                 /* Set the adapter to the correct endian mode. */
8299                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8300                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8301         }
8302
8303         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8304
8305         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8306                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8307                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8308                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8309                 return IPR_RC_JOB_CONTINUE;
8310         }
8311
8312         /* Enable destructive diagnostics on IOA */
8313         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8314
8315         if (ioa_cfg->sis64) {
8316                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8317                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8318                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8319         } else
8320                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8321
8322         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8323
8324         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8325
8326         if (ioa_cfg->sis64) {
8327                 ipr_cmd->job_step = ipr_reset_next_stage;
8328                 return IPR_RC_JOB_CONTINUE;
8329         }
8330
8331         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8332         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8333         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8334         ipr_cmd->done = ipr_reset_ioa_job;
8335         add_timer(&ipr_cmd->timer);
8336         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8337
8338         LEAVE;
8339         return IPR_RC_JOB_RETURN;
8340 }
8341
8342 /**
8343  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8344  * @ipr_cmd:    ipr command struct
8345  *
8346  * This function is invoked when an adapter dump has run out
8347  * of processing time.
8348  *
8349  * Return value:
8350  *      IPR_RC_JOB_CONTINUE
8351  **/
8352 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8353 {
8354         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8355
8356         if (ioa_cfg->sdt_state == GET_DUMP)
8357                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8358         else if (ioa_cfg->sdt_state == READ_DUMP)
8359                 ioa_cfg->sdt_state = ABORT_DUMP;
8360
8361         ioa_cfg->dump_timeout = 1;
8362         ipr_cmd->job_step = ipr_reset_alert;
8363
8364         return IPR_RC_JOB_CONTINUE;
8365 }
8366
8367 /**
8368  * ipr_unit_check_no_data - Log a unit check/no data error log
8369  * @ioa_cfg:            ioa config struct
8370  *
8371  * Logs an error indicating the adapter unit checked, but for some
8372  * reason, we were unable to fetch the unit check buffer.
8373  *
8374  * Return value:
8375  *      nothing
8376  **/
8377 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8378 {
8379         ioa_cfg->errors_logged++;
8380         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8381 }
8382
8383 /**
8384  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8385  * @ioa_cfg:            ioa config struct
8386  *
8387  * Fetches the unit check buffer from the adapter by clocking the data
8388  * through the mailbox register.
8389  *
8390  * Return value:
8391  *      nothing
8392  **/
8393 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8394 {
8395         unsigned long mailbox;
8396         struct ipr_hostrcb *hostrcb;
8397         struct ipr_uc_sdt sdt;
8398         int rc, length;
8399         u32 ioasc;
8400
8401         mailbox = readl(ioa_cfg->ioa_mailbox);
8402
8403         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8404                 ipr_unit_check_no_data(ioa_cfg);
8405                 return;
8406         }
8407
8408         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8409         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8410                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8411
8412         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8413             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8414             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8415                 ipr_unit_check_no_data(ioa_cfg);
8416                 return;
8417         }
8418
8419         /* Find length of the first sdt entry (UC buffer) */
8420         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8421                 length = be32_to_cpu(sdt.entry[0].end_token);
8422         else
8423                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8424                           be32_to_cpu(sdt.entry[0].start_token)) &
8425                           IPR_FMT2_MBX_ADDR_MASK;
8426
8427         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8428                              struct ipr_hostrcb, queue);
8429         list_del_init(&hostrcb->queue);
8430         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8431
8432         rc = ipr_get_ldump_data_section(ioa_cfg,
8433                                         be32_to_cpu(sdt.entry[0].start_token),
8434                                         (__be32 *)&hostrcb->hcam,
8435                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8436
8437         if (!rc) {
8438                 ipr_handle_log_data(ioa_cfg, hostrcb);
8439                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8440                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8441                     ioa_cfg->sdt_state == GET_DUMP)
8442                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8443         } else
8444                 ipr_unit_check_no_data(ioa_cfg);
8445
8446         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8447 }
8448
8449 /**
8450  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8451  * @ipr_cmd:    ipr command struct
8452  *
8453  * Description: This function fetches the unit check buffer from the adapter.
8454  *
8455  * Return value:
8456  *      IPR_RC_JOB_RETURN
8457  **/
8458 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8459 {
8460         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8461
8462         ENTER;
8463         ioa_cfg->ioa_unit_checked = 0;
8464         ipr_get_unit_check_buffer(ioa_cfg);
8465         ipr_cmd->job_step = ipr_reset_alert;
8466         ipr_reset_start_timer(ipr_cmd, 0);
8467
8468         LEAVE;
8469         return IPR_RC_JOB_RETURN;
8470 }
8471
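/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to become stable
 * @ipr_cmd:    ipr command struct
 *
 * Description: On SIS64 adapters, poll the sense interrupt register until
 * the mailbox is reported stable (or the wait times out), then start the
 * dump read. Non-SIS64 adapters start the dump read immediately.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/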
8472 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8473 {
8474         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8475
8476         ENTER;
8477
8478         if (ioa_cfg->sdt_state != GET_DUMP)
8479                 return IPR_RC_JOB_RETURN;
8480
8481         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8482             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8483              IPR_PCII_MAILBOX_STABLE)) {
8484
8485                 if (!ipr_cmd->u.time_left)
8486                         dev_err(&ioa_cfg->pdev->dev,
8487                                 "Timed out waiting for Mailbox register.\n");
8488
8489                 ioa_cfg->sdt_state = READ_DUMP;
8490                 ioa_cfg->dump_timeout = 0;
8491                 if (ioa_cfg->sis64)
8492                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8493                 else
8494                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8495                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8496                 schedule_work(&ioa_cfg->work_q);
8497
8498         } else {
8499                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8500                 ipr_reset_start_timer(ipr_cmd,
8501                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8502         }
8503
8504         LEAVE;
8505         return IPR_RC_JOB_RETURN;
8506 }
8507
8508 /**
8509  * ipr_reset_restore_cfg_space - Restore PCI config space.
8510  * @ipr_cmd:    ipr command struct
8511  *
8512  * Description: This function restores the saved PCI config space of
8513  * the adapter, fails all outstanding ops back to the callers, and
8514  * fetches the dump/unit check if applicable to this reset.
8515  *
8516  * Return value:
8517  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8518  **/
8519 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8520 {
8521         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8522         u32 int_reg;
8523
8524         ENTER;
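	/* Mark the saved state valid so pci_restore_state() will restore it */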
8525         ioa_cfg->pdev->state_saved = true;
8526         pci_restore_state(ioa_cfg->pdev);
8527
8528         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8529                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8530                 return IPR_RC_JOB_CONTINUE;
8531         }
8532
8533         ipr_fail_all_ops(ioa_cfg);
8534
8535         if (ioa_cfg->sis64) {
8536                 /* Set the adapter to the correct endian mode. */
8537                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8538                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8539         }
8540
8541         if (ioa_cfg->ioa_unit_checked) {
8542                 if (ioa_cfg->sis64) {
8543                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8544                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8545                         return IPR_RC_JOB_RETURN;
8546                 } else {
8547                         ioa_cfg->ioa_unit_checked = 0;
8548                         ipr_get_unit_check_buffer(ioa_cfg);
8549                         ipr_cmd->job_step = ipr_reset_alert;
8550                         ipr_reset_start_timer(ipr_cmd, 0);
8551                         return IPR_RC_JOB_RETURN;
8552                 }
8553         }
8554
8555         if (ioa_cfg->in_ioa_bringdown) {
8556                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8557         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8558                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8559                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8560         } else {
8561                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8562         }
8563
8564         LEAVE;
8565         return IPR_RC_JOB_CONTINUE;
8566 }
8567
8568 /**
8569  * ipr_reset_bist_done - BIST has completed on the adapter.
8570  * @ipr_cmd:    ipr command struct
8571  *
8572  * Description: Unblock config space and resume the reset process.
8573  *
8574  * Return value:
8575  *      IPR_RC_JOB_CONTINUE
8576  **/
8577 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8578 {
8579         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8580
8581         ENTER;
8582         if (ioa_cfg->cfg_locked)
8583                 pci_cfg_access_unlock(ioa_cfg->pdev);
8584         ioa_cfg->cfg_locked = 0;
8585         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8586         LEAVE;
8587         return IPR_RC_JOB_CONTINUE;
8588 }
8589
8590 /**
8591  * ipr_reset_start_bist - Run BIST on the adapter.
8592  * @ipr_cmd:    ipr command struct
8593  *
8594  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8595  *
8596  * Return value:
8597  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8598  **/
8599 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8600 {
8601         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8602         int rc = PCIBIOS_SUCCESSFUL;
8603
8604         ENTER;
8605         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8606                 writel(IPR_UPROCI_SIS64_START_BIST,
8607                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8608         else
8609                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8610
8611         if (rc == PCIBIOS_SUCCESSFUL) {
8612                 ipr_cmd->job_step = ipr_reset_bist_done;
8613                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8614                 rc = IPR_RC_JOB_RETURN;
8615         } else {
8616                 if (ioa_cfg->cfg_locked)
8617                         pci_cfg_access_unlock(ioa_cfg->pdev);
8618                 ioa_cfg->cfg_locked = 0;
8619                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8620                 rc = IPR_RC_JOB_CONTINUE;
8621         }
8622
8623         LEAVE;
8624         return rc;
8625 }
8626
8627 /**
8628  * ipr_reset_slot_reset_done - PCI reset to the adapter has been cleared
8629  * @ipr_cmd:    ipr command struct
8630  *
8631  * Description: Called once PCI reset has been deasserted; delays two seconds.
8632  *
8633  * Return value:
8634  *      IPR_RC_JOB_RETURN
8635  **/
8636 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8637 {
8638         ENTER;
8639         ipr_cmd->job_step = ipr_reset_bist_done;
8640         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8641         LEAVE;
8642         return IPR_RC_JOB_RETURN;
8643 }
8644
8645 /**
8646  * ipr_reset_reset_work - Pulse a PCIe warm reset
8647  * @work:       work struct
8648  *
8649  * Description: This pulses a PCIe warm reset to the adapter slot.
8650  *
8651  **/
8652 static void ipr_reset_reset_work(struct work_struct *work)
8653 {
8654         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8655         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8656         struct pci_dev *pdev = ioa_cfg->pdev;
8657         unsigned long lock_flags = 0;
8658
8659         ENTER;
8660         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8661         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8662         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8663
8664         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8665         if (ioa_cfg->reset_cmd == ipr_cmd)
8666                 ipr_reset_ioa_job(ipr_cmd);
8667         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8668         LEAVE;
8669 }
8670
8671 /**
8672  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8673  * @ipr_cmd:    ipr command struct
8674  *
8675  * Description: This asserts PCI reset to the adapter.
8676  *
8677  * Return value:
8678  *      IPR_RC_JOB_RETURN
8679  **/
8680 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8681 {
8682         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8683
8684         ENTER;
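	/* The reset pulse sleeps, so run it from the reset workqueue
	 * rather than here under the host lock. */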
8685         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8686         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8687         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8688         LEAVE;
8689         return IPR_RC_JOB_RETURN;
8690 }
8691
8692 /**
8693  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8694  * @ipr_cmd:    ipr command struct
8695  *
8696  * Description: This attempts to block config access to the IOA.
8697  *
8698  * Return value:
8699  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8700  **/
8701 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8702 {
8703         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8704         int rc = IPR_RC_JOB_CONTINUE;
8705
8706         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8707                 ioa_cfg->cfg_locked = 1;
8708                 ipr_cmd->job_step = ioa_cfg->reset;
8709         } else {
8710                 if (ipr_cmd->u.time_left) {
8711                         rc = IPR_RC_JOB_RETURN;
8712                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8713                         ipr_reset_start_timer(ipr_cmd,
8714                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8715                 } else {
8716                         ipr_cmd->job_step = ioa_cfg->reset;
8717                         dev_err(&ioa_cfg->pdev->dev,
8718                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8719                 }
8720         }
8721
8722         return rc;
8723 }
8724
8725 /**
8726  * ipr_reset_block_config_access - Block config access to the IOA
8727  * @ipr_cmd:    ipr command struct
8728  *
8729  * Description: This attempts to block config access to the IOA.
8730  *
8731  * Return value:
8732  *      IPR_RC_JOB_CONTINUE
8733  **/
8734 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8735 {
8736         ipr_cmd->ioa_cfg->cfg_locked = 0;
8737         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8738         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8739         return IPR_RC_JOB_CONTINUE;
8740 }
8741
8742 /**
8743  * ipr_reset_allowed - Query whether or not IOA can be reset
8744  * @ioa_cfg:    ioa config struct
8745  *
8746  * Return value:
8747  *      0 if reset not allowed / non-zero if reset is allowed
8748  **/
8749 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8750 {
8751         volatile u32 temp_reg;
8752
8753         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8754         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8755 }
8756
8757 /**
8758  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8759  * @ipr_cmd:    ipr command struct
8760  *
8761  * Description: This function waits for adapter permission to run BIST,
8762  * then runs BIST. If the adapter does not give permission after a
8763  * reasonable time, we will reset the adapter anyway. The impact of
8764  * resetting the adapter without warning the adapter is the risk of
8765  * losing the persistent error log on the adapter. If the adapter is
8766  * reset while it is writing to the flash on the adapter, the flash
8767  * segment will have bad ECC and be zeroed.
8768  *
8769  * Return value:
8770  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8771  **/
8772 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8773 {
8774         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8775         int rc = IPR_RC_JOB_RETURN;
8776
8777         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8778                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8779                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8780         } else {
8781                 ipr_cmd->job_step = ipr_reset_block_config_access;
8782                 rc = IPR_RC_JOB_CONTINUE;
8783         }
8784
8785         return rc;
8786 }
8787
8788 /**
8789  * ipr_reset_alert - Alert the adapter of a pending reset
8790  * @ipr_cmd:    ipr command struct
8791  *
8792  * Description: This function alerts the adapter that it will be reset.
8793  * If memory space is not currently enabled, proceed directly
8794  * to running BIST on the adapter. The timer must always be started
8795  * so we guarantee we do not run BIST from ipr_isr.
8796  *
8797  * Return value:
8798  *      IPR_RC_JOB_RETURN
8799  **/
8800 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8801 {
8802         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8803         u16 cmd_reg;
8804         int rc;
8805
8806         ENTER;
8807         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8808
8809         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8810                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8811                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8812                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8813         } else {
8814                 ipr_cmd->job_step = ipr_reset_block_config_access;
8815         }
8816
8817         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8818         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8819
8820         LEAVE;
8821         return IPR_RC_JOB_RETURN;
8822 }
8823
8824 /**
8825  * ipr_reset_quiesce_done - Complete IOA disconnect
8826  * @ipr_cmd:    ipr command struct
8827  *
8828  * Description: Freeze the adapter to complete quiesce processing
8829  *
8830  * Return value:
8831  *      IPR_RC_JOB_CONTINUE
8832  **/
8833 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8834 {
8835         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8836
8837         ENTER;
8838         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8839         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8840         LEAVE;
8841         return IPR_RC_JOB_CONTINUE;
8842 }
8843
8844 /**
8845  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8846  * @ipr_cmd:    ipr command struct
8847  *
8848  * Description: Ensure nothing is outstanding to the IOA and
8849  * proceed with IOA disconnect. Otherwise reset the IOA.
8850  *
8851  * Return value:
8852  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8853  **/
8854 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8855 {
8856         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8857         struct ipr_cmnd *loop_cmd;
8858         struct ipr_hrr_queue *hrrq;
8859         int rc = IPR_RC_JOB_CONTINUE;
8860         int count = 0;
8861
8862         ENTER;
8863         ipr_cmd->job_step = ipr_reset_quiesce_done;
8864
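	/* If anything is still outstanding to the IOA, abandon the
	 * quiesce and initiate a full adapter reset instead. */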
8865         for_each_hrrq(hrrq, ioa_cfg) {
8866                 spin_lock(&hrrq->_lock);
8867                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8868                         count++;
8869                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8870                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8871                         rc = IPR_RC_JOB_RETURN;
8872                         break;
8873                 }
8874                 spin_unlock(&hrrq->_lock);
8875
8876                 if (count)
8877                         break;
8878         }
8879
8880         LEAVE;
8881         return rc;
8882 }
8883
8884 /**
8885  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8886  * @ipr_cmd:    ipr command struct
8887  *
8888  * Description: Cancel any outstanding HCAMs to the IOA.
8889  *
8890  * Return value:
8891  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8892  **/
8893 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8894 {
8895         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8896         int rc = IPR_RC_JOB_CONTINUE;
8897         struct ipr_cmd_pkt *cmd_pkt;
8898         struct ipr_cmnd *hcam_cmd;
8899         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8900
8901         ENTER;
8902         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8903
8904         if (!hrrq->ioa_is_dead) {
8905                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8906                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8907                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8908                                         continue;
8909
8910                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8911                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8912                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8914                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8915                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
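				/* The 64-bit IOARCB address of the HCAM being
				 * cancelled is split across the CDB: bytes
				 * 10-13 carry the high 32 bits, bytes 2-5 the
				 * low 32 bits. */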
8916                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8917                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8918                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8919                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8920                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8921                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8922                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8923                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8924
8925                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8926                                            IPR_CANCEL_TIMEOUT);
8927
8928                                 rc = IPR_RC_JOB_RETURN;
8929                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8930                                 break;
8931                         }
8932                 }
8933         } else
8934                 ipr_cmd->job_step = ipr_reset_alert;
8935
8936         LEAVE;
8937         return rc;
8938 }
8939
8940 /**
8941  * ipr_reset_ucode_download_done - Microcode download completion
8942  * @ipr_cmd:    ipr command struct
8943  *
8944  * Description: This function unmaps the microcode download buffer.
8945  *
8946  * Return value:
8947  *      IPR_RC_JOB_CONTINUE
8948  **/
8949 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8950 {
8951         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8952         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8953
8954         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8955                      sglist->num_sg, DMA_TO_DEVICE);
8956
8957         ipr_cmd->job_step = ipr_reset_alert;
8958         return IPR_RC_JOB_CONTINUE;
8959 }
8960
8961 /**
8962  * ipr_reset_ucode_download - Download microcode to the adapter
8963  * @ipr_cmd:    ipr command struct
8964  *
8965  * Description: This function checks to see if there is microcode
8966  * to download to the adapter. If there is, a download is performed.
8967  *
8968  * Return value:
8969  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8970  **/
8971 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8972 {
8973         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8974         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8975
8976         ENTER;
8977         ipr_cmd->job_step = ipr_reset_alert;
8978
8979         if (!sglist)
8980                 return IPR_RC_JOB_CONTINUE;
8981
8982         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8983         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8984         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8985         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8986         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8987         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8988         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8989
8990         if (ioa_cfg->sis64)
8991                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8992         else
8993                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8994         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8995
8996         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8997                    IPR_WRITE_BUFFER_TIMEOUT);
8998
8999         LEAVE;
9000         return IPR_RC_JOB_RETURN;
9001 }
9002
9003 /**
9004  * ipr_reset_shutdown_ioa - Shutdown the adapter
9005  * @ipr_cmd:    ipr command struct
9006  *
9007  * Description: This function issues an adapter shutdown of the
9008  * specified type to the specified adapter as part of the
9009  * adapter reset job.
9010  *
9011  * Return value:
9012  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9013  **/
9014 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9015 {
9016         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9017         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9018         unsigned long timeout;
9019         int rc = IPR_RC_JOB_CONTINUE;
9020
9021         ENTER;
9022         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9023                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9024         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9025                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9026                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9027                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9028                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9029                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9030
9031                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9032                         timeout = IPR_SHUTDOWN_TIMEOUT;
9033                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9034                         timeout = IPR_INTERNAL_TIMEOUT;
9035                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9036                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9037                 else
9038                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9039
9040                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9041
9042                 rc = IPR_RC_JOB_RETURN;
9043                 ipr_cmd->job_step = ipr_reset_ucode_download;
9044         } else
9045                 ipr_cmd->job_step = ipr_reset_alert;
9046
9047         LEAVE;
9048         return rc;
9049 }
9050
9051 /**
9052  * ipr_reset_ioa_job - Adapter reset job
9053  * @ipr_cmd:    ipr command struct
9054  *
9055  * Description: This function is the job router for the adapter reset job.
9056  *
9057  * Return value:
9058  *      none
9059  **/
9060 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9061 {
9062         u32 rc, ioasc;
9063         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9064
9065         do {
9066                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9067
9068                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9069                         /*
9070                          * We are doing nested adapter resets and this is
9071                          * not the current reset job.
9072                          */
9073                         list_add_tail(&ipr_cmd->queue,
9074                                         &ipr_cmd->hrrq->hrrq_free_q);
9075                         return;
9076                 }
9077
9078                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9079                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9080                         if (rc == IPR_RC_JOB_RETURN)
9081                                 return;
9082                 }
9083
9084                 ipr_reinit_ipr_cmnd(ipr_cmd);
9085                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9086                 rc = ipr_cmd->job_step(ipr_cmd);
9087         } while (rc == IPR_RC_JOB_CONTINUE);
9088 }
9089
9090 /**
9091  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9092  * @ioa_cfg:            ioa config struct
9093  * @job_step:           first job step of reset job
9094  * @shutdown_type:      shutdown type
9095  *
9096  * Description: This function will initiate the reset of the given adapter
9097  * starting at the selected job step.
9098  * If the caller needs to wait on the completion of the reset,
9099  * the caller must sleep on the reset_wait_q.
9100  *
9101  * Return value:
9102  *      none
9103  **/
9104 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9105                                     int (*job_step) (struct ipr_cmnd *),
9106                                     enum ipr_shutdown_type shutdown_type)
9107 {
9108         struct ipr_cmnd *ipr_cmd;
9109         int i;
9110
9111         ioa_cfg->in_reset_reload = 1;
9112         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9113                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9114                 ioa_cfg->hrrq[i].allow_cmds = 0;
9115                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9116         }
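	/* Make sure the clearing of allow_cmds is globally visible first */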
9117         wmb();
9118         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
9119                 scsi_block_requests(ioa_cfg->host);
9120
9121         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9122         ioa_cfg->reset_cmd = ipr_cmd;
9123         ipr_cmd->job_step = job_step;
9124         ipr_cmd->u.shutdown_type = shutdown_type;
9125
9126         ipr_reset_ioa_job(ipr_cmd);
9127 }
9128
9129 /**
9130  * ipr_initiate_ioa_reset - Initiate an adapter reset
9131  * @ioa_cfg:            ioa config struct
9132  * @shutdown_type:      shutdown type
9133  *
9134  * Description: This function will initiate the reset of the given adapter.
9135  * If the caller needs to wait on the completion of the reset,
9136  * the caller must sleep on the reset_wait_q.
9137  *
9138  * Return value:
9139  *      none
9140  **/
9141 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9142                                    enum ipr_shutdown_type shutdown_type)
9143 {
9144         int i;
9145
9146         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9147                 return;
9148
9149         if (ioa_cfg->in_reset_reload) {
9150                 if (ioa_cfg->sdt_state == GET_DUMP)
9151                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9152                 else if (ioa_cfg->sdt_state == READ_DUMP)
9153                         ioa_cfg->sdt_state = ABORT_DUMP;
9154         }
9155
9156         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9157                 dev_err(&ioa_cfg->pdev->dev,
9158                         "IOA taken offline - error recovery failed\n");
9159
9160                 ioa_cfg->reset_retries = 0;
9161                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9162                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9163                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9164                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9165                 }
9166                 wmb();
9167
9168                 if (ioa_cfg->in_ioa_bringdown) {
9169                         ioa_cfg->reset_cmd = NULL;
9170                         ioa_cfg->in_reset_reload = 0;
9171                         ipr_fail_all_ops(ioa_cfg);
9172                         wake_up_all(&ioa_cfg->reset_wait_q);
9173
9174                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9175                                 spin_unlock_irq(ioa_cfg->host->host_lock);
9176                                 scsi_unblock_requests(ioa_cfg->host);
9177                                 spin_lock_irq(ioa_cfg->host->host_lock);
9178                         }
9179                         return;
9180                 } else {
9181                         ioa_cfg->in_ioa_bringdown = 1;
9182                         shutdown_type = IPR_SHUTDOWN_NONE;
9183                 }
9184         }
9185
9186         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9187                                 shutdown_type);
9188 }
9189
9190 /**
9191  * ipr_reset_freeze - Hold off all I/O activity
9192  * @ipr_cmd:    ipr command struct
9193  *
9194  * Description: If the PCI slot is frozen, hold off all I/O
9195  * activity; then, as soon as the slot is available again,
9196  * initiate an adapter reset.
9197  *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/
9198 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9199 {
9200         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9201         int i;
9202
9203         /* Disallow new interrupts, avoid loop */
9204         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9205                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9206                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9207                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9208         }
9209         wmb();
9210         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9211         ipr_cmd->done = ipr_reset_ioa_job;
9212         return IPR_RC_JOB_RETURN;
9213 }
9214
9215 /**
9216  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9217  * @pdev:       PCI device struct
9218  *
9219  * Description: This routine is called to tell us that the MMIO
9220  * access to the IOA has been restored.
9221  *
 * Return value:
 *      PCI_ERS_RESULT_NEED_RESET
 **/
9222 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9223 {
9224         unsigned long flags = 0;
9225         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9226
9227         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9228         if (!ioa_cfg->probe_done)
9229                 pci_save_state(pdev);
9230         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9231         return PCI_ERS_RESULT_NEED_RESET;
9232 }
9233
9234 /**
9235  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9236  * @pdev:       PCI device struct
9237  *
9238  * Description: This routine is called to tell us that the PCI bus
9239  * is down. Can't do anything here, except put the device driver
9240  * into a holding pattern, waiting for the PCI bus to come back.
9241  */
9242 static void ipr_pci_frozen(struct pci_dev *pdev)
9243 {
9244         unsigned long flags = 0;
9245         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9246
9247         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9248         if (ioa_cfg->probe_done)
9249                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9250         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9251 }
9252
9253 /**
9254  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9255  * @pdev:       PCI device struct
9256  *
9257  * Description: This routine is called by the pci error recovery
9258  * code after the PCI slot has been reset, just before we
9259  * should resume normal operations.
9260  *
 * Return value:
 *      PCI_ERS_RESULT_RECOVERED
 **/
9261 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9262 {
9263         unsigned long flags = 0;
9264         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9265
9266         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9267         if (ioa_cfg->probe_done) {
9268                 if (ioa_cfg->needs_warm_reset)
9269                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9270                 else
9271                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9272                                                 IPR_SHUTDOWN_NONE);
9273         } else
9274                 wake_up_all(&ioa_cfg->eeh_wait_q);
9275         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9276         return PCI_ERS_RESULT_RECOVERED;
9277 }
9278
9279 /**
9280  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9281  * @pdev:       PCI device struct
9282  *
9283  * Description: This routine is called when the PCI bus has
9284  * permanently failed.
9285  */
9286 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9287 {
9288         unsigned long flags = 0;
9289         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9290         int i;
9291
9292         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9293         if (ioa_cfg->probe_done) {
9294                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9295                         ioa_cfg->sdt_state = ABORT_DUMP;
9296                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9297                 ioa_cfg->in_ioa_bringdown = 1;
9298                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9299                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9300                         ioa_cfg->hrrq[i].allow_cmds = 0;
9301                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9302                 }
9303                 wmb();
9304                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9305         } else
9306                 wake_up_all(&ioa_cfg->eeh_wait_q);
9307         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9308 }
9309
9310 /**
9311  * ipr_pci_error_detected - Called when a PCI error is detected.
9312  * @pdev:       PCI device struct
9313  * @state:      PCI channel state
9314  *
9315  * Description: Called when a PCI error is detected.
9316  *
9317  * Return value:
9318  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9319  */
9320 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9321                                                pci_channel_state_t state)
9322 {
9323         switch (state) {
9324         case pci_channel_io_frozen:
9325                 ipr_pci_frozen(pdev);
9326                 return PCI_ERS_RESULT_CAN_RECOVER;
9327         case pci_channel_io_perm_failure:
9328                 ipr_pci_perm_failure(pdev);
9329                 return PCI_ERS_RESULT_DISCONNECT;
9331         default:
9332                 break;
9333         }
9334         return PCI_ERS_RESULT_NEED_RESET;
9335 }
9336
9337 /**
9338  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9339  * @ioa_cfg:    ioa cfg struct
9340  *
9341  * Description: This is the second phase of adapter initialization.
9342  * This function takes care of initializing the adapter to the point
9343  * where it can accept new commands.
9344  *
9345  * Return value:
9346  *      0 on success / -EIO on failure
9347  **/
9348 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9349 {
9350         int rc = 0;
9351         unsigned long host_lock_flags = 0;
9352
9353         ENTER;
9354         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9355         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9356         ioa_cfg->probe_done = 1;
9357         if (ioa_cfg->needs_hard_reset) {
9358                 ioa_cfg->needs_hard_reset = 0;
9359                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9360         } else
9361                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9362                                         IPR_SHUTDOWN_NONE);
9363         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9364
9365         LEAVE;
9366         return rc;
9367 }
9368
9369 /**
9370  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9371  * @ioa_cfg:    ioa config struct
9372  *
9373  * Return value:
9374  *      none
9375  **/
9376 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9377 {
9378         int i;
9379
9380         if (ioa_cfg->ipr_cmnd_list) {
9381                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9382                         if (ioa_cfg->ipr_cmnd_list[i])
9383                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9384                                               ioa_cfg->ipr_cmnd_list[i],
9385                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9386
9387                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9388                 }
9389         }
9390
9391         if (ioa_cfg->ipr_cmd_pool)
9392                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9393
9394         kfree(ioa_cfg->ipr_cmnd_list);
9395         kfree(ioa_cfg->ipr_cmnd_list_dma);
9396         ioa_cfg->ipr_cmnd_list = NULL;
9397         ioa_cfg->ipr_cmnd_list_dma = NULL;
9398         ioa_cfg->ipr_cmd_pool = NULL;
9399 }
9400
9401 /**
9402  * ipr_free_mem - Frees memory allocated for an adapter
9403  * @ioa_cfg:    ioa cfg struct
9404  *
9405  * Return value:
9406  *      nothing
9407  **/
9408 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9409 {
9410         int i;
9411
9412         kfree(ioa_cfg->res_entries);
9413         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9414                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9415         ipr_free_cmd_blks(ioa_cfg);
9416
9417         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9418                 dma_free_coherent(&ioa_cfg->pdev->dev,
9419                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9420                                   ioa_cfg->hrrq[i].host_rrq,
9421                                   ioa_cfg->hrrq[i].host_rrq_dma);
9422
9423         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9424                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9425
9426         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9427                 dma_free_coherent(&ioa_cfg->pdev->dev,
9428                                   sizeof(struct ipr_hostrcb),
9429                                   ioa_cfg->hostrcb[i],
9430                                   ioa_cfg->hostrcb_dma[i]);
9431         }
9432
9433         ipr_free_dump(ioa_cfg);
9434         kfree(ioa_cfg->trace);
9435 }
9436
9437 /**
9438  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9439  * @ioa_cfg:    ipr cfg struct
9440  *
9441  * This function frees all allocated IRQs for the
9442  * specified adapter.
9443  *
9444  * Return value:
9445  *      none
9446  **/
9447 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9448 {
9449         struct pci_dev *pdev = ioa_cfg->pdev;
9450
9451         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9452             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9453                 int i;
9454                 for (i = 0; i < ioa_cfg->nvectors; i++)
9455                         free_irq(ioa_cfg->vectors_info[i].vec,
9456                                  &ioa_cfg->hrrq[i]);
9457         } else
9458                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9459
9460         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9461                 pci_disable_msi(pdev);
9462                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9463         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9464                 pci_disable_msix(pdev);
9465                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9466         }
9467 }
9468
9469 /**
9470  * ipr_free_all_resources - Free all allocated resources for an adapter.
9471  * @ipr_cmd:    ipr command struct
9472  *
9473  * This function frees all allocated resources for the
9474  * specified adapter.
9475  *
9476  * Return value:
9477  *      none
9478  **/
9479 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9480 {
9481         struct pci_dev *pdev = ioa_cfg->pdev;
9482
9483         ENTER;
9484         ipr_free_irqs(ioa_cfg);
9485         if (ioa_cfg->reset_work_q)
9486                 destroy_workqueue(ioa_cfg->reset_work_q);
9487         iounmap(ioa_cfg->hdw_dma_regs);
9488         pci_release_regions(pdev);
9489         ipr_free_mem(ioa_cfg);
9490         scsi_host_put(ioa_cfg->host);
9491         pci_disable_device(pdev);
9492         LEAVE;
9493 }
9494
9495 /**
9496  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9497  * @ioa_cfg:    ioa config struct
9498  *
9499  * Return value:
9500  *      0 on success / -ENOMEM on allocation failure
9501  **/
9502 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9503 {
9504         struct ipr_cmnd *ipr_cmd;
9505         struct ipr_ioarcb *ioarcb;
9506         dma_addr_t dma_addr;
9507         int i, entries_each_hrrq, hrrq_id = 0;
9508
9509         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9510                                                 sizeof(struct ipr_cmnd), 512, 0);
9511
9512         if (!ioa_cfg->ipr_cmd_pool)
9513                 return -ENOMEM;
9514
9515         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9516         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9517
9518         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9519                 ipr_free_cmd_blks(ioa_cfg);
9520                 return -ENOMEM;
9521         }
9522
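	/*
	 * Carve the command block ids up among the HRRQs: with more than
	 * one queue, hrrq[0] is reserved for internal commands and the
	 * remaining ids are split evenly across the other queues.
	 */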
9523         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9524                 if (ioa_cfg->hrrq_num > 1) {
9525                         if (i == 0) {
9526                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9527                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9528                                 ioa_cfg->hrrq[i].max_cmd_id =
9529                                         (entries_each_hrrq - 1);
9530                         } else {
9531                                 entries_each_hrrq =
9532                                         IPR_NUM_BASE_CMD_BLKS/
9533                                         (ioa_cfg->hrrq_num - 1);
9534                                 ioa_cfg->hrrq[i].min_cmd_id =
9535                                         IPR_NUM_INTERNAL_CMD_BLKS +
9536                                         (i - 1) * entries_each_hrrq;
9537                                 ioa_cfg->hrrq[i].max_cmd_id =
9538                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9539                                         i * entries_each_hrrq - 1);
9540                         }
9541                 } else {
9542                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9543                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9544                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9545                 }
9546                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9547         }
9548
9549         BUG_ON(ioa_cfg->hrrq_num == 0);
9550
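	/* Give any ids lost to the integer division to the last hrrq */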
9551         i = IPR_NUM_CMD_BLKS -
9552                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9553         if (i > 0) {
9554                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9555                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9556         }
9557
9558         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9559                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9560
9561                 if (!ipr_cmd) {
9562                         ipr_free_cmd_blks(ioa_cfg);
9563                         return -ENOMEM;
9564                 }
9565
9566                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9567                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9568                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9569
9570                 ioarcb = &ipr_cmd->ioarcb;
9571                 ipr_cmd->dma_addr = dma_addr;
9572                 if (ioa_cfg->sis64)
9573                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9574                 else
9575                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9576
9577                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9578                 if (ioa_cfg->sis64) {
9579                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9580                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9581                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9582                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9583                 } else {
9584                         ioarcb->write_ioadl_addr =
9585                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9586                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9587                         ioarcb->ioasa_host_pci_addr =
9588                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9589                 }
9590                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9591                 ipr_cmd->cmd_index = i;
9592                 ipr_cmd->ioa_cfg = ioa_cfg;
9593                 ipr_cmd->sense_buffer_dma = dma_addr +
9594                         offsetof(struct ipr_cmnd, sense_buffer);
9595
9596                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9597                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9598                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9599                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9600                         hrrq_id++;
9601         }
9602
9603         return 0;
9604 }
9605
9606 /**
9607  * ipr_alloc_mem - Allocate memory for an adapter
9608  * @ioa_cfg:    ioa config struct
9609  *
9610  * Return value:
9611  *      0 on success / non-zero for error
9612  **/
9613 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9614 {
9615         struct pci_dev *pdev = ioa_cfg->pdev;
9616         int i, rc = -ENOMEM;
9617
9618         ENTER;
9619         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9620                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9621
9622         if (!ioa_cfg->res_entries)
9623                 goto out;
9624
9625         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9626                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9627                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9628         }
9629
9630         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9631                                               sizeof(struct ipr_misc_cbs),
9632                                               &ioa_cfg->vpd_cbs_dma,
9633                                               GFP_KERNEL);
9634
9635         if (!ioa_cfg->vpd_cbs)
9636                 goto out_free_res_entries;
9637
9638         if (ipr_alloc_cmd_blks(ioa_cfg))
9639                 goto out_free_vpd_cbs;
9640
9641         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9642                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9643                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9644                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9645                                         GFP_KERNEL);
9646
9647                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9648                                 while (--i >= 0)
9649                                 dma_free_coherent(&pdev->dev,
9650                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9651                                         ioa_cfg->hrrq[i].host_rrq,
9652                                         ioa_cfg->hrrq[i].host_rrq_dma);
9653                         goto out_ipr_free_cmd_blocks;
9654                 }
9655                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9656         }
9657
9658         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9659                                                   ioa_cfg->cfg_table_size,
9660                                                   &ioa_cfg->cfg_table_dma,
9661                                                   GFP_KERNEL);
9662
9663         if (!ioa_cfg->u.cfg_table)
9664                 goto out_free_host_rrq;
9665
9666         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9667                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9668                                                          sizeof(struct ipr_hostrcb),
9669                                                          &ioa_cfg->hostrcb_dma[i],
9670                                                          GFP_KERNEL);
9671
9672                 if (!ioa_cfg->hostrcb[i])
9673                         goto out_free_hostrcb_dma;
9674
9675                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9676                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9677                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9678                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9679         }
9680
9681         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9682                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9683
9684         if (!ioa_cfg->trace)
9685                 goto out_free_hostrcb_dma;
9686
9687         rc = 0;
9688 out:
9689         LEAVE;
9690         return rc;
9691
9692 out_free_hostrcb_dma:
9693         while (i-- > 0) {
9694                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9695                                   ioa_cfg->hostrcb[i],
9696                                   ioa_cfg->hostrcb_dma[i]);
9697         }
9698         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9699                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9700 out_free_host_rrq:
9701         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9702                 dma_free_coherent(&pdev->dev,
9703                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9704                                   ioa_cfg->hrrq[i].host_rrq,
9705                                   ioa_cfg->hrrq[i].host_rrq_dma);
9706         }
9707 out_ipr_free_cmd_blocks:
9708         ipr_free_cmd_blks(ioa_cfg);
9709 out_free_vpd_cbs:
9710         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9711                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9712 out_free_res_entries:
9713         kfree(ioa_cfg->res_entries);
9714         goto out;
9715 }
9716
9717 /**
9718  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9719  * @ioa_cfg:    ioa config struct
9720  *
9721  * Return value:
9722  *      none
9723  **/
9724 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9725 {
9726         int i;
9727
9728         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9729                 ioa_cfg->bus_attr[i].bus = i;
9730                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9731                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9732                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9733                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9734                 else
9735                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9736         }
9737 }
9738
9739 /**
9740  * ipr_init_regs - Initialize IOA registers
9741  * @ioa_cfg:    ioa config struct
9742  *
9743  * Return value:
9744  *      none
9745  **/
9746 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9747 {
9748         const struct ipr_interrupt_offsets *p;
9749         struct ipr_interrupts *t;
9750         void __iomem *base;
9751
9752         p = &ioa_cfg->chip_cfg->regs;
9753         t = &ioa_cfg->regs;
9754         base = ioa_cfg->hdw_dma_regs;
9755
9756         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9757         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9758         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9759         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9760         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9761         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9762         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9763         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9764         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9765         t->ioarrin_reg = base + p->ioarrin_reg;
        t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
        t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
        t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
        t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
        t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
        t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

        if (ioa_cfg->sis64) {
                t->init_feedback_reg = base + p->init_feedback_reg;
                t->dump_addr_reg = base + p->dump_addr_reg;
                t->dump_data_reg = base + p->dump_data_reg;
                t->endian_swap_reg = base + p->endian_swap_reg;
        }
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:    ioa config struct
 * @host:       scsi host struct
 * @pdev:       PCI dev struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                             struct Scsi_Host *host, struct pci_dev *pdev)
{
        int i;

        ioa_cfg->host = host;
        ioa_cfg->pdev = pdev;
        ioa_cfg->log_level = ipr_log_level;
        ioa_cfg->doorbell = IPR_DOORBELL;
        sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
        sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
        sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
        sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
        sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
        sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

        INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
        INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
        init_waitqueue_head(&ioa_cfg->reset_wait_q);
        init_waitqueue_head(&ioa_cfg->msi_wait_q);
        init_waitqueue_head(&ioa_cfg->eeh_wait_q);
        ioa_cfg->sdt_state = INACTIVE;

        ipr_initialize_bus_attr(ioa_cfg);
        ioa_cfg->max_devs_supported = ipr_max_devs;

        if (ioa_cfg->sis64) {
                host->max_channel = IPR_MAX_SIS64_BUSES;
                host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
                        ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
                                           + ((sizeof(struct ipr_config_table_entry64)
                                               * ioa_cfg->max_devs_supported)));
        } else {
                host->max_channel = IPR_VSET_BUS;
                host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
                        ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
                                           + ((sizeof(struct ipr_config_table_entry)
                                               * ioa_cfg->max_devs_supported)));
        }

        host->unique_id = host->host_no;
        host->max_cmd_len = IPR_MAX_CDB_LEN;
        host->can_queue = ioa_cfg->max_cmds;
        pci_set_drvdata(pdev, ioa_cfg);

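        /*
         * HRRQ 0 is serviced by the base interrupt handler and shares the
         * SCSI host lock; any additional HRRQs are driven from their own
         * MSI/MSI-X vectors and use their own per-queue locks.
         */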
        for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
                spin_lock_init(&ioa_cfg->hrrq[i]._lock);
                if (i == 0)
                        ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
                else
                        ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
        }
}

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:     PCI device id struct
 *
 * Return value:
 *      ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
                if (ipr_chip[i].vendor == dev_id->vendor &&
                    ipr_chip[i].device == dev_id->device)
                        return &ipr_chip[i];
        return NULL;
}

/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 *                                 during probe time
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      None
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
        struct pci_dev *pdev = ioa_cfg->pdev;

        if (pci_channel_offline(pdev)) {
                wait_event_timeout(ioa_cfg->eeh_wait_q,
                                   !pci_channel_offline(pdev),
                                   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
                pci_restore_state(pdev);
        }
}

static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
        struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
        int i, vectors;

        for (i = 0; i < ARRAY_SIZE(entries); ++i)
                entries[i].entry = i;

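        /*
         * pci_enable_msix_range() allocates between 1 and ipr_number_of_msix
         * vectors and returns the number actually allocated, or a negative
         * errno if even the minimum could not be satisfied.
         */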
        vectors = pci_enable_msix_range(ioa_cfg->pdev,
                                        entries, 1, ipr_number_of_msix);
        if (vectors < 0) {
                ipr_wait_for_pci_err_recovery(ioa_cfg);
                return vectors;
        }

        for (i = 0; i < vectors; i++)
                ioa_cfg->vectors_info[i].vec = entries[i].vector;
        ioa_cfg->nvectors = vectors;

        return 0;
}

static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
        int i, vectors;

        vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
        if (vectors < 0) {
                ipr_wait_for_pci_err_recovery(ioa_cfg);
                return vectors;
        }

        for (i = 0; i < vectors; i++)
                ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
        ioa_cfg->nvectors = vectors;

        return 0;
}

static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
        int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

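        /*
         * Build a human-readable name for each vector, e.g. "host2-0",
         * "host2-1", ... for SCSI host number 2 (illustrative values); these
         * strings appear in /proc/interrupts once the IRQs are requested.
         */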
        for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
                snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
                         "host%d-%d", ioa_cfg->host->host_no, vec_idx);
                ioa_cfg->vectors_info[vec_idx].
                        desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
        }
}

static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
        int i, rc;

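        /*
         * Vector 0 is requested separately in ipr_probe_ioa(); request the
         * remaining vectors here and unwind any that succeeded if one fails.
         */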
        for (i = 1; i < ioa_cfg->nvectors; i++) {
                rc = request_irq(ioa_cfg->vectors_info[i].vec,
                        ipr_isr_mhrrq,
                        0,
                        ioa_cfg->vectors_info[i].desc,
                        &ioa_cfg->hrrq[i]);
                if (rc) {
                        while (--i >= 0)
                                free_irq(ioa_cfg->vectors_info[i].vec,
                                        &ioa_cfg->hrrq[i]);
                        return rc;
                }
        }
        return 0;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:        interrupt number
 * @devp:       pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *      IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
        unsigned long lock_flags = 0;
        irqreturn_t rc = IRQ_HANDLED;

        dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

        ioa_cfg->msi_received = 1;
        wake_up(&ioa_cfg->msi_wait_q);

        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:    ioa config struct
 * @pdev:       PCI device struct
 *
 * Description: The return value from pci_enable_msi_range() cannot always be
 * trusted.  This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
        int rc;
        volatile u32 int_reg;
        unsigned long lock_flags = 0;

        ENTER;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        init_waitqueue_head(&ioa_cfg->msi_wait_q);
        ioa_cfg->msi_received = 0;
        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

        if (ioa_cfg->intr_flag == IPR_USE_MSIX)
                rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
        else
                rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
        if (rc) {
                dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
                return rc;
        } else if (ipr_debug)
                dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

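        /*
         * Writing IPR_PCII_IO_DEBUG_ACKNOWLEDGE to the sense interrupt
         * register causes the adapter to raise the test interrupt; wait up
         * to one second (HZ jiffies) for ipr_test_intr() to observe it.
         */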
        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
        wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

        if (!ioa_cfg->msi_received) {
                /* MSI test failed */
                dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
                rc = -EOPNOTSUPP;
        } else if (ipr_debug)
                dev_info(&pdev->dev, "MSI test succeeded.\n");

        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

        if (ioa_cfg->intr_flag == IPR_USE_MSIX)
                free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
        else
                free_irq(pdev->irq, ioa_cfg);

        LEAVE;

        return rc;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
                         const struct pci_device_id *dev_id)
{
        struct ipr_ioa_cfg *ioa_cfg;
        struct Scsi_Host *host;
        unsigned long ipr_regs_pci;
        void __iomem *ipr_regs;
        int rc = PCIBIOS_SUCCESSFUL;
        volatile u32 mask, uproc, interrupts;
        unsigned long lock_flags, driver_lock_flags;

        ENTER;

        dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
        host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

        if (!host) {
                dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
                rc = -ENOMEM;
                goto out;
        }

        ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
        memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
        ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

        ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

        if (!ioa_cfg->ipr_chip) {
                dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
                        dev_id->vendor, dev_id->device);
                goto out_scsi_host_put;
        }

        /* set SIS 32 or SIS 64 */
        ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
        ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
        ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
        ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

        if (ipr_transop_timeout)
                ioa_cfg->transop_timeout = ipr_transop_timeout;
        else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
                ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
        else
                ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

        ioa_cfg->revid = pdev->revision;

        ipr_init_ioa_cfg(ioa_cfg, host, pdev);

        ipr_regs_pci = pci_resource_start(pdev, 0);

        rc = pci_request_regions(pdev, IPR_NAME);
        if (rc < 0) {
                dev_err(&pdev->dev,
                        "Couldn't register memory range of registers\n");
                goto out_scsi_host_put;
        }

        rc = pci_enable_device(pdev);

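        /*
         * If the slot is going through EEH error recovery, wait for the
         * recovery to complete and then retry the enable once.
         */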
        if (rc || pci_channel_offline(pdev)) {
                if (pci_channel_offline(pdev)) {
                        ipr_wait_for_pci_err_recovery(ioa_cfg);
                        rc = pci_enable_device(pdev);
                }

                if (rc) {
                        dev_err(&pdev->dev, "Cannot enable adapter\n");
                        ipr_wait_for_pci_err_recovery(ioa_cfg);
                        goto out_release_regions;
                }
        }

        ipr_regs = pci_ioremap_bar(pdev, 0);

        if (!ipr_regs) {
                dev_err(&pdev->dev,
                        "Couldn't map memory range of registers\n");
                rc = -ENOMEM;
                goto out_disable;
        }

        ioa_cfg->hdw_dma_regs = ipr_regs;
        ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
        ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

        ipr_init_regs(ioa_cfg);

        if (ioa_cfg->sis64) {
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
                if (rc < 0) {
                        dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
                        rc = dma_set_mask_and_coherent(&pdev->dev,
                                                       DMA_BIT_MASK(32));
                }
        } else
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

        if (rc < 0) {
                dev_err(&pdev->dev, "Failed to set DMA mask\n");
                goto cleanup_nomem;
        }

        rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
                                   ioa_cfg->chip_cfg->cache_line_size);

        if (rc != PCIBIOS_SUCCESSFUL) {
                dev_err(&pdev->dev, "Write of cache line size failed\n");
                ipr_wait_for_pci_err_recovery(ioa_cfg);
                rc = -EIO;
                goto cleanup_nomem;
        }

        /* Issue MMIO read to ensure card is not in EEH */
        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
        ipr_wait_for_pci_err_recovery(ioa_cfg);

        if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
                dev_err(&pdev->dev, "The max number of MSIX is %d\n",
                        IPR_MAX_MSIX_VECTORS);
                ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
        }

        if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
                        ipr_enable_msix(ioa_cfg) == 0)
                ioa_cfg->intr_flag = IPR_USE_MSIX;
        else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
                        ipr_enable_msi(ioa_cfg) == 0)
                ioa_cfg->intr_flag = IPR_USE_MSI;
        else {
                ioa_cfg->intr_flag = IPR_USE_LSI;
                ioa_cfg->clear_isr = 1;
                ioa_cfg->nvectors = 1;
                dev_info(&pdev->dev, "Cannot enable MSI.\n");
        }

        pci_set_master(pdev);

        if (pci_channel_offline(pdev)) {
                ipr_wait_for_pci_err_recovery(ioa_cfg);
                pci_set_master(pdev);
                if (pci_channel_offline(pdev)) {
                        rc = -EIO;
                        goto out_msi_disable;
                }
        }

        if (ioa_cfg->intr_flag == IPR_USE_MSI ||
            ioa_cfg->intr_flag == IPR_USE_MSIX) {
                rc = ipr_test_msi(ioa_cfg, pdev);
                if (rc == -EOPNOTSUPP) {
                        ipr_wait_for_pci_err_recovery(ioa_cfg);
                        if (ioa_cfg->intr_flag == IPR_USE_MSI) {
                                ioa_cfg->intr_flag &= ~IPR_USE_MSI;
                                pci_disable_msi(pdev);
                        } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
                                ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
                                pci_disable_msix(pdev);
                        }

                        ioa_cfg->intr_flag = IPR_USE_LSI;
                        ioa_cfg->nvectors = 1;
                } else if (rc)
                        goto out_msi_disable;
                else {
                        if (ioa_cfg->intr_flag == IPR_USE_MSI)
                                dev_info(&pdev->dev,
                                        "Request for %d MSIs succeeded with starting IRQ: %d\n",
                                        ioa_cfg->nvectors, pdev->irq);
                        else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
                                dev_info(&pdev->dev,
                                        "Request for %d MSIXs succeeded.\n",
                                        ioa_cfg->nvectors);
                }
        }

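        /*
         * Size the number of HRRQs to whichever is smallest: the vectors we
         * were granted, the online CPU count, or the driver maximum. For
         * example, 16 vectors on an 8-CPU system would yield 8 HRRQs
         * (assuming IPR_MAX_HRRQ_NUM is at least 8).
         */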
        ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
                                 (unsigned int)num_online_cpus(),
                                 (unsigned int)IPR_MAX_HRRQ_NUM);

        if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
                goto out_msi_disable;

        if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
                goto out_msi_disable;

        rc = ipr_alloc_mem(ioa_cfg);
        if (rc < 0) {
                dev_err(&pdev->dev,
                        "Couldn't allocate enough memory for device driver!\n");
                goto out_msi_disable;
        }

        /* Save away PCI config space for use following IOA reset */
        rc = pci_save_state(pdev);

        if (rc != PCIBIOS_SUCCESSFUL) {
                dev_err(&pdev->dev, "Failed to save PCI config space\n");
                rc = -EIO;
                goto cleanup_nolog;
        }

        /*
         * If HRRQ updated interrupt is not masked, or reset alert is set,
         * the card is in an unknown state and needs a hard reset
         */
        mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
        uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
        if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
                ioa_cfg->needs_hard_reset = 1;
        if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
                ioa_cfg->needs_hard_reset = 1;
        if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
                ioa_cfg->ioa_unit_checked = 1;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

        if (ioa_cfg->intr_flag == IPR_USE_MSI
                        || ioa_cfg->intr_flag == IPR_USE_MSIX) {
                name_msi_vectors(ioa_cfg);
                rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
                        0,
                        ioa_cfg->vectors_info[0].desc,
                        &ioa_cfg->hrrq[0]);
                if (!rc)
                        rc = ipr_request_other_msi_irqs(ioa_cfg);
        } else {
                rc = request_irq(pdev->irq, ipr_isr,
                         IRQF_SHARED,
                         IPR_NAME, &ioa_cfg->hrrq[0]);
        }
        if (rc) {
                dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
                        pdev->irq, rc);
                goto cleanup_nolog;
        }

        if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
            (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
                ioa_cfg->needs_warm_reset = 1;
                ioa_cfg->reset = ipr_reset_slot_reset;

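                /*
                 * An ordered workqueue keeps reset jobs serialized, and
                 * WQ_MEM_RECLAIM guarantees a rescuer thread so resets can
                 * make forward progress even under memory pressure.
                 */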
                ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
                                                                WQ_MEM_RECLAIM, host->host_no);

                if (!ioa_cfg->reset_work_q) {
                        dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
                        rc = -ENOMEM;
                        goto out_free_irq;
                }
        } else
                ioa_cfg->reset = ipr_reset_start_bist;

        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
        list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

        LEAVE;
out:
        return rc;

out_free_irq:
        ipr_free_irqs(ioa_cfg);
cleanup_nolog:
        ipr_free_mem(ioa_cfg);
out_msi_disable:
        ipr_wait_for_pci_err_recovery(ioa_cfg);
        if (ioa_cfg->intr_flag == IPR_USE_MSI)
                pci_disable_msi(pdev);
        else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
                pci_disable_msix(pdev);
cleanup_nomem:
        iounmap(ipr_regs);
out_disable:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
out_scsi_host_put:
        scsi_host_put(host);
        goto out;
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:            ioa config struct
 * @shutdown_type:      shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *      none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
                                       enum ipr_shutdown_type shutdown_type)
{
        ENTER;
        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
                ioa_cfg->sdt_state = ABORT_DUMP;
        ioa_cfg->reset_retries = 0;
        ioa_cfg->in_ioa_bringdown = 1;
        ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
        LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:       pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *      none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
        unsigned long host_lock_flags = 0;
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
        int i;
        unsigned long driver_lock_flags;
        ENTER;

        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
        while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
        }

        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].removing_ioa = 1;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
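        /*
         * Make the removing_ioa flags visible to the interrupt handlers
         * before the bringdown is initiated.
         */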
        wmb();
        ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        flush_work(&ioa_cfg->work_q);
        if (ioa_cfg->reset_work_q)
                flush_workqueue(ioa_cfg->reset_work_q);
        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
        list_del(&ioa_cfg->queue);
        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

        if (ioa_cfg->sdt_state == ABORT_DUMP)
                ioa_cfg->sdt_state = WAIT_FOR_DUMP;
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

        ipr_free_all_resources(ioa_cfg);

        LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:       pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *      none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

        ENTER;

        ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                              &ipr_trace_attr);
        ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
                             &ipr_dump_attr);
        sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
                        &ipr_ioa_async_err_log);
        scsi_remove_host(ioa_cfg->host);

        __ipr_remove(pdev);

        LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
        struct ipr_ioa_cfg *ioa_cfg;
        unsigned long flags;
        int rc, i;

        rc = ipr_probe_ioa(pdev, dev_id);

        if (rc)
                return rc;

        ioa_cfg = pci_get_drvdata(pdev);
        rc = ipr_probe_ioa_part2(ioa_cfg);

        if (rc) {
                __ipr_remove(pdev);
                return rc;
        }

        rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

        if (rc) {
                __ipr_remove(pdev);
                return rc;
        }

        rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                   &ipr_trace_attr);

        if (rc) {
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }

        rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
                        &ipr_ioa_async_err_log);

        if (rc) {
                ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
                                &ipr_dump_attr);
                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                &ipr_trace_attr);
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }

        rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
                                  &ipr_dump_attr);

        if (rc) {
                sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_ioa_async_err_log);
                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_trace_attr);
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        ioa_cfg->scan_enabled = 1;
        schedule_work(&ioa_cfg->work_q);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
                        irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
                                      ioa_cfg->iopoll_weight, ipr_iopoll);
                }
        }

        scsi_scan_host(ioa_cfg->host);

        return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:       pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *      none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
        unsigned long lock_flags = 0;
        enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
        int i;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                ioa_cfg->iopoll_weight = 0;
                for (i = 1; i < ioa_cfg->hrrq_num; i++)
                        irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
        }

        while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        }

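        /*
         * With the ipr_fast_reboot module parameter set, a 64-bit SIS
         * adapter can take the quicker quiesce path on restart instead of
         * a full cache-flushing shutdown.
         */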
        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
                shutdown_type = IPR_SHUTDOWN_QUIESCE;

        ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
                ipr_free_irqs(ioa_cfg);
                pci_disable_device(ioa_cfg->pdev);
        }
}

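/*
 * Each entry is { vendor, device, subvendor, subdevice, class, class_mask,
 * driver_data }; the driver_data slot carries per-adapter flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET.
 */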
static struct pci_device_id ipr_pci_table[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
        { }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
        .error_detected = ipr_pci_error_detected,
        .mmio_enabled = ipr_pci_mmio_enabled,
        .slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
        .name = IPR_NAME,
        .id_table = ipr_pci_table,
        .probe = ipr_probe,
        .remove = ipr_remove,
        .shutdown = ipr_shutdown,
        .err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         notifier block
 * @event:      reboot notifier event
 * @buf:        unused
 *
 * Return value:
 *      NOTIFY_OK on handled events / NOTIFY_DONE otherwise
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioa_cfg *ioa_cfg;
        unsigned long flags = 0, driver_lock_flags;

        if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
                return NOTIFY_DONE;

        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

        list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
                    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
                        continue;
                }

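                /*
                 * Build and issue a shutdown-prepare IOA command so the
                 * adapter can flush its write cache before the system goes
                 * down.
                 */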
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

                ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
        }
        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

        return NOTIFY_OK;
}

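/* Positional initializer: .notifier_call = ipr_halt, .next = NULL, .priority = 0 */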
static struct notifier_block ipr_notifier = {
        ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *      0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
        ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
                 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

        register_reboot_notifier(&ipr_notifier);
        return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *      none
 **/
static void __exit ipr_exit(void)
{
        unregister_reboot_notifier(&ipr_notifier);
        pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);