GNU Linux-libre 4.14.313-gnu1
drivers/scsi/ipr.c
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
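/* Each entry below pairs the chip-level parameters with a nested,
 * unnamed initializer that fills the chip's register-offset member
 * (declared in ipr.h); the offsets are applied to the adapter's
 * mapped register space. */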
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
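
/*
 * Example module load (illustrative values, not a recommendation):
 *
 *   modprobe ipr max_speed=1 log_level=2 number_of_msix=8
 *
 * Parameters registered above with permission 0 are load-time only;
 * those registered with S_IRUGO | S_IWUSR are also exposed under
 * /sys/module/ipr/parameters/ and can be changed at runtime.
 */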

/* A constant array of IOASCs/URCs/Error Messages */
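/* Each entry is { IOASC value, flag, log level, message text }; the
 * flag and log level control how verbosely a matching error is
 * logged (see the ipr_error_table_t definition in ipr.h for the
 * exact field names). */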
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
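	/* Write barrier: make the trace-entry stores above visible
	 * before this ring slot can be claimed again (assumed intent
	 * of the wmb() below). */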
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

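/**
 * ipr_get_hrrq_index - Pick an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * Round-robins across the HRR queues, skipping queue 0
 * (IPR_INIT_HRRQ) whenever more than one queue is configured.
 *
 * Return value:
 *	hrrq index
 **/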
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

1301 /**
1302  * __ipr_format_res_path - Format the resource path for printing.
1303  * @res_path:   resource path
1304  * @buffer:     buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
1315         *p = '\0';
1316         p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1317         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318                 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1319
1320         return buffer;
1321 }
1322
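/*
 * Illustrative usage (hypothetical demo function, under #if 0): a sis64
 * resource path is a byte array terminated by 0xff, and
 * __ipr_format_res_path() renders the valid bytes as dash-separated hex.
 */
#if 0
static void demo_res_path(void)
{
        u8 res_path[8] = { 0x00, 0x01, 0x1c, 0xff, 0xff, 0xff, 0xff, 0xff };
        char buffer[IPR_MAX_RES_PATH_LENGTH];

        /* Prints "00-01-1C": bytes are consumed up to the 0xff terminator. */
        printk(KERN_INFO "%s\n",
               __ipr_format_res_path(res_path, buffer, sizeof(buffer)));
}
#endif
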
1323 /**
1324  * ipr_format_res_path - Format the resource path for printing.
1325  * @ioa_cfg:    ioa config struct
1326  * @res_path:   resource path
1327  * @buffer:     buffer
1328  * @len:        length of buffer provided
1329  *
1330  * Return value:
1331  *      pointer to buffer
1332  **/
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334                                  u8 *res_path, char *buffer, int len)
1335 {
1336         char *p = buffer;
1337
1338         *p = '\0';
1339         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340         __ipr_format_res_path(res_path, p, len - (p - buffer));
1341         return buffer;
1342 }
1343
1344 /**
1345  * ipr_update_res_entry - Update the resource entry.
1346  * @res:        resource entry struct
1347  * @cfgtew:     config table entry wrapper struct
1348  *
1349  * Return value:
1350  *      none
1351  **/
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353                                  struct ipr_config_table_entry_wrapper *cfgtew)
1354 {
1355         char buffer[IPR_MAX_RES_PATH_LENGTH];
1356         unsigned int proto;
1357         int new_path = 0;
1358
1359         if (res->ioa_cfg->sis64) {
1360                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362                 res->type = cfgtew->u.cfgte64->res_type;
1363
1364                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365                         sizeof(struct ipr_std_inq_data));
1366
1367                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368                 proto = cfgtew->u.cfgte64->proto;
1369                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373                         sizeof(res->dev_lun.scsi_lun));
1374
1375                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376                                         sizeof(res->res_path))) {
1377                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378                                 sizeof(res->res_path));
1379                         new_path = 1;
1380                 }
1381
1382                 if (res->sdev && new_path)
1383                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384                                     ipr_format_res_path(res->ioa_cfg,
1385                                         res->res_path, buffer, sizeof(buffer)));
1386         } else {
1387                 res->flags = cfgtew->u.cfgte->flags;
1388                 if (res->flags & IPR_IS_IOA_RESOURCE)
1389                         res->type = IPR_RES_TYPE_IOAFP;
1390                 else
1391                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394                         sizeof(struct ipr_std_inq_data));
1395
1396                 res->qmodel = IPR_QUEUEING_MODEL(res);
1397                 proto = cfgtew->u.cfgte->proto;
1398                 res->res_handle = cfgtew->u.cfgte->res_handle;
1399         }
1400
1401         ipr_update_ata_class(res, proto);
1402 }
1403
1404 /**
1405  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406  *                        for the resource.
1407  * @res:        resource entry struct
1409  *
1410  * Return value:
1411  *      none
1412  **/
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414 {
1415         struct ipr_resource_entry *gscsi_res = NULL;
1416         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418         if (!ioa_cfg->sis64)
1419                 return;
1420
1421         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422                 clear_bit(res->target, ioa_cfg->array_ids);
1423         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424                 clear_bit(res->target, ioa_cfg->vset_ids);
1425         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428                                 return;
1429                 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431         } else if (res->bus == 0)
1432                 clear_bit(res->target, ioa_cfg->target_ids);
1433 }
1434
1435 /**
1436  * ipr_handle_config_change - Handle a config change from the adapter
1437  * @ioa_cfg:    ioa config struct
1438  * @hostrcb:    hostrcb
1439  *
1440  * Return value:
1441  *      none
1442  **/
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444                                      struct ipr_hostrcb *hostrcb)
1445 {
1446         struct ipr_resource_entry *res = NULL;
1447         struct ipr_config_table_entry_wrapper cfgtew;
1448         __be32 cc_res_handle;
1449
1450         u32 is_ndn = 1;
1451
1452         if (ioa_cfg->sis64) {
1453                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455         } else {
1456                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458         }
1459
1460         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461                 if (res->res_handle == cc_res_handle) {
1462                         is_ndn = 0;
1463                         break;
1464                 }
1465         }
1466
1467         if (is_ndn) {
1468                 if (list_empty(&ioa_cfg->free_res_q)) {
1469                         ipr_send_hcam(ioa_cfg,
1470                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471                                       hostrcb);
1472                         return;
1473                 }
1474
1475                 res = list_entry(ioa_cfg->free_res_q.next,
1476                                  struct ipr_resource_entry, queue);
1477
1478                 list_del(&res->queue);
1479                 ipr_init_res_entry(res, &cfgtew);
1480                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481         }
1482
1483         ipr_update_res_entry(res, &cfgtew);
1484
1485         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486                 if (res->sdev) {
1487                         res->del_from_ml = 1;
1488                         res->res_handle = IPR_INVALID_RES_HANDLE;
1489                         schedule_work(&ioa_cfg->work_q);
1490                 } else {
1491                         ipr_clear_res_target(res);
1492                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1493                 }
1494         } else if (!res->sdev || res->del_from_ml) {
1495                 res->add_to_ml = 1;
1496                 schedule_work(&ioa_cfg->work_q);
1497         }
1498
1499         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500 }
1501
1502 /**
1503  * ipr_process_ccn - Op done function for a CCN.
1504  * @ipr_cmd:    ipr command struct
1505  *
1506  * This function is the op done function for a configuration change
1507  * notification (a host controlled async message) from the adapter.
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513 {
1514         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1517
1518         list_del_init(&hostrcb->queue);
1519         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1520
1521         if (ioasc) {
1522                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524                         dev_err(&ioa_cfg->pdev->dev,
1525                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528         } else {
1529                 ipr_handle_config_change(ioa_cfg, hostrcb);
1530         }
1531 }
1532
1533 /**
1534  * strip_whitespace - Strip trailing whitespace.
1535  * @i:          size of buffer
1536  * @buf:        string to modify
1537  *
1538  * This function will strip all trailing whitespace and
1539  * NUL terminate the string.
1540  *
1541  **/
1542 static void strip_whitespace(int i, char *buf)
1543 {
1544         if (i < 1)
1545                 return;
1546         i--;
1547         while (i && buf[i] == ' ')
1548                 i--;
1549         buf[i+1] = '\0';
1550 }
1551
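/*
 * Illustrative usage (hypothetical demo function, under #if 0): the
 * fixed-width INQUIRY fields handled below are space padded rather than
 * NUL terminated, which is why strip_whitespace() takes the field length.
 */
#if 0
static void demo_strip_whitespace(void)
{
        char id[IPR_VENDOR_ID_LEN + 1];

        memcpy(id, "IBM     ", IPR_VENDOR_ID_LEN);
        strip_whitespace(IPR_VENDOR_ID_LEN, id);
        /* id now holds the NUL-terminated string "IBM". */
}
#endif
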
1552 /**
1553  * ipr_log_vpd_compact - Log the passed VPD compactly.
1554  * @prefix:             string to print at start of printk
1555  * @hostrcb:    hostrcb pointer
1556  * @vpd:                vendor/product id/sn struct
1557  *
1558  * Return value:
1559  *      none
1560  **/
1561 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1562                                 struct ipr_vpd *vpd)
1563 {
1564         char vendor_id[IPR_VENDOR_ID_LEN + 1];
1565         char product_id[IPR_PROD_ID_LEN + 1];
1566         char sn[IPR_SERIAL_NUM_LEN + 1];
1567
1568         memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569         strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1570
1571         memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572         strip_whitespace(IPR_PROD_ID_LEN, product_id);
1573
1574         memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1575         strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1576
1577         ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1578                      vendor_id, product_id, sn);
1579 }
1580
1581 /**
1582  * ipr_log_vpd - Log the passed VPD to the error log.
1583  * @vpd:                vendor/product id/sn struct
1584  *
1585  * Return value:
1586  *      none
1587  **/
1588 static void ipr_log_vpd(struct ipr_vpd *vpd)
1589 {
1590         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591                     + IPR_SERIAL_NUM_LEN];
1592
1593         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1595                IPR_PROD_ID_LEN);
1596         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597         ipr_err("Vendor/Product ID: %s\n", buffer);
1598
1599         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1600         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601         ipr_err("    Serial Number: %s\n", buffer);
1602 }
1603
1604 /**
1605  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606  * @prefix:             string to print at start of printk
1607  * @hostrcb:    hostrcb pointer
1608  * @vpd:                vendor/product id/sn/wwn struct
1609  *
1610  * Return value:
1611  *      none
1612  **/
1613 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614                                     struct ipr_ext_vpd *vpd)
1615 {
1616         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1619 }
1620
1621 /**
1622  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623  * @vpd:                vendor/product id/sn/wwn struct
1624  *
1625  * Return value:
1626  *      none
1627  **/
1628 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1629 {
1630         ipr_log_vpd(&vpd->vpd);
1631         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632                 be32_to_cpu(vpd->wwid[1]));
1633 }
1634
1635 /**
1636  * ipr_log_enhanced_cache_error - Log a cache error.
1637  * @ioa_cfg:    ioa config struct
1638  * @hostrcb:    hostrcb struct
1639  *
1640  * Return value:
1641  *      none
1642  **/
1643 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644                                          struct ipr_hostrcb *hostrcb)
1645 {
1646         struct ipr_hostrcb_type_12_error *error;
1647
1648         if (ioa_cfg->sis64)
1649                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1650         else
1651                 error = &hostrcb->hcam.u.error.u.type_12_error;
1652
1653         ipr_err("-----Current Configuration-----\n");
1654         ipr_err("Cache Directory Card Information:\n");
1655         ipr_log_ext_vpd(&error->ioa_vpd);
1656         ipr_err("Adapter Card Information:\n");
1657         ipr_log_ext_vpd(&error->cfc_vpd);
1658
1659         ipr_err("-----Expected Configuration-----\n");
1660         ipr_err("Cache Directory Card Information:\n");
1661         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662         ipr_err("Adapter Card Information:\n");
1663         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1664
1665         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666                      be32_to_cpu(error->ioa_data[0]),
1667                      be32_to_cpu(error->ioa_data[1]),
1668                      be32_to_cpu(error->ioa_data[2]));
1669 }
1670
1671 /**
1672  * ipr_log_cache_error - Log a cache error.
1673  * @ioa_cfg:    ioa config struct
1674  * @hostrcb:    hostrcb struct
1675  *
1676  * Return value:
1677  *      none
1678  **/
1679 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680                                 struct ipr_hostrcb *hostrcb)
1681 {
1682         struct ipr_hostrcb_type_02_error *error =
1683                 &hostrcb->hcam.u.error.u.type_02_error;
1684
1685         ipr_err("-----Current Configuration-----\n");
1686         ipr_err("Cache Directory Card Information:\n");
1687         ipr_log_vpd(&error->ioa_vpd);
1688         ipr_err("Adapter Card Information:\n");
1689         ipr_log_vpd(&error->cfc_vpd);
1690
1691         ipr_err("-----Expected Configuration-----\n");
1692         ipr_err("Cache Directory Card Information:\n");
1693         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1694         ipr_err("Adapter Card Information:\n");
1695         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1696
1697         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698                      be32_to_cpu(error->ioa_data[0]),
1699                      be32_to_cpu(error->ioa_data[1]),
1700                      be32_to_cpu(error->ioa_data[2]));
1701 }
1702
1703 /**
1704  * ipr_log_enhanced_config_error - Log a configuration error.
1705  * @ioa_cfg:    ioa config struct
1706  * @hostrcb:    hostrcb struct
1707  *
1708  * Return value:
1709  *      none
1710  **/
1711 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712                                           struct ipr_hostrcb *hostrcb)
1713 {
1714         int errors_logged, i;
1715         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716         struct ipr_hostrcb_type_13_error *error;
1717
1718         error = &hostrcb->hcam.u.error.u.type_13_error;
1719         errors_logged = be32_to_cpu(error->errors_logged);
1720
1721         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722                 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724         dev_entry = error->dev;
1725
1726         for (i = 0; i < errors_logged; i++, dev_entry++) {
1727                 ipr_err_separator;
1728
1729                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730                 ipr_log_ext_vpd(&dev_entry->vpd);
1731
1732                 ipr_err("-----New Device Information-----\n");
1733                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1734
1735                 ipr_err("Cache Directory Card Information:\n");
1736                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1737
1738                 ipr_err("Adapter Card Information:\n");
1739                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1740         }
1741 }
1742
1743 /**
1744  * ipr_log_sis64_config_error - Log a device error.
1745  * @ioa_cfg:    ioa config struct
1746  * @hostrcb:    hostrcb struct
1747  *
1748  * Return value:
1749  *      none
1750  **/
1751 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752                                        struct ipr_hostrcb *hostrcb)
1753 {
1754         int errors_logged, i;
1755         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756         struct ipr_hostrcb_type_23_error *error;
1757         char buffer[IPR_MAX_RES_PATH_LENGTH];
1758
1759         error = &hostrcb->hcam.u.error64.u.type_23_error;
1760         errors_logged = be32_to_cpu(error->errors_logged);
1761
1762         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763                 be32_to_cpu(error->errors_detected), errors_logged);
1764
1765         dev_entry = error->dev;
1766
1767         for (i = 0; i < errors_logged; i++, dev_entry++) {
1768                 ipr_err_separator;
1769
1770                 ipr_err("Device %d : %s", i + 1,
1771                         __ipr_format_res_path(dev_entry->res_path,
1772                                               buffer, sizeof(buffer)));
1773                 ipr_log_ext_vpd(&dev_entry->vpd);
1774
1775                 ipr_err("-----New Device Information-----\n");
1776                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1777
1778                 ipr_err("Cache Directory Card Information:\n");
1779                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1780
1781                 ipr_err("Adapter Card Information:\n");
1782                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1783         }
1784 }
1785
1786 /**
1787  * ipr_log_config_error - Log a configuration error.
1788  * @ioa_cfg:    ioa config struct
1789  * @hostrcb:    hostrcb struct
1790  *
1791  * Return value:
1792  *      none
1793  **/
1794 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795                                  struct ipr_hostrcb *hostrcb)
1796 {
1797         int errors_logged, i;
1798         struct ipr_hostrcb_device_data_entry *dev_entry;
1799         struct ipr_hostrcb_type_03_error *error;
1800
1801         error = &hostrcb->hcam.u.error.u.type_03_error;
1802         errors_logged = be32_to_cpu(error->errors_logged);
1803
1804         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805                 be32_to_cpu(error->errors_detected), errors_logged);
1806
1807         dev_entry = error->dev;
1808
1809         for (i = 0; i < errors_logged; i++, dev_entry++) {
1810                 ipr_err_separator;
1811
1812                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1813                 ipr_log_vpd(&dev_entry->vpd);
1814
1815                 ipr_err("-----New Device Information-----\n");
1816                 ipr_log_vpd(&dev_entry->new_vpd);
1817
1818                 ipr_err("Cache Directory Card Information:\n");
1819                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1820
1821                 ipr_err("Adapter Card Information:\n");
1822                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1823
1824                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825                         be32_to_cpu(dev_entry->ioa_data[0]),
1826                         be32_to_cpu(dev_entry->ioa_data[1]),
1827                         be32_to_cpu(dev_entry->ioa_data[2]),
1828                         be32_to_cpu(dev_entry->ioa_data[3]),
1829                         be32_to_cpu(dev_entry->ioa_data[4]));
1830         }
1831 }
1832
1833 /**
1834  * ipr_log_enhanced_array_error - Log an array configuration error.
1835  * @ioa_cfg:    ioa config struct
1836  * @hostrcb:    hostrcb struct
1837  *
1838  * Return value:
1839  *      none
1840  **/
1841 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842                                          struct ipr_hostrcb *hostrcb)
1843 {
1844         int i, num_entries;
1845         struct ipr_hostrcb_type_14_error *error;
1846         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849         error = &hostrcb->hcam.u.error.u.type_14_error;
1850
1851         ipr_err_separator;
1852
1853         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854                 error->protection_level,
1855                 ioa_cfg->host->host_no,
1856                 error->last_func_vset_res_addr.bus,
1857                 error->last_func_vset_res_addr.target,
1858                 error->last_func_vset_res_addr.lun);
1859
1860         ipr_err_separator;
1861
1862         array_entry = error->array_member;
1863         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1864                             ARRAY_SIZE(error->array_member));
1865
1866         for (i = 0; i < num_entries; i++, array_entry++) {
1867                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1868                         continue;
1869
1870                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871                         ipr_err("Exposed Array Member %d:\n", i);
1872                 else
1873                         ipr_err("Array Member %d:\n", i);
1874
1875                 ipr_log_ext_vpd(&array_entry->vpd);
1876                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878                                  "Expected Location");
1879
1880                 ipr_err_separator;
1881         }
1882 }
1883
1884 /**
1885  * ipr_log_array_error - Log an array configuration error.
1886  * @ioa_cfg:    ioa config struct
1887  * @hostrcb:    hostrcb struct
1888  *
1889  * Return value:
1890  *      none
1891  **/
1892 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893                                 struct ipr_hostrcb *hostrcb)
1894 {
1895         int i;
1896         struct ipr_hostrcb_type_04_error *error;
1897         struct ipr_hostrcb_array_data_entry *array_entry;
1898         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1899
1900         error = &hostrcb->hcam.u.error.u.type_04_error;
1901
1902         ipr_err_separator;
1903
1904         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905                 error->protection_level,
1906                 ioa_cfg->host->host_no,
1907                 error->last_func_vset_res_addr.bus,
1908                 error->last_func_vset_res_addr.target,
1909                 error->last_func_vset_res_addr.lun);
1910
1911         ipr_err_separator;
1912
1913         array_entry = error->array_member;
1914
1915         for (i = 0; i < 18; i++) {
1916                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1917                         continue;
1918
1919                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1920                         ipr_err("Exposed Array Member %d:\n", i);
1921                 else
1922                         ipr_err("Array Member %d:\n", i);
1923
1924                 ipr_log_vpd(&array_entry->vpd);
1925
1926                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928                                  "Expected Location");
1929
1930                 ipr_err_separator;
1931
1932                 if (i == 9)
1933                         array_entry = error->array_member2;
1934                 else
1935                         array_entry++;
1936         }
1937 }
1938
1939 /**
1940  * ipr_log_hex_data - Log additional hex IOA error data.
1941  * @ioa_cfg:    ioa config struct
1942  * @data:               IOA error data
1943  * @len:                data length
1944  *
1945  * Return value:
1946  *      none
1947  **/
1948 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1949 {
1950         int i;
1951
1952         if (len == 0)
1953                 return;
1954
1955         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1957
1958         for (i = 0; i < len / 4; i += 4) {
1959                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1960                         be32_to_cpu(data[i]),
1961                         be32_to_cpu(data[i+1]),
1962                         be32_to_cpu(data[i+2]),
1963                         be32_to_cpu(data[i+3]));
1964         }
1965 }
1966
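/*
 * Illustrative usage (hypothetical demo function, under #if 0): len is in
 * bytes, and each output row covers four big-endian words (16 bytes) with
 * the byte offset in the left-hand column.
 */
#if 0
static void demo_hex_dump(struct ipr_ioa_cfg *ioa_cfg)
{
        __be32 data[8] = { cpu_to_be32(0xDEADBEEF) };

        /*
         * Emits two rows:
         *   00000000: DEADBEEF 00000000 00000000 00000000
         *   00000010: 00000000 00000000 00000000 00000000
         */
        ipr_log_hex_data(ioa_cfg, data, sizeof(data));
}
#endif
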
1967 /**
1968  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969  * @ioa_cfg:    ioa config struct
1970  * @hostrcb:    hostrcb struct
1971  *
1972  * Return value:
1973  *      none
1974  **/
1975 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976                                             struct ipr_hostrcb *hostrcb)
1977 {
1978         struct ipr_hostrcb_type_17_error *error;
1979
1980         if (ioa_cfg->sis64)
1981                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1982         else
1983                 error = &hostrcb->hcam.u.error.u.type_17_error;
1984
1985         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1986         strim(error->failure_reason);
1987
1988         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1990         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1991         ipr_log_hex_data(ioa_cfg, error->data,
1992                          be32_to_cpu(hostrcb->hcam.length) -
1993                          (offsetof(struct ipr_hostrcb_error, u) +
1994                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1995 }
1996
1997 /**
1998  * ipr_log_dual_ioa_error - Log a dual adapter error.
1999  * @ioa_cfg:    ioa config struct
2000  * @hostrcb:    hostrcb struct
2001  *
2002  * Return value:
2003  *      none
2004  **/
2005 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006                                    struct ipr_hostrcb *hostrcb)
2007 {
2008         struct ipr_hostrcb_type_07_error *error;
2009
2010         error = &hostrcb->hcam.u.error.u.type_07_error;
2011         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2012         strim(error->failure_reason);
2013
2014         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2016         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2017         ipr_log_hex_data(ioa_cfg, error->data,
2018                          be32_to_cpu(hostrcb->hcam.length) -
2019                          (offsetof(struct ipr_hostrcb_error, u) +
2020                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2021 }
2022
2023 static const struct {
2024         u8 active;
2025         char *desc;
2026 } path_active_desc[] = {
2027         { IPR_PATH_NO_INFO, "Path" },
2028         { IPR_PATH_ACTIVE, "Active path" },
2029         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2030 };
2031
2032 static const struct {
2033         u8 state;
2034         char *desc;
2035 } path_state_desc[] = {
2036         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037         { IPR_PATH_HEALTHY, "is healthy" },
2038         { IPR_PATH_DEGRADED, "is degraded" },
2039         { IPR_PATH_FAILED, "is failed" }
2040 };
2041
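/*
 * Illustrative sketch (hypothetical demo function, under #if 0): the
 * descriptor tables above map masked status bytes to strings by linear
 * scan. A minimal decode helper in the same style, falling back to a
 * default string the way the callers below fall back to a raw dump:
 */
#if 0
static const char *demo_path_state_desc(u8 path_state)
{
        u8 state = path_state & IPR_PATH_STATE_MASK;
        int i;

        for (i = 0; i < ARRAY_SIZE(path_state_desc); i++)
                if (path_state_desc[i].state == state)
                        return path_state_desc[i].desc;
        return "has an unknown path state";
}
#endif
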
2042 /**
2043  * ipr_log_fabric_path - Log a fabric path error
2044  * @hostrcb:    hostrcb struct
2045  * @fabric:             fabric descriptor
2046  *
2047  * Return value:
2048  *      none
2049  **/
2050 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051                                 struct ipr_hostrcb_fabric_desc *fabric)
2052 {
2053         int i, j;
2054         u8 path_state = fabric->path_state;
2055         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056         u8 state = path_state & IPR_PATH_STATE_MASK;
2057
2058         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059                 if (path_active_desc[i].active != active)
2060                         continue;
2061
2062                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063                         if (path_state_desc[j].state != state)
2064                                 continue;
2065
2066                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068                                              path_active_desc[i].desc, path_state_desc[j].desc,
2069                                              fabric->ioa_port);
2070                         } else if (fabric->cascaded_expander == 0xff) {
2071                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072                                              path_active_desc[i].desc, path_state_desc[j].desc,
2073                                              fabric->ioa_port, fabric->phy);
2074                         } else if (fabric->phy == 0xff) {
2075                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076                                              path_active_desc[i].desc, path_state_desc[j].desc,
2077                                              fabric->ioa_port, fabric->cascaded_expander);
2078                         } else {
2079                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080                                              path_active_desc[i].desc, path_state_desc[j].desc,
2081                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2082                         }
2083                         return;
2084                 }
2085         }
2086
2087         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2089 }
2090
2091 /**
2092  * ipr_log64_fabric_path - Log a fabric path error
2093  * @hostrcb:    hostrcb struct
2094  * @fabric:             fabric descriptor
2095  *
2096  * Return value:
2097  *      none
2098  **/
2099 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100                                   struct ipr_hostrcb64_fabric_desc *fabric)
2101 {
2102         int i, j;
2103         u8 path_state = fabric->path_state;
2104         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105         u8 state = path_state & IPR_PATH_STATE_MASK;
2106         char buffer[IPR_MAX_RES_PATH_LENGTH];
2107
2108         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109                 if (path_active_desc[i].active != active)
2110                         continue;
2111
2112                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113                         if (path_state_desc[j].state != state)
2114                                 continue;
2115
2116                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117                                      path_active_desc[i].desc, path_state_desc[j].desc,
2118                                      ipr_format_res_path(hostrcb->ioa_cfg,
2119                                                 fabric->res_path,
2120                                                 buffer, sizeof(buffer)));
2121                         return;
2122                 }
2123         }
2124
2125         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2126                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127                                     buffer, sizeof(buffer)));
2128 }
2129
2130 static const struct {
2131         u8 type;
2132         char *desc;
2133 } path_type_desc[] = {
2134         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2138 };
2139
2140 static const struct {
2141         u8 status;
2142         char *desc;
2143 } path_status_desc[] = {
2144         { IPR_PATH_CFG_NO_PROB, "Functional" },
2145         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146         { IPR_PATH_CFG_FAILED, "Failed" },
2147         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148         { IPR_PATH_NOT_DETECTED, "Missing" },
2149         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2150 };
2151
2152 static const char *link_rate[] = {
2153         "unknown",
2154         "disabled",
2155         "phy reset problem",
2156         "spinup hold",
2157         "port selector",
2158         "unknown",
2159         "unknown",
2160         "unknown",
2161         "1.5Gbps",
2162         "3.0Gbps",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown",
2168         "unknown"
2169 };
2170
2171 /**
2172  * ipr_log_path_elem - Log a fabric path element.
2173  * @hostrcb:    hostrcb struct
2174  * @cfg:                fabric path element struct
2175  *
2176  * Return value:
2177  *      none
2178  **/
2179 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180                               struct ipr_hostrcb_config_element *cfg)
2181 {
2182         int i, j;
2183         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2185
2186         if (type == IPR_PATH_CFG_NOT_EXIST)
2187                 return;
2188
2189         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190                 if (path_type_desc[i].type != type)
2191                         continue;
2192
2193                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194                         if (path_status_desc[j].status != status)
2195                                 continue;
2196
2197                         if (type == IPR_PATH_CFG_IOA_PORT) {
2198                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199                                              path_status_desc[j].desc, path_type_desc[i].desc,
2200                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2202                         } else {
2203                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2206                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208                                 } else if (cfg->cascaded_expander == 0xff) {
2209                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2211                                                      path_type_desc[i].desc, cfg->phy,
2212                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214                                 } else if (cfg->phy == 0xff) {
2215                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2217                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2218                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220                                 } else {
2221                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2223                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2226                                 }
2227                         }
2228                         return;
2229                 }
2230         }
2231
2232         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2236 }
2237
2238 /**
2239  * ipr_log64_path_elem - Log a fabric path element.
2240  * @hostrcb:    hostrcb struct
2241  * @cfg:                fabric path element struct
2242  *
2243  * Return value:
2244  *      none
2245  **/
2246 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247                                 struct ipr_hostrcb64_config_element *cfg)
2248 {
2249         int i, j;
2250         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253         char buffer[IPR_MAX_RES_PATH_LENGTH];
2254
2255         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2256                 return;
2257
2258         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259                 if (path_type_desc[i].type != type)
2260                         continue;
2261
2262                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263                         if (path_status_desc[j].status != status)
2264                                 continue;
2265
2266                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267                                      path_status_desc[j].desc, path_type_desc[i].desc,
2268                                      ipr_format_res_path(hostrcb->ioa_cfg,
2269                                         cfg->res_path, buffer, sizeof(buffer)),
2270                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271                                         be32_to_cpu(cfg->wwid[0]),
2272                                         be32_to_cpu(cfg->wwid[1]));
2273                         return;
2274                 }
2275         }
2276         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277                      "WWN=%08X%08X\n", cfg->type_status,
2278                      ipr_format_res_path(hostrcb->ioa_cfg,
2279                         cfg->res_path, buffer, sizeof(buffer)),
2280                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2282 }
2283
2284 /**
2285  * ipr_log_fabric_error - Log a fabric error.
2286  * @ioa_cfg:    ioa config struct
2287  * @hostrcb:    hostrcb struct
2288  *
2289  * Return value:
2290  *      none
2291  **/
2292 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293                                  struct ipr_hostrcb *hostrcb)
2294 {
2295         struct ipr_hostrcb_type_20_error *error;
2296         struct ipr_hostrcb_fabric_desc *fabric;
2297         struct ipr_hostrcb_config_element *cfg;
2298         int i, add_len;
2299
2300         error = &hostrcb->hcam.u.error.u.type_20_error;
2301         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2303
2304         add_len = be32_to_cpu(hostrcb->hcam.length) -
2305                 (offsetof(struct ipr_hostrcb_error, u) +
2306                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2307
2308         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309                 ipr_log_fabric_path(hostrcb, fabric);
2310                 for_each_fabric_cfg(fabric, cfg)
2311                         ipr_log_path_elem(hostrcb, cfg);
2312
2313                 add_len -= be16_to_cpu(fabric->length);
2314                 fabric = (struct ipr_hostrcb_fabric_desc *)
2315                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2316         }
2317
2318         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2319 }
2320
2321 /**
2322  * ipr_log_sis64_array_error - Log a sis64 array error.
2323  * @ioa_cfg:    ioa config struct
2324  * @hostrcb:    hostrcb struct
2325  *
2326  * Return value:
2327  *      none
2328  **/
2329 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330                                       struct ipr_hostrcb *hostrcb)
2331 {
2332         int i, num_entries;
2333         struct ipr_hostrcb_type_24_error *error;
2334         struct ipr_hostrcb64_array_data_entry *array_entry;
2335         char buffer[IPR_MAX_RES_PATH_LENGTH];
2336         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2337
2338         error = &hostrcb->hcam.u.error64.u.type_24_error;
2339
2340         ipr_err_separator;
2341
2342         ipr_err("RAID %s Array Configuration: %s\n",
2343                 error->protection_level,
2344                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345                         buffer, sizeof(buffer)));
2346
2347         ipr_err_separator;
2348
2349         array_entry = error->array_member;
2350         num_entries = min_t(u32, error->num_entries,
2351                             ARRAY_SIZE(error->array_member));
2352
2353         for (i = 0; i < num_entries; i++, array_entry++) {
2354
2355                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2356                         continue;
2357
2358                 if (error->exposed_mode_adn == i)
2359                         ipr_err("Exposed Array Member %d:\n", i);
2360                 else
2361                         ipr_err("Array Member %d:\n", i);
2362
2364                 ipr_log_ext_vpd(&array_entry->vpd);
2365                 ipr_err("Current Location: %s\n",
2366                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367                                 buffer, sizeof(buffer)));
2368                 ipr_err("Expected Location: %s\n",
2369                          ipr_format_res_path(ioa_cfg,
2370                                 array_entry->expected_res_path,
2371                                 buffer, sizeof(buffer)));
2372
2373                 ipr_err_separator;
2374         }
2375 }
2376
2377 /**
2378  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379  * @ioa_cfg:    ioa config struct
2380  * @hostrcb:    hostrcb struct
2381  *
2382  * Return value:
2383  *      none
2384  **/
2385 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386                                        struct ipr_hostrcb *hostrcb)
2387 {
2388         struct ipr_hostrcb_type_30_error *error;
2389         struct ipr_hostrcb64_fabric_desc *fabric;
2390         struct ipr_hostrcb64_config_element *cfg;
2391         int i, add_len;
2392
2393         error = &hostrcb->hcam.u.error64.u.type_30_error;
2394
2395         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2397
2398         add_len = be32_to_cpu(hostrcb->hcam.length) -
2399                 (offsetof(struct ipr_hostrcb64_error, u) +
2400                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2401
2402         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403                 ipr_log64_fabric_path(hostrcb, fabric);
2404                 for_each_fabric_cfg(fabric, cfg)
2405                         ipr_log64_path_elem(hostrcb, cfg);
2406
2407                 add_len -= be16_to_cpu(fabric->length);
2408                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2410         }
2411
2412         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2413 }
2414
2415 /**
2416  * ipr_log_generic_error - Log an adapter error.
2417  * @ioa_cfg:    ioa config struct
2418  * @hostrcb:    hostrcb struct
2419  *
2420  * Return value:
2421  *      none
2422  **/
2423 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2424                                   struct ipr_hostrcb *hostrcb)
2425 {
2426         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2427                          be32_to_cpu(hostrcb->hcam.length));
2428 }
2429
2430 /**
2431  * ipr_log_sis64_device_error - Log a sis64 device error.
2432  * @ioa_cfg:    ioa config struct
2433  * @hostrcb:    hostrcb struct
2434  *
2435  * Return value:
2436  *      none
2437  **/
2438 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2439                                          struct ipr_hostrcb *hostrcb)
2440 {
2441         struct ipr_hostrcb_type_21_error *error;
2442         char buffer[IPR_MAX_RES_PATH_LENGTH];
2443
2444         error = &hostrcb->hcam.u.error64.u.type_21_error;
2445
2446         ipr_err("-----Failing Device Information-----\n");
2447         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2448                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2449                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2450         ipr_err("Device Resource Path: %s\n",
2451                 __ipr_format_res_path(error->res_path,
2452                                       buffer, sizeof(buffer)));
2453         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2454         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2455         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2456         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2457         ipr_err("SCSI Sense Data:\n");
2458         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2459         ipr_err("SCSI Command Descriptor Block:\n");
2460         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2461
2462         ipr_err("Additional IOA Data:\n");
2463         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2464 }
2465
2466 /**
2467  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2468  * @ioasc:      IOASC
2469  *
2470  * This function will return the index into the ipr_error_table
2471  * for the specified IOASC. If the IOASC is not in the table,
2472  * 0 will be returned, which points to the entry used for unknown errors.
2473  *
2474  * Return value:
2475  *      index into the ipr_error_table
2476  **/
2477 static u32 ipr_get_error(u32 ioasc)
2478 {
2479         int i;
2480
2481         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2482                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2483                         return i;
2484
2485         return 0;
2486 }
2487
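/*
 * Illustrative usage (hypothetical demo function, under #if 0): index 0
 * of ipr_error_table is the catch-all entry, so the lookup never fails.
 */
#if 0
static void demo_ioasc_lookup(u32 ioasc)
{
        u32 index = ipr_get_error(ioasc);

        printk(KERN_INFO "IOASC 0x%08X: %s\n", ioasc,
               ipr_error_table[index].error);
}
#endif
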
2488 /**
2489  * ipr_handle_log_data - Log an adapter error.
2490  * @ioa_cfg:    ioa config struct
2491  * @hostrcb:    hostrcb struct
2492  *
2493  * This function logs an adapter error to the system.
2494  *
2495  * Return value:
2496  *      none
2497  **/
2498 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2499                                 struct ipr_hostrcb *hostrcb)
2500 {
2501         u32 ioasc;
2502         int error_index;
2503         struct ipr_hostrcb_type_21_error *error;
2504
2505         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2506                 return;
2507
2508         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2509                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2510
2511         if (ioa_cfg->sis64)
2512                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2513         else
2514                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2515
2516         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2517             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2518                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2519                 scsi_report_bus_reset(ioa_cfg->host,
2520                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2521         }
2522
2523         error_index = ipr_get_error(ioasc);
2524
2525         if (!ipr_error_table[error_index].log_hcam)
2526                 return;
2527
2528         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2529             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2530                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2531
2532                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2533                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2534                                 return;
2535         }
2536
2537         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2538
2539         /* Set indication we have logged an error */
2540         ioa_cfg->errors_logged++;
2541
2542         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2543                 return;
2544         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2545                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2546
2547         switch (hostrcb->hcam.overlay_id) {
2548         case IPR_HOST_RCB_OVERLAY_ID_2:
2549                 ipr_log_cache_error(ioa_cfg, hostrcb);
2550                 break;
2551         case IPR_HOST_RCB_OVERLAY_ID_3:
2552                 ipr_log_config_error(ioa_cfg, hostrcb);
2553                 break;
2554         case IPR_HOST_RCB_OVERLAY_ID_4:
2555         case IPR_HOST_RCB_OVERLAY_ID_6:
2556                 ipr_log_array_error(ioa_cfg, hostrcb);
2557                 break;
2558         case IPR_HOST_RCB_OVERLAY_ID_7:
2559                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2560                 break;
2561         case IPR_HOST_RCB_OVERLAY_ID_12:
2562                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2563                 break;
2564         case IPR_HOST_RCB_OVERLAY_ID_13:
2565                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2566                 break;
2567         case IPR_HOST_RCB_OVERLAY_ID_14:
2568         case IPR_HOST_RCB_OVERLAY_ID_16:
2569                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2570                 break;
2571         case IPR_HOST_RCB_OVERLAY_ID_17:
2572                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2573                 break;
2574         case IPR_HOST_RCB_OVERLAY_ID_20:
2575                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2576                 break;
2577         case IPR_HOST_RCB_OVERLAY_ID_21:
2578                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2579                 break;
2580         case IPR_HOST_RCB_OVERLAY_ID_23:
2581                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2582                 break;
2583         case IPR_HOST_RCB_OVERLAY_ID_24:
2584         case IPR_HOST_RCB_OVERLAY_ID_26:
2585                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2586                 break;
2587         case IPR_HOST_RCB_OVERLAY_ID_30:
2588                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2589                 break;
2590         case IPR_HOST_RCB_OVERLAY_ID_1:
2591         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2592         default:
2593                 ipr_log_generic_error(ioa_cfg, hostrcb);
2594                 break;
2595         }
2596 }
2597
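/*
 * Illustrative alternative (hypothetical demo_* names, under #if 0; not
 * what the driver does): the overlay ID switch above could equally be
 * expressed as a lookup table of handlers, sketched here with a few of
 * the entries.
 */
#if 0
struct demo_overlay_handler {
        u8 overlay_id;
        void (*log)(struct ipr_ioa_cfg *, struct ipr_hostrcb *);
};

static const struct demo_overlay_handler demo_handlers[] = {
        { IPR_HOST_RCB_OVERLAY_ID_2, ipr_log_cache_error },
        { IPR_HOST_RCB_OVERLAY_ID_3, ipr_log_config_error },
        { IPR_HOST_RCB_OVERLAY_ID_20, ipr_log_fabric_error },
        /* ... remaining overlay IDs ... */
};

static void demo_dispatch(struct ipr_ioa_cfg *ioa_cfg,
                          struct ipr_hostrcb *hostrcb)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(demo_handlers); i++) {
                if (demo_handlers[i].overlay_id == hostrcb->hcam.overlay_id) {
                        demo_handlers[i].log(ioa_cfg, hostrcb);
                        return;
                }
        }
        ipr_log_generic_error(ioa_cfg, hostrcb); /* default case */
}
#endif
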
2598 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2599 {
2600         struct ipr_hostrcb *hostrcb;
2601
2602         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2603                                         struct ipr_hostrcb, queue);
2604
2605         if (unlikely(!hostrcb)) {
2606                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2607                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2608                                                 struct ipr_hostrcb, queue);
2609         }
2610
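        /*
         * Note: this assumes at least one hostrcb always remains on the
         * free or report queue; if both were empty, hostrcb would still
         * be NULL here and the list_del_init() below would dereference it.
         */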
2611         list_del_init(&hostrcb->queue);
2612         return hostrcb;
2613 }
2614
2615 /**
2616  * ipr_process_error - Op done function for an adapter error log.
2617  * @ipr_cmd:    ipr command struct
2618  *
2619  * This function is the op done function for an error log HCAM (host
2620  * controlled async message) from the adapter. It will log the error and
2621  * send the HCAM back to the adapter.
2622  *
2623  * Return value:
2624  *      none
2625  **/
2626 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2627 {
2628         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2629         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2630         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2631         u32 fd_ioasc;
2632
2633         if (ioa_cfg->sis64)
2634                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2635         else
2636                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2637
2638         list_del_init(&hostrcb->queue);
2639         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2640
2641         if (!ioasc) {
2642                 ipr_handle_log_data(ioa_cfg, hostrcb);
2643                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2644                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2645         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2646                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2647                 dev_err(&ioa_cfg->pdev->dev,
2648                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2649         }
2650
2651         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2652         schedule_work(&ioa_cfg->work_q);
2653         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2654
2655         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2656 }
2657
2658 /**
2659  * ipr_timeout -  An internally generated op has timed out.
2660  * @ipr_cmd:    ipr command struct
2661  *
2662  * This function blocks host requests and initiates an
2663  * adapter reset.
2664  *
2665  * Return value:
2666  *      none
2667  **/
2668 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2669 {
2670         unsigned long lock_flags = 0;
2671         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2672
2673         ENTER;
2674         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2675
2676         ioa_cfg->errors_logged++;
2677         dev_err(&ioa_cfg->pdev->dev,
2678                 "Adapter being reset due to command timeout.\n");
2679
2680         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2681                 ioa_cfg->sdt_state = GET_DUMP;
2682
2683         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2684                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2685
2686         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2687         LEAVE;
2688 }
2689
2690 /**
2691  * ipr_oper_timeout - Adapter timed out transitioning to operational
2692  * @ipr_cmd:    ipr command struct
2693  *
2694  * This function blocks host requests and initiates an
2695  * adapter reset.
2696  *
2697  * Return value:
2698  *      none
2699  **/
2700 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2701 {
2702         unsigned long lock_flags = 0;
2703         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2704
2705         ENTER;
2706         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2707
2708         ioa_cfg->errors_logged++;
2709         dev_err(&ioa_cfg->pdev->dev,
2710                 "Adapter timed out transitioning to operational.\n");
2711
2712         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2713                 ioa_cfg->sdt_state = GET_DUMP;
2714
2715         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2716                 if (ipr_fastfail)
2717                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2718                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2719         }
2720
2721         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2722         LEAVE;
2723 }
2724
2725 /**
2726  * ipr_find_ses_entry - Find matching SES in SES table
2727  * @res:        resource entry struct of SES
2728  *
2729  * Return value:
2730  *      pointer to SES table entry / NULL on failure
2731  **/
2732 static const struct ipr_ses_table_entry *
2733 ipr_find_ses_entry(struct ipr_resource_entry *res)
2734 {
2735         int i, j, matches;
2736         struct ipr_std_inq_vpids *vpids;
2737         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2738
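             /*
              * An 'X' in compare_product_id_byte means that byte of the
              * product ID must match the table entry; any other value in
              * that position acts as a wildcard.
              */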
2739         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2740                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2741                         if (ste->compare_product_id_byte[j] == 'X') {
2742                                 vpids = &res->std_inq_data.vpids;
2743                                 if (vpids->product_id[j] == ste->product_id[j])
2744                                         matches++;
2745                                 else
2746                                         break;
2747                         } else
2748                                 matches++;
2749                 }
2750
2751                 if (matches == IPR_PROD_ID_LEN)
2752                         return ste;
2753         }
2754
2755         return NULL;
2756 }
2757
2758 /**
2759  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2760  * @ioa_cfg:    ioa config struct
2761  * @bus:                SCSI bus
2762  * @bus_width:  bus width
2763  *
2764  * Return value:
2765  *      SCSI bus speed in units of 100KHz, e.g. 1600 is 160 MHz.
2766  *      For a 2-byte wide SCSI bus, the maximum transfer speed in
2767  *      MB/sec is twice the transfer rate in MHz (e.g. a wide-enabled
2768  *      bus running at 160 MHz moves up to 320 MB/sec).
2769  **/
2770 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2771 {
2772         struct ipr_resource_entry *res;
2773         const struct ipr_ses_table_entry *ste;
2774         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2775
2776         /* Loop through each config table entry in the config table buffer */
2777         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2778                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2779                         continue;
2780
2781                 if (bus != res->bus)
2782                         continue;
2783
2784                 if (!(ste = ipr_find_ses_entry(res)))
2785                         continue;
2786
2787                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2788         }
2789
2790         return max_xfer_rate;
2791 }
2792
2793 /**
2794  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2795  * @ioa_cfg:            ioa config struct
2796  * @max_delay:          max delay in microseconds to wait
2797  *
2798  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2799  *
2800  * Return value:
2801  *      0 on success / other on failure
2802  **/
2803 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2804 {
2805         volatile u32 pcii_reg;
2806         int delay = 1;
2807
2808         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2809         while (delay < max_delay) {
2810                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2811
2812                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2813                         return 0;
2814
2815                 /* udelay cannot be used if delay is more than a few milliseconds */
2816                 if ((delay / 1000) > MAX_UDELAY_MS)
2817                         mdelay(delay / 1000);
2818                 else
2819                         udelay(delay);
2820
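                     /* Exponential backoff: double the wait each iteration */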
2821                 delay += delay;
2822         }
2823         return -EIO;
2824 }
2825
2826 /**
2827  * ipr_get_sis64_dump_data_section - Dump IOA memory
2828  * @ioa_cfg:                    ioa config struct
2829  * @start_addr:                 adapter address to dump
2830  * @dest:                       destination kernel buffer
2831  * @length_in_words:            length to dump in 4 byte words
2832  *
2833  * Return value:
2834  *      0 on success
2835  **/
2836 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2837                                            u32 start_addr,
2838                                            __be32 *dest, u32 length_in_words)
2839 {
2840         int i;
2841
2842         for (i = 0; i < length_in_words; i++) {
2843                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2844                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2845                 dest++;
2846         }
2847
2848         return 0;
2849 }
2850
2851 /**
2852  * ipr_get_ldump_data_section - Dump IOA memory
2853  * @ioa_cfg:                    ioa config struct
2854  * @start_addr:                 adapter address to dump
2855  * @dest:                               destination kernel buffer
2856  * @length_in_words:    length to dump in 4 byte words
2857  *
2858  * Return value:
2859  *      0 on success / -EIO on failure
2860  **/
2861 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2862                                       u32 start_addr,
2863                                       __be32 *dest, u32 length_in_words)
2864 {
2865         volatile u32 temp_pcii_reg;
2866         int i, delay = 0;
2867
2868         if (ioa_cfg->sis64)
2869                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2870                                                        dest, length_in_words);
2871
2872         /* Write IOA interrupt reg starting LDUMP state  */
2873         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2874                ioa_cfg->regs.set_uproc_interrupt_reg32);
2875
2876         /* Wait for IO debug acknowledge */
2877         if (ipr_wait_iodbg_ack(ioa_cfg,
2878                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2879                 dev_err(&ioa_cfg->pdev->dev,
2880                         "IOA dump long data transfer timeout\n");
2881                 return -EIO;
2882         }
2883
2884         /* Signal LDUMP interlocked - clear IO debug ack */
2885         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2886                ioa_cfg->regs.clr_interrupt_reg);
2887
2888         /* Write Mailbox with starting address */
2889         writel(start_addr, ioa_cfg->ioa_mailbox);
2890
2891         /* Signal address valid - clear IOA Reset alert */
2892         writel(IPR_UPROCI_RESET_ALERT,
2893                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2894
2895         for (i = 0; i < length_in_words; i++) {
2896                 /* Wait for IO debug acknowledge */
2897                 if (ipr_wait_iodbg_ack(ioa_cfg,
2898                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2899                         dev_err(&ioa_cfg->pdev->dev,
2900                                 "IOA dump short data transfer timeout\n");
2901                         return -EIO;
2902                 }
2903
2904                 /* Read data from mailbox and increment destination pointer */
2905                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2906                 dest++;
2907
2908                 /* For all but the last word of data, signal data received */
2909                 if (i < (length_in_words - 1)) {
2910                         /* Signal dump data received - Clear IO debug Ack */
2911                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2912                                ioa_cfg->regs.clr_interrupt_reg);
2913                 }
2914         }
2915
2916         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2917         writel(IPR_UPROCI_RESET_ALERT,
2918                ioa_cfg->regs.set_uproc_interrupt_reg32);
2919
2920         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2921                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2922
2923         /* Signal dump data received - Clear IO debug Ack */
2924         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2925                ioa_cfg->regs.clr_interrupt_reg);
2926
2927         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2928         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2929                 temp_pcii_reg =
2930                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2931
2932                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2933                         return 0;
2934
2935                 udelay(10);
2936                 delay += 10;
2937         }
2938
2939         return 0;
2940 }
2941
2942 #ifdef CONFIG_SCSI_IPR_DUMP
2943 /**
2944  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2945  * @ioa_cfg:            ioa config struct
2946  * @pci_address:        adapter address
2947  * @length:                     length of data to copy
2948  *
2949  * Copy data from PCI adapter to kernel buffer.
2950  * Note: length MUST be a 4 byte multiple
2951  * Return value:
2952  *      0 on success / other on failure
2953  **/
2954 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2955                         unsigned long pci_address, u32 length)
2956 {
2957         int bytes_copied = 0;
2958         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2959         __be32 *page;
2960         unsigned long lock_flags = 0;
2961         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2962
2963         if (ioa_cfg->sis64)
2964                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2965         else
2966                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2967
2968         while (bytes_copied < length &&
2969                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2970                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2971                     ioa_dump->page_offset == 0) {
2972                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2973
2974                         if (!page) {
2975                                 ipr_trace;
2976                                 return bytes_copied;
2977                         }
2978
2979                         ioa_dump->page_offset = 0;
2980                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2981                         ioa_dump->next_page_index++;
2982                 } else
2983                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2984
2985                 rem_len = length - bytes_copied;
2986                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2987                 cur_len = min(rem_len, rem_page_len);
2988
2989                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2990                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2991                         rc = -EIO;
2992                 } else {
2993                         rc = ipr_get_ldump_data_section(ioa_cfg,
2994                                                         pci_address + bytes_copied,
2995                                                         &page[ioa_dump->page_offset / 4],
2996                                                         (cur_len / sizeof(u32)));
2997                 }
2998                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2999
3000                 if (!rc) {
3001                         ioa_dump->page_offset += cur_len;
3002                         bytes_copied += cur_len;
3003                 } else {
3004                         ipr_trace;
3005                         break;
3006                 }
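                     /* Yield the CPU between chunks; the dump can take a while */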
3007                 schedule();
3008         }
3009
3010         return bytes_copied;
3011 }
3012
3013 /**
3014  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3015  * @hdr:        dump entry header struct
3016  *
3017  * Return value:
3018  *      nothing
3019  **/
3020 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3021 {
3022         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3023         hdr->num_elems = 1;
3024         hdr->offset = sizeof(*hdr);
3025         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3026 }
3027
3028 /**
3029  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3030  * @ioa_cfg:    ioa config struct
3031  * @driver_dump:        driver dump struct
3032  *
3033  * Return value:
3034  *      nothing
3035  **/
3036 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3037                                    struct ipr_driver_dump *driver_dump)
3038 {
3039         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3040
3041         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3042         driver_dump->ioa_type_entry.hdr.len =
3043                 sizeof(struct ipr_dump_ioa_type_entry) -
3044                 sizeof(struct ipr_dump_entry_header);
3045         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3046         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3047         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3048         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3049                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3050                 ucode_vpd->minor_release[1];
3051         driver_dump->hdr.num_entries++;
3052 }
3053
3054 /**
3055  * ipr_dump_version_data - Fill in the driver version in the dump.
3056  * @ioa_cfg:    ioa config struct
3057  * @driver_dump:        driver dump struct
3058  *
3059  * Return value:
3060  *      nothing
3061  **/
3062 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3063                                   struct ipr_driver_dump *driver_dump)
3064 {
3065         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3066         driver_dump->version_entry.hdr.len =
3067                 sizeof(struct ipr_dump_version_entry) -
3068                 sizeof(struct ipr_dump_entry_header);
3069         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3070         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3071         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3072         driver_dump->hdr.num_entries++;
3073 }
3074
3075 /**
3076  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3077  * @ioa_cfg:    ioa config struct
3078  * @driver_dump:        driver dump struct
3079  *
3080  * Return value:
3081  *      nothing
3082  **/
3083 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3084                                    struct ipr_driver_dump *driver_dump)
3085 {
3086         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3087         driver_dump->trace_entry.hdr.len =
3088                 sizeof(struct ipr_dump_trace_entry) -
3089                 sizeof(struct ipr_dump_entry_header);
3090         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3091         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3092         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3093         driver_dump->hdr.num_entries++;
3094 }
3095
3096 /**
3097  * ipr_dump_location_data - Fill in the IOA location in the dump.
3098  * @ioa_cfg:    ioa config struct
3099  * @driver_dump:        driver dump struct
3100  *
3101  * Return value:
3102  *      nothing
3103  **/
3104 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3105                                    struct ipr_driver_dump *driver_dump)
3106 {
3107         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3108         driver_dump->location_entry.hdr.len =
3109                 sizeof(struct ipr_dump_location_entry) -
3110                 sizeof(struct ipr_dump_entry_header);
3111         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3112         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3113         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3114         driver_dump->hdr.num_entries++;
3115 }
3116
3117 /**
3118  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3119  * @ioa_cfg:    ioa config struct
3120  * @dump:               dump struct
3121  *
3122  * Return value:
3123  *      nothing
3124  **/
3125 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3126 {
3127         unsigned long start_addr, sdt_word;
3128         unsigned long lock_flags = 0;
3129         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3130         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3131         u32 num_entries, max_num_entries, start_off, end_off;
3132         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3133         struct ipr_sdt *sdt;
3134         int valid = 1;
3135         int i;
3136
3137         ENTER;
3138
3139         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3140
3141         if (ioa_cfg->sdt_state != READ_DUMP) {
3142                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143                 return;
3144         }
3145
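             /*
              * On SIS-64 adapters, drop the lock and give the IOA time to
              * prepare its dump data before reading the mailbox.
              */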
3146         if (ioa_cfg->sis64) {
3147                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148                 ssleep(IPR_DUMP_DELAY_SECONDS);
3149                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3150         }
3151
3152         start_addr = readl(ioa_cfg->ioa_mailbox);
3153
3154         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3155                 dev_err(&ioa_cfg->pdev->dev,
3156                         "Invalid dump table format: %lx\n", start_addr);
3157                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158                 return;
3159         }
3160
3161         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3162
3163         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3164
3165         /* Initialize the overall dump header */
3166         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3167         driver_dump->hdr.num_entries = 1;
3168         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3169         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3170         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3171         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3172
3173         ipr_dump_version_data(ioa_cfg, driver_dump);
3174         ipr_dump_location_data(ioa_cfg, driver_dump);
3175         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3176         ipr_dump_trace_data(ioa_cfg, driver_dump);
3177
3178         /* Update dump_header */
3179         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3180
3181         /* IOA Dump entry */
3182         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3183         ioa_dump->hdr.len = 0;
3184         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3185         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3186
3187         /* First entries in the sdt are actually a list of dump addresses
3188          * and lengths used to gather the real dump data.  sdt points to
3189          * the IOA generated dump table.  Dump data will be extracted
3190          * based on entries in this table. */
3191         sdt = &ioa_dump->sdt;
3192
3193         if (ioa_cfg->sis64) {
3194                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3195                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3196         } else {
3197                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3198                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3199         }
3200
3201         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3202                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3203         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3204                                         bytes_to_copy / sizeof(__be32));
3205
3206         /* Smart Dump table is ready to use and the first entry is valid */
3207         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3208             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3209                 dev_err(&ioa_cfg->pdev->dev,
3210                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3211                         rc, be32_to_cpu(sdt->hdr.state));
3212                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3213                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3214                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3215                 return;
3216         }
3217
3218         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3219
3220         if (num_entries > max_num_entries)
3221                 num_entries = max_num_entries;
3222
3223         /* Update dump length to the actual data to be copied */
3224         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3225         if (ioa_cfg->sis64)
3226                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3227         else
3228                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3229
3230         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231
3232         for (i = 0; i < num_entries; i++) {
3233                 if (ioa_dump->hdr.len > max_dump_size) {
3234                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3235                         break;
3236                 }
3237
3238                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3239                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3240                         if (ioa_cfg->sis64)
3241                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3242                         else {
3243                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3244                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3245
3246                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3247                                         bytes_to_copy = end_off - start_off;
3248                                 else
3249                                         valid = 0;
3250                         }
3251                         if (valid) {
3252                                 if (bytes_to_copy > max_dump_size) {
3253                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3254                                         continue;
3255                                 }
3256
3257                                 /* Copy data from adapter to driver buffers */
3258                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3259                                                             bytes_to_copy);
3260
3261                                 ioa_dump->hdr.len += bytes_copied;
3262
3263                                 if (bytes_copied != bytes_to_copy) {
3264                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3265                                         break;
3266                                 }
3267                         }
3268                 }
3269         }
3270
3271         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3272
3273         /* Update dump_header */
3274         driver_dump->hdr.len += ioa_dump->hdr.len;
3275         wmb();
3276         ioa_cfg->sdt_state = DUMP_OBTAINED;
3277         LEAVE;
3278 }
3279
3280 #else
3281 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3282 #endif
3283
3284 /**
3285  * ipr_release_dump - Free adapter dump memory
3286  * @kref:       kref struct
3287  *
3288  * Return value:
3289  *      nothing
3290  **/
3291 static void ipr_release_dump(struct kref *kref)
3292 {
3293         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3294         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3295         unsigned long lock_flags = 0;
3296         int i;
3297
3298         ENTER;
3299         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300         ioa_cfg->dump = NULL;
3301         ioa_cfg->sdt_state = INACTIVE;
3302         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3303
3304         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3305                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3306
3307         vfree(dump->ioa_dump.ioa_data);
3308         kfree(dump);
3309         LEAVE;
3310 }
3311
3312 static void ipr_add_remove_thread(struct work_struct *work)
3313 {
3314         unsigned long lock_flags;
3315         struct ipr_resource_entry *res;
3316         struct scsi_device *sdev;
3317         struct ipr_ioa_cfg *ioa_cfg =
3318                 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3319         u8 bus, target, lun;
3320         int did_work;
3321
3322         ENTER;
3323         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3324
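             /*
              * Handle removals first, then additions.  The host lock is
              * dropped around each mid-layer call, so the scan restarts
              * from the top after every device added.
              */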
3325 restart:
3326         do {
3327                 did_work = 0;
3328                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3329                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3330                         return;
3331                 }
3332
3333                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3334                         if (res->del_from_ml && res->sdev) {
3335                                 did_work = 1;
3336                                 sdev = res->sdev;
3337                                 if (!scsi_device_get(sdev)) {
3338                                         if (!res->add_to_ml)
3339                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3340                                         else
3341                                                 res->del_from_ml = 0;
3342                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343                                         scsi_remove_device(sdev);
3344                                         scsi_device_put(sdev);
3345                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3346                                 }
3347                                 break;
3348                         }
3349                 }
3350         } while (did_work);
3351
3352         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3353                 if (res->add_to_ml) {
3354                         bus = res->bus;
3355                         target = res->target;
3356                         lun = res->lun;
3357                         res->add_to_ml = 0;
3358                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3359                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3360                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3361                         goto restart;
3362                 }
3363         }
3364
3365         ioa_cfg->scan_done = 1;
3366         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3367         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3368         LEAVE;
3369 }
3370
3371 /**
3372  * ipr_worker_thread - Worker thread
3373  * @work:               ioa config struct
3374  *
3375  * Called at task level from a work thread. This function takes care
3376  * of adding and removing devices from the mid-layer as configuration
3377  * changes are detected by the adapter.
3378  *
3379  * Return value:
3380  *      nothing
3381  **/
3382 static void ipr_worker_thread(struct work_struct *work)
3383 {
3384         unsigned long lock_flags;
3385         struct ipr_dump *dump;
3386         struct ipr_ioa_cfg *ioa_cfg =
3387                 container_of(work, struct ipr_ioa_cfg, work_q);
3388
3389         ENTER;
3390         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3391
3392         if (ioa_cfg->sdt_state == READ_DUMP) {
3393                 dump = ioa_cfg->dump;
3394                 if (!dump) {
3395                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3396                         return;
3397                 }
3398                 kref_get(&dump->kref);
3399                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3400                 ipr_get_ioa_dump(ioa_cfg, dump);
3401                 kref_put(&dump->kref, ipr_release_dump);
3402
3403                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3405                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3406                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3407                 return;
3408         }
3409
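             /*
              * scsi_unblock_requests() is called without the host lock held;
              * if a reset blocked requests again in the meantime, honor that
              * below by blocking again.
              */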
3410         if (ioa_cfg->scsi_unblock) {
3411                 ioa_cfg->scsi_unblock = 0;
3412                 ioa_cfg->scsi_blocked = 0;
3413                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3414                 scsi_unblock_requests(ioa_cfg->host);
3415                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3416                 if (ioa_cfg->scsi_blocked)
3417                         scsi_block_requests(ioa_cfg->host);
3418         }
3419
3420         if (!ioa_cfg->scan_enabled) {
3421                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422                 return;
3423         }
3424
3425         schedule_work(&ioa_cfg->scsi_add_work_q);
3426
3427         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3428         LEAVE;
3429 }
3430
3431 #ifdef CONFIG_SCSI_IPR_TRACE
3432 /**
3433  * ipr_read_trace - Dump the adapter trace
3434  * @filp:               open sysfs file
3435  * @kobj:               kobject struct
3436  * @bin_attr:           bin_attribute struct
3437  * @buf:                buffer
3438  * @off:                offset
3439  * @count:              buffer size
3440  *
3441  * Return value:
3442  *      number of bytes printed to buffer
3443  **/
3444 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3445                               struct bin_attribute *bin_attr,
3446                               char *buf, loff_t off, size_t count)
3447 {
3448         struct device *dev = container_of(kobj, struct device, kobj);
3449         struct Scsi_Host *shost = class_to_shost(dev);
3450         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3451         unsigned long lock_flags = 0;
3452         ssize_t ret;
3453
3454         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3455         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3456                                 IPR_TRACE_SIZE);
3457         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3458
3459         return ret;
3460 }
3461
3462 static struct bin_attribute ipr_trace_attr = {
3463         .attr = {
3464                 .name = "trace",
3465                 .mode = S_IRUGO,
3466         },
3467         .size = 0,
3468         .read = ipr_read_trace,
3469 };
3470 #endif
3471
3472 /**
3473  * ipr_show_fw_version - Show the firmware version
3474  * @dev:        class device struct
3475  * @buf:        buffer
3476  *
3477  * Return value:
3478  *      number of bytes printed to buffer
3479  **/
3480 static ssize_t ipr_show_fw_version(struct device *dev,
3481                                    struct device_attribute *attr, char *buf)
3482 {
3483         struct Scsi_Host *shost = class_to_shost(dev);
3484         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3485         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3486         unsigned long lock_flags = 0;
3487         int len;
3488
3489         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3490         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3491                        ucode_vpd->major_release, ucode_vpd->card_type,
3492                        ucode_vpd->minor_release[0],
3493                        ucode_vpd->minor_release[1]);
3494         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3495         return len;
3496 }
3497
3498 static struct device_attribute ipr_fw_version_attr = {
3499         .attr = {
3500                 .name =         "fw_version",
3501                 .mode =         S_IRUGO,
3502         },
3503         .show = ipr_show_fw_version,
3504 };
3505
3506 /**
3507  * ipr_show_log_level - Show the adapter's error logging level
3508  * @dev:        class device struct
3509  * @buf:        buffer
3510  *
3511  * Return value:
3512  *      number of bytes printed to buffer
3513  **/
3514 static ssize_t ipr_show_log_level(struct device *dev,
3515                                    struct device_attribute *attr, char *buf)
3516 {
3517         struct Scsi_Host *shost = class_to_shost(dev);
3518         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3519         unsigned long lock_flags = 0;
3520         int len;
3521
3522         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3523         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3524         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3525         return len;
3526 }
3527
3528 /**
3529  * ipr_store_log_level - Change the adapter's error logging level
3530  * @dev:        class device struct
3531  * @buf:        buffer
3532  *
3533  * Return value:
3534  *      number of bytes consumed from buffer
3535  **/
3536 static ssize_t ipr_store_log_level(struct device *dev,
3537                                    struct device_attribute *attr,
3538                                    const char *buf, size_t count)
3539 {
3540         struct Scsi_Host *shost = class_to_shost(dev);
3541         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3542         unsigned long lock_flags = 0;
3543
3544         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3545         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3546         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3547         return strlen(buf);
3548 }
3549
3550 static struct device_attribute ipr_log_level_attr = {
3551         .attr = {
3552                 .name =         "log_level",
3553                 .mode =         S_IRUGO | S_IWUSR,
3554         },
3555         .show = ipr_show_log_level,
3556         .store = ipr_store_log_level
3557 };
3558
3559 /**
3560  * ipr_store_diagnostics - IOA Diagnostics interface
3561  * @dev:        device struct
3562  * @buf:        buffer
3563  * @count:      buffer size
3564  *
3565  * This function will reset the adapter and wait a reasonable
3566  * amount of time for any errors that the adapter might log.
3567  *
3568  * Return value:
3569  *      count on success / other on failure
3570  **/
3571 static ssize_t ipr_store_diagnostics(struct device *dev,
3572                                      struct device_attribute *attr,
3573                                      const char *buf, size_t count)
3574 {
3575         struct Scsi_Host *shost = class_to_shost(dev);
3576         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3577         unsigned long lock_flags = 0;
3578         int rc = count;
3579
3580         if (!capable(CAP_SYS_ADMIN))
3581                 return -EACCES;
3582
3583         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3584         while (ioa_cfg->in_reset_reload) {
3585                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3586                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3587                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3588         }
3589
3590         ioa_cfg->errors_logged = 0;
3591         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3592
3593         if (ioa_cfg->in_reset_reload) {
3594                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3595                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3596
3597                 /* Wait for a second for any errors to be logged */
3598                 msleep(1000);
3599         } else {
3600                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3601                 return -EIO;
3602         }
3603
3604         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3605         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3606                 rc = -EIO;
3607         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3608
3609         return rc;
3610 }
3611
3612 static struct device_attribute ipr_diagnostics_attr = {
3613         .attr = {
3614                 .name =         "run_diagnostics",
3615                 .mode =         S_IWUSR,
3616         },
3617         .store = ipr_store_diagnostics
3618 };
3619
3620 /**
3621  * ipr_show_adapter_state - Show the adapter's state
3622  * @dev:        device struct
3623  * @buf:        buffer
3624  *
3625  * Return value:
3626  *      number of bytes printed to buffer
3627  **/
3628 static ssize_t ipr_show_adapter_state(struct device *dev,
3629                                       struct device_attribute *attr, char *buf)
3630 {
3631         struct Scsi_Host *shost = class_to_shost(dev);
3632         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3633         unsigned long lock_flags = 0;
3634         int len;
3635
3636         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3637         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3638                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3639         else
3640                 len = snprintf(buf, PAGE_SIZE, "online\n");
3641         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3642         return len;
3643 }
3644
3645 /**
3646  * ipr_store_adapter_state - Change adapter state
3647  * @dev:        device struct
3648  * @buf:        buffer
3649  * @count:      buffer size
3650  *
3651  * This function will change the adapter's state.
3652  *
3653  * Return value:
3654  *      count on success / other on failure
3655  **/
3656 static ssize_t ipr_store_adapter_state(struct device *dev,
3657                                        struct device_attribute *attr,
3658                                        const char *buf, size_t count)
3659 {
3660         struct Scsi_Host *shost = class_to_shost(dev);
3661         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3662         unsigned long lock_flags;
3663         int result = count, i;
3664
3665         if (!capable(CAP_SYS_ADMIN))
3666                 return -EACCES;
3667
3668         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3669         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3670             !strncmp(buf, "online", 6)) {
3671                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3672                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3673                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3674                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3675                 }
3676                 wmb();
3677                 ioa_cfg->reset_retries = 0;
3678                 ioa_cfg->in_ioa_bringdown = 0;
3679                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3680         }
3681         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3682         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3683
3684         return result;
3685 }
3686
3687 static struct device_attribute ipr_ioa_state_attr = {
3688         .attr = {
3689                 .name =         "online_state",
3690                 .mode =         S_IRUGO | S_IWUSR,
3691         },
3692         .show = ipr_show_adapter_state,
3693         .store = ipr_store_adapter_state
3694 };
3695
3696 /**
3697  * ipr_store_reset_adapter - Reset the adapter
3698  * @dev:        device struct
3699  * @buf:        buffer
3700  * @count:      buffer size
3701  *
3702  * This function will reset the adapter.
3703  *
3704  * Return value:
3705  *      count on success / other on failure
3706  **/
3707 static ssize_t ipr_store_reset_adapter(struct device *dev,
3708                                        struct device_attribute *attr,
3709                                        const char *buf, size_t count)
3710 {
3711         struct Scsi_Host *shost = class_to_shost(dev);
3712         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3713         unsigned long lock_flags;
3714         int result = count;
3715
3716         if (!capable(CAP_SYS_ADMIN))
3717                 return -EACCES;
3718
3719         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3720         if (!ioa_cfg->in_reset_reload)
3721                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3722         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3723         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3724
3725         return result;
3726 }
3727
3728 static struct device_attribute ipr_ioa_reset_attr = {
3729         .attr = {
3730                 .name =         "reset_host",
3731                 .mode =         S_IWUSR,
3732         },
3733         .store = ipr_store_reset_adapter
3734 };
3735
3736 static int ipr_iopoll(struct irq_poll *iop, int budget);
3737 /**
3738  * ipr_show_iopoll_weight - Show ipr polling mode
3739  * @dev:        class device struct
3740  * @buf:        buffer
3741  *
3742  * Return value:
3743  *      number of bytes printed to buffer
3744  **/
3745 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3746                                    struct device_attribute *attr, char *buf)
3747 {
3748         struct Scsi_Host *shost = class_to_shost(dev);
3749         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3750         unsigned long lock_flags = 0;
3751         int len;
3752
3753         spin_lock_irqsave(shost->host_lock, lock_flags);
3754         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3755         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3756
3757         return len;
3758 }
3759
3760 /**
3761  * ipr_store_iopoll_weight - Change the adapter's polling mode
3762  * @dev:        class device struct
3763  * @buf:        buffer
3764  *
3765  * Return value:
3766  *      number of bytes consumed from buffer
3767  **/
3768 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3769                                         struct device_attribute *attr,
3770                                         const char *buf, size_t count)
3771 {
3772         struct Scsi_Host *shost = class_to_shost(dev);
3773         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3774         unsigned long user_iopoll_weight;
3775         unsigned long lock_flags = 0;
3776         int i;
3777
3778         if (!ioa_cfg->sis64) {
3779                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3780                 return -EINVAL;
3781         }
3782         if (kstrtoul(buf, 10, &user_iopoll_weight))
3783                 return -EINVAL;
3784
3785         if (user_iopoll_weight > 256) {
3786                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3787                 return -EINVAL;
3788         }
3789
3790         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3791                 dev_info(&ioa_cfg->pdev->dev, "irq_poll weight is already set to this value\n");
3792                 return strlen(buf);
3793         }
3794
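             /* Quiesce any active irq_poll contexts before changing the weight */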
3795         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3796                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3797                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3798         }
3799
3800         spin_lock_irqsave(shost->host_lock, lock_flags);
3801         ioa_cfg->iopoll_weight = user_iopoll_weight;
3802         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3803                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3804                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3805                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3806                 }
3807         }
3808         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3809
3810         return strlen(buf);
3811 }
3812
3813 static struct device_attribute ipr_iopoll_weight_attr = {
3814         .attr = {
3815                 .name =         "iopoll_weight",
3816                 .mode =         S_IRUGO | S_IWUSR,
3817         },
3818         .show = ipr_show_iopoll_weight,
3819         .store = ipr_store_iopoll_weight
3820 };
3821
3822 /**
3823  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3824  * @buf_len:            buffer length
3825  *
3826  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3827  * list to use for microcode download
3828  *
3829  * Return value:
3830  *      pointer to sglist / NULL on failure
3831  **/
3832 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3833 {
3834         int sg_size, order, bsize_elem, num_elem, i, j;
3835         struct ipr_sglist *sglist;
3836         struct scatterlist *scatterlist;
3837         struct page *page;
3838
3839         /* Get the minimum size per scatter/gather element */
3840         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3841
3842         /* Get the actual size per element */
3843         order = get_order(sg_size);
3844
3845         /* Determine the actual number of bytes per element */
3846         bsize_elem = PAGE_SIZE * (1 << order);
3847
3848         /* Determine the actual number of sg entries needed */
3849         if (buf_len % bsize_elem)
3850                 num_elem = (buf_len / bsize_elem) + 1;
3851         else
3852                 num_elem = buf_len / bsize_elem;
3853
3854         /* Allocate a scatter/gather list for the DMA */
3855         sglist = kzalloc(sizeof(struct ipr_sglist) +
3856                          (sizeof(struct scatterlist) * (num_elem - 1)),
3857                          GFP_KERNEL);
3858
3859         if (sglist == NULL) {
3860                 ipr_trace;
3861                 return NULL;
3862         }
3863
3864         scatterlist = sglist->scatterlist;
3865         sg_init_table(scatterlist, num_elem);
3866
3867         sglist->order = order;
3868         sglist->num_sg = num_elem;
3869
3870         /* Allocate a bunch of sg elements */
3871         for (i = 0; i < num_elem; i++) {
3872                 page = alloc_pages(GFP_KERNEL, order);
3873                 if (!page) {
3874                         ipr_trace;
3875
3876                         /* Free up what we already allocated */
3877                         for (j = i - 1; j >= 0; j--)
3878                                 __free_pages(sg_page(&scatterlist[j]), order);
3879                         kfree(sglist);
3880                         return NULL;
3881                 }
3882
3883                 sg_set_page(&scatterlist[i], page, 0, 0);
3884         }
3885
3886         return sglist;
3887 }
3888
3889 /**
3890  * ipr_free_ucode_buffer - Frees a microcode download buffer
3891  * @sglist:             scatter/gather list pointer
3892  *
3893  * Free a DMA'able ucode download buffer previously allocated with
3894  * ipr_alloc_ucode_buffer
3895  *
3896  * Return value:
3897  *      nothing
3898  **/
3899 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3900 {
3901         int i;
3902
3903         for (i = 0; i < sglist->num_sg; i++)
3904                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3905
3906         kfree(sglist);
3907 }
3908
3909 /**
3910  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3911  * @sglist:             scatter/gather list pointer
3912  * @buffer:             buffer pointer
3913  * @len:                buffer length
3914  *
3915  * Copy a microcode image from a user buffer into a buffer allocated by
3916  * ipr_alloc_ucode_buffer
3917  *
3918  * Return value:
3919  *      0 on success / other on failure
3920  **/
3921 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3922                                  u8 *buffer, u32 len)
3923 {
3924         int bsize_elem, i, result = 0;
3925         struct scatterlist *scatterlist;
3926         void *kaddr;
3927
3928         /* Determine the actual number of bytes per element */
3929         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3930
3931         scatterlist = sglist->scatterlist;
3932
3933         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3934                 struct page *page = sg_page(&scatterlist[i]);
3935
3936                 kaddr = kmap(page);
3937                 memcpy(kaddr, buffer, bsize_elem);
3938                 kunmap(page);
3939
3940                 scatterlist[i].length = bsize_elem;
3941
3942                 if (result != 0) {
3943                         ipr_trace;
3944                         return result;
3945                 }
3946         }
3947
3948         if (len % bsize_elem) {
3949                 struct page *page = sg_page(&scatterlist[i]);
3950
3951                 kaddr = kmap(page);
3952                 memcpy(kaddr, buffer, len % bsize_elem);
3953                 kunmap(page);
3954
3955                 scatterlist[i].length = len % bsize_elem;
3956         }
3957
3958         sglist->buffer_len = len;
3959         return result;
3960 }
3961
3962 /**
3963  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3964  * @ipr_cmd:            ipr command struct
3965  * @sglist:             scatter/gather list
3966  *
3967  * Builds a microcode download IOA data list (IOADL).
3968  *
3969  **/
3970 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3971                                     struct ipr_sglist *sglist)
3972 {
3973         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3974         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3975         struct scatterlist *scatterlist = sglist->scatterlist;
3976         int i;
3977
3978         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3979         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3980         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3981
3982         ioarcb->ioadl_len =
3983                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3984         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3985                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3986                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3987                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3988         }
3989
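             /* Flag the final descriptor so the IOA knows where the list ends */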
3990         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3991 }
3992
3993 /**
3994  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3995  * @ipr_cmd:    ipr command struct
3996  * @sglist:             scatter/gather list
3997  *
3998  * Builds a microcode download IOA data list (IOADL).
3999  *
4000  **/
4001 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
4002                                   struct ipr_sglist *sglist)
4003 {
4004         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4005         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4006         struct scatterlist *scatterlist = sglist->scatterlist;
4007         int i;
4008
4009         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
4010         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4011         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4012
4013         ioarcb->ioadl_len =
4014                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4015
4016         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4017                 ioadl[i].flags_and_data_len =
4018                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4019                 ioadl[i].address =
4020                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
4021         }
4022
4023         ioadl[i-1].flags_and_data_len |=
4024                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4025 }
4026
4027 /**
4028  * ipr_update_ioa_ucode - Update IOA's microcode
4029  * @ioa_cfg:    ioa config struct
4030  * @sglist:             scatter/gather list
4031  *
4032  * Initiate an adapter reset to update the IOA's microcode
4033  *
4034  * Return value:
4035  *      0 on success / -EIO on failure
4036  **/
4037 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4038                                 struct ipr_sglist *sglist)
4039 {
4040         unsigned long lock_flags;
4041
4042         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4043         while (ioa_cfg->in_reset_reload) {
4044                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4045                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4046                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4047         }
4048
4049         if (ioa_cfg->ucode_sglist) {
4050                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4051                 dev_err(&ioa_cfg->pdev->dev,
4052                         "Microcode download already in progress\n");
4053                 return -EIO;
4054         }
4055
4056         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4057                                         sglist->scatterlist, sglist->num_sg,
4058                                         DMA_TO_DEVICE);
4059
4060         if (!sglist->num_dma_sg) {
4061                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4062                 dev_err(&ioa_cfg->pdev->dev,
4063                         "Failed to map microcode download buffer!\n");
4064                 return -EIO;
4065         }
4066
4067         ioa_cfg->ucode_sglist = sglist;
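             /* The actual download is performed as part of the reset sequence */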
4068         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4069         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4070         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4071
4072         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4073         ioa_cfg->ucode_sglist = NULL;
4074         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4075         return 0;
4076 }
4077
4078 /**
4079  * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:        device struct
 * @attr:       device attribute (unused)
4081  * @buf:        buffer
4082  * @count:      buffer size
4083  *
4084  * This function will update the firmware on the adapter.
4085  *
4086  * Return value:
4087  *      count on success / other on failure
4088  **/
4089 static ssize_t ipr_store_update_fw(struct device *dev,
4090                                    struct device_attribute *attr,
4091                                    const char *buf, size_t count)
4092 {
4093         struct Scsi_Host *shost = class_to_shost(dev);
4094         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4095         struct ipr_ucode_image_header *image_hdr;
4096         const struct firmware *fw_entry;
4097         struct ipr_sglist *sglist;
4098         char fname[100];
        u8 *src;
4100         char *endline;
4101         int result, dnld_size;
4102
4103         if (!capable(CAP_SYS_ADMIN))
4104                 return -EACCES;
4105
4106         snprintf(fname, sizeof(fname), "%s", buf);
4107
4108         endline = strchr(fname, '\n');
4109         if (endline)
4110                 *endline = '\0';
4111
4112         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4113                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4114                 return -EIO;
4115         }
4116
4117         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4118
4119         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4120         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4121         sglist = ipr_alloc_ucode_buffer(dnld_size);
4122
4123         if (!sglist) {
4124                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4125                 release_firmware(fw_entry);
4126                 return -ENOMEM;
4127         }
4128
4129         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4130
4131         if (result) {
4132                 dev_err(&ioa_cfg->pdev->dev,
4133                         "Microcode buffer copy to DMA buffer failed\n");
4134                 goto out;
4135         }
4136
4137         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4138
4139         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4140
4141         if (!result)
4142                 result = count;
4143 out:
4144         ipr_free_ucode_buffer(sglist);
4145         release_firmware(fw_entry);
4146         return result;
4147 }
4148
4149 static struct device_attribute ipr_update_fw_attr = {
4150         .attr = {
4151                 .name =         "update_fw",
4152                 .mode =         S_IWUSR,
4153         },
4154         .store = ipr_store_update_fw
4155 };
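
/*
 * Usage sketch for the attribute above (hypothetical host number and
 * image name; any file that request_firmware() can locate in the
 * firmware search path will do):
 *
 *	# echo pci.214B.image > /sys/class/scsi_host/host0/update_fw
 *
 * The write does not return until the adapter reset that activates the
 * new microcode has completed.
 */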
4156
4157 /**
4158  * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:        class device struct
 * @attr:       device attribute (unused)
 * @buf:        buffer
4161  *
4162  * Return value:
4163  *      number of bytes printed to buffer
4164  **/
4165 static ssize_t ipr_show_fw_type(struct device *dev,
4166                                 struct device_attribute *attr, char *buf)
4167 {
4168         struct Scsi_Host *shost = class_to_shost(dev);
4169         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4170         unsigned long lock_flags = 0;
4171         int len;
4172
4173         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4174         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4175         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4176         return len;
4177 }
4178
4179 static struct device_attribute ipr_ioa_fw_type_attr = {
4180         .attr = {
4181                 .name =         "fw_type",
4182                 .mode =         S_IRUGO,
4183         },
4184         .show = ipr_show_fw_type
4185 };
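
/* fw_type reads back ioa_cfg->sis64: 1 on SIS64 adapters, 0 on SIS32 ones. */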
4186
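/**
 * ipr_read_async_err_log - Read the oldest queued asynchronous error buffer
 * @filep:              open sysfs file
 * @kobj:               kobject struct
 * @bin_attr:           bin_attribute struct
 * @buf:                buffer
 * @off:                offset
 * @count:              buffer size
 *
 * Return value:
 *      number of bytes read / 0 if no error buffer is queued
 **/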
4187 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4188                                 struct bin_attribute *bin_attr, char *buf,
4189                                 loff_t off, size_t count)
4190 {
4191         struct device *cdev = container_of(kobj, struct device, kobj);
4192         struct Scsi_Host *shost = class_to_shost(cdev);
4193         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4194         struct ipr_hostrcb *hostrcb;
4195         unsigned long lock_flags = 0;
4196         int ret;
4197
4198         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4199         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4200                                         struct ipr_hostrcb, queue);
4201         if (!hostrcb) {
4202                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4203                 return 0;
4204         }
4205         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4206                                 sizeof(hostrcb->hcam));
4207         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4208         return ret;
4209 }
4210
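/**
 * ipr_next_async_err_log - Advance to the next asynchronous error buffer
 * @filep:              open sysfs file
 * @kobj:               kobject struct
 * @bin_attr:           bin_attribute struct
 * @buf:                buffer
 * @off:                offset
 * @count:              buffer size
 *
 * Moves the oldest error buffer back to the free queue so a subsequent
 * read returns the next queued entry.
 *
 * Return value:
 *      count
 **/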
4211 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4212                                 struct bin_attribute *bin_attr, char *buf,
4213                                 loff_t off, size_t count)
4214 {
4215         struct device *cdev = container_of(kobj, struct device, kobj);
4216         struct Scsi_Host *shost = class_to_shost(cdev);
4217         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4218         struct ipr_hostrcb *hostrcb;
4219         unsigned long lock_flags = 0;
4220
4221         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4222         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4223                                         struct ipr_hostrcb, queue);
4224         if (!hostrcb) {
4225                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4226                 return count;
4227         }
4228
4229         /* Reclaim hostrcb before exit */
4230         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4231         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4232         return count;
4233 }
4234
4235 static struct bin_attribute ipr_ioa_async_err_log = {
4236         .attr = {
4237                 .name =         "async_err_log",
4238                 .mode =         S_IRUGO | S_IWUSR,
4239         },
4240         .size = 0,
4241         .read = ipr_read_async_err_log,
4242         .write = ipr_next_async_err_log
4243 };
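
/*
 * A minimal userspace sketch of draining the error log (hypothetical
 * host number; each read returns the raw HCAM of the oldest queued
 * entry, and a write of anything pops that entry off the queue):
 *
 *	int fd = open("/sys/class/scsi_host/host0/async_err_log", O_RDWR);
 *	char hcam[4096];
 *
 *	while (pread(fd, hcam, sizeof(hcam), 0) > 0)
 *		pwrite(fd, "1", 1, 0);
 */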
4244
4245 static struct device_attribute *ipr_ioa_attrs[] = {
4246         &ipr_fw_version_attr,
4247         &ipr_log_level_attr,
4248         &ipr_diagnostics_attr,
4249         &ipr_ioa_state_attr,
4250         &ipr_ioa_reset_attr,
4251         &ipr_update_fw_attr,
4252         &ipr_ioa_fw_type_attr,
4253         &ipr_iopoll_weight_attr,
4254         NULL,
4255 };
4256
4257 #ifdef CONFIG_SCSI_IPR_DUMP
4258 /**
4259  * ipr_read_dump - Dump the adapter
4260  * @filp:               open sysfs file
4261  * @kobj:               kobject struct
4262  * @bin_attr:           bin_attribute struct
4263  * @buf:                buffer
4264  * @off:                offset
4265  * @count:              buffer size
4266  *
4267  * Return value:
4268  *      number of bytes printed to buffer
4269  **/
4270 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4271                              struct bin_attribute *bin_attr,
4272                              char *buf, loff_t off, size_t count)
4273 {
4274         struct device *cdev = container_of(kobj, struct device, kobj);
4275         struct Scsi_Host *shost = class_to_shost(cdev);
4276         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4277         struct ipr_dump *dump;
4278         unsigned long lock_flags = 0;
4279         char *src;
4280         int len, sdt_end;
4281         size_t rc = count;
4282
4283         if (!capable(CAP_SYS_ADMIN))
4284                 return -EACCES;
4285
4286         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4287         dump = ioa_cfg->dump;
4288
4289         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4290                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4291                 return 0;
4292         }
4293         kref_get(&dump->kref);
4294         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4295
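        /*
         * The dump image is three concatenated regions: the fixed-size
         * driver dump header, the SDT entries, and finally the captured
         * IOA pages.  Each block below copies whatever slice of the
         * requested off/count range falls inside its region and then
         * rebases off past that region for the next one.
         */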
4296         if (off > dump->driver_dump.hdr.len) {
4297                 kref_put(&dump->kref, ipr_release_dump);
4298                 return 0;
4299         }
4300
4301         if (off + count > dump->driver_dump.hdr.len) {
4302                 count = dump->driver_dump.hdr.len - off;
4303                 rc = count;
4304         }
4305
4306         if (count && off < sizeof(dump->driver_dump)) {
4307                 if (off + count > sizeof(dump->driver_dump))
4308                         len = sizeof(dump->driver_dump) - off;
4309                 else
4310                         len = count;
4311                 src = (u8 *)&dump->driver_dump + off;
4312                 memcpy(buf, src, len);
4313                 buf += len;
4314                 off += len;
4315                 count -= len;
4316         }
4317
4318         off -= sizeof(dump->driver_dump);
4319
4320         if (ioa_cfg->sis64)
4321                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4322                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4323                            sizeof(struct ipr_sdt_entry));
4324         else
4325                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4326                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4327
4328         if (count && off < sdt_end) {
4329                 if (off + count > sdt_end)
4330                         len = sdt_end - off;
4331                 else
4332                         len = count;
4333                 src = (u8 *)&dump->ioa_dump + off;
4334                 memcpy(buf, src, len);
4335                 buf += len;
4336                 off += len;
4337                 count -= len;
4338         }
4339
4340         off -= sdt_end;
4341
4342         while (count) {
4343                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4344                         len = PAGE_ALIGN(off) - off;
4345                 else
4346                         len = count;
4347                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4348                 src += off & ~PAGE_MASK;
4349                 memcpy(buf, src, len);
4350                 buf += len;
4351                 off += len;
4352                 count -= len;
4353         }
4354
4355         kref_put(&dump->kref, ipr_release_dump);
4356         return rc;
4357 }
4358
4359 /**
4360  * ipr_alloc_dump - Prepare for adapter dump
4361  * @ioa_cfg:    ioa config struct
4362  *
4363  * Return value:
4364  *      0 on success / other on failure
4365  **/
4366 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4367 {
4368         struct ipr_dump *dump;
4369         __be32 **ioa_data;
4370         unsigned long lock_flags = 0;
4371
4372         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4373
4374         if (!dump) {
4375                 ipr_err("Dump memory allocation failed\n");
4376                 return -ENOMEM;
4377         }
4378
4379         if (ioa_cfg->sis64)
4380                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4381         else
4382                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4383
4384         if (!ioa_data) {
4385                 ipr_err("Dump memory allocation failed\n");
4386                 kfree(dump);
4387                 return -ENOMEM;
4388         }
4389
4390         dump->ioa_dump.ioa_data = ioa_data;
4391
4392         kref_init(&dump->kref);
4393         dump->ioa_cfg = ioa_cfg;
4394
4395         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4396
4397         if (INACTIVE != ioa_cfg->sdt_state) {
4398                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4399                 vfree(dump->ioa_dump.ioa_data);
4400                 kfree(dump);
4401                 return 0;
4402         }
4403
4404         ioa_cfg->dump = dump;
4405         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4406         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4407                 ioa_cfg->dump_taken = 1;
4408                 schedule_work(&ioa_cfg->work_q);
4409         }
4410         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4411
4412         return 0;
4413 }
4414
4415 /**
4416  * ipr_free_dump - Free adapter dump memory
4417  * @ioa_cfg:    ioa config struct
4418  *
4419  * Return value:
4420  *      0 on success / other on failure
4421  **/
4422 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4423 {
4424         struct ipr_dump *dump;
4425         unsigned long lock_flags = 0;
4426
4427         ENTER;
4428
4429         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4430         dump = ioa_cfg->dump;
4431         if (!dump) {
4432                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4433                 return 0;
4434         }
4435
4436         ioa_cfg->dump = NULL;
4437         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4438
4439         kref_put(&dump->kref, ipr_release_dump);
4440
4441         LEAVE;
4442         return 0;
4443 }
4444
4445 /**
4446  * ipr_write_dump - Setup dump state of adapter
4447  * @filp:               open sysfs file
4448  * @kobj:               kobject struct
4449  * @bin_attr:           bin_attribute struct
4450  * @buf:                buffer
4451  * @off:                offset
4452  * @count:              buffer size
4453  *
4454  * Return value:
 *      count on success / other on failure
4456  **/
4457 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4458                               struct bin_attribute *bin_attr,
4459                               char *buf, loff_t off, size_t count)
4460 {
4461         struct device *cdev = container_of(kobj, struct device, kobj);
4462         struct Scsi_Host *shost = class_to_shost(cdev);
4463         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4464         int rc;
4465
4466         if (!capable(CAP_SYS_ADMIN))
4467                 return -EACCES;
4468
4469         if (buf[0] == '1')
4470                 rc = ipr_alloc_dump(ioa_cfg);
4471         else if (buf[0] == '0')
4472                 rc = ipr_free_dump(ioa_cfg);
4473         else
4474                 return -EINVAL;
4475
4476         if (rc)
4477                 return rc;
4478         else
4479                 return count;
4480 }
4481
4482 static struct bin_attribute ipr_dump_attr = {
4483         .attr = {
4484                 .name = "dump",
4485                 .mode = S_IRUSR | S_IWUSR,
4486         },
4487         .size = 0,
4488         .read = ipr_read_dump,
4489         .write = ipr_write_dump
4490 };
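
/*
 * Usage sketch (hypothetical host number): writing '1' arms dump
 * collection, reading returns the dump image once one has actually
 * been obtained from the adapter, and writing '0' frees the dump
 * memory again:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# cat /sys/class/scsi_host/host0/dump > ipr_dump.bin
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */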
4491 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4493 #endif
4494
4495 /**
4496  * ipr_change_queue_depth - Change the device's queue depth
4497  * @sdev:       scsi device struct
4498  * @qdepth:     depth to set
4500  *
4501  * Return value:
4502  *      actual depth set
4503  **/
4504 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4505 {
4506         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4507         struct ipr_resource_entry *res;
4508         unsigned long lock_flags = 0;
4509
4510         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4511         res = (struct ipr_resource_entry *)sdev->hostdata;
4512
4513         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4514                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4515         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4516
4517         scsi_change_queue_depth(sdev, qdepth);
4518         return sdev->queue_depth;
4519 }
4520
4521 /**
4522  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4523  * @dev:        device struct
4524  * @attr:       device attribute structure
4525  * @buf:        buffer
4526  *
4527  * Return value:
4528  *      number of bytes printed to buffer
4529  **/
4530 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4531 {
4532         struct scsi_device *sdev = to_scsi_device(dev);
4533         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4534         struct ipr_resource_entry *res;
4535         unsigned long lock_flags = 0;
4536         ssize_t len = -ENXIO;
4537
4538         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4539         res = (struct ipr_resource_entry *)sdev->hostdata;
4540         if (res)
4541                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4542         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4543         return len;
4544 }
4545
4546 static struct device_attribute ipr_adapter_handle_attr = {
4547         .attr = {
4548                 .name =         "adapter_handle",
4549                 .mode =         S_IRUSR,
4550         },
4551         .show = ipr_show_adapter_handle
4552 };
4553
4554 /**
4555  * ipr_show_resource_path - Show the resource path or the resource address for
4556  *                          this device.
4557  * @dev:        device struct
4558  * @attr:       device attribute structure
4559  * @buf:        buffer
4560  *
4561  * Return value:
4562  *      number of bytes printed to buffer
4563  **/
4564 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4565 {
4566         struct scsi_device *sdev = to_scsi_device(dev);
4567         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4568         struct ipr_resource_entry *res;
4569         unsigned long lock_flags = 0;
4570         ssize_t len = -ENXIO;
4571         char buffer[IPR_MAX_RES_PATH_LENGTH];
4572
4573         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4574         res = (struct ipr_resource_entry *)sdev->hostdata;
4575         if (res && ioa_cfg->sis64)
4576                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4577                                __ipr_format_res_path(res->res_path, buffer,
4578                                                      sizeof(buffer)));
4579         else if (res)
4580                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4581                                res->bus, res->target, res->lun);
4582
4583         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4584         return len;
4585 }
4586
4587 static struct device_attribute ipr_resource_path_attr = {
4588         .attr = {
4589                 .name =         "resource_path",
4590                 .mode =         S_IRUGO,
4591         },
4592         .show = ipr_show_resource_path
4593 };
4594
4595 /**
4596  * ipr_show_device_id - Show the device_id for this device.
4597  * @dev:        device struct
4598  * @attr:       device attribute structure
4599  * @buf:        buffer
4600  *
4601  * Return value:
4602  *      number of bytes printed to buffer
4603  **/
4604 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4605 {
4606         struct scsi_device *sdev = to_scsi_device(dev);
4607         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4608         struct ipr_resource_entry *res;
4609         unsigned long lock_flags = 0;
4610         ssize_t len = -ENXIO;
4611
4612         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4613         res = (struct ipr_resource_entry *)sdev->hostdata;
4614         if (res && ioa_cfg->sis64)
4615                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4616         else if (res)
4617                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4618
4619         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4620         return len;
4621 }
4622
4623 static struct device_attribute ipr_device_id_attr = {
4624         .attr = {
4625                 .name =         "device_id",
4626                 .mode =         S_IRUGO,
4627         },
4628         .show = ipr_show_device_id
4629 };
4630
4631 /**
4632  * ipr_show_resource_type - Show the resource type for this device.
4633  * @dev:        device struct
4634  * @attr:       device attribute structure
4635  * @buf:        buffer
4636  *
4637  * Return value:
4638  *      number of bytes printed to buffer
4639  **/
4640 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4641 {
4642         struct scsi_device *sdev = to_scsi_device(dev);
4643         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4644         struct ipr_resource_entry *res;
4645         unsigned long lock_flags = 0;
4646         ssize_t len = -ENXIO;
4647
4648         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4649         res = (struct ipr_resource_entry *)sdev->hostdata;
4650
4651         if (res)
4652                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4653
4654         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4655         return len;
4656 }
4657
4658 static struct device_attribute ipr_resource_type_attr = {
4659         .attr = {
4660                 .name =         "resource_type",
4661                 .mode =         S_IRUGO,
4662         },
4663         .show = ipr_show_resource_type
4664 };
4665
4666 /**
 * ipr_show_raw_mode - Show the device's raw mode
 * @dev:        class device struct
 * @attr:       device attribute (unused)
 * @buf:        buffer
4670  *
4671  * Return value:
4672  *      number of bytes printed to buffer
4673  **/
4674 static ssize_t ipr_show_raw_mode(struct device *dev,
4675                                  struct device_attribute *attr, char *buf)
4676 {
4677         struct scsi_device *sdev = to_scsi_device(dev);
4678         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4679         struct ipr_resource_entry *res;
4680         unsigned long lock_flags = 0;
4681         ssize_t len;
4682
4683         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4684         res = (struct ipr_resource_entry *)sdev->hostdata;
4685         if (res)
4686                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4687         else
4688                 len = -ENXIO;
4689         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4690         return len;
4691 }
4692
4693 /**
 * ipr_store_raw_mode - Change the device's raw mode
 * @dev:        class device struct
 * @attr:       device attribute (unused)
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes consumed on success / other on failure
4700  **/
4701 static ssize_t ipr_store_raw_mode(struct device *dev,
4702                                   struct device_attribute *attr,
4703                                   const char *buf, size_t count)
4704 {
4705         struct scsi_device *sdev = to_scsi_device(dev);
4706         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4707         struct ipr_resource_entry *res;
4708         unsigned long lock_flags = 0;
4709         ssize_t len;
4710
4711         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4712         res = (struct ipr_resource_entry *)sdev->hostdata;
4713         if (res) {
4714                 if (ipr_is_af_dasd_device(res)) {
4715                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4716                         len = strlen(buf);
4717                         if (res->sdev)
4718                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4719                                         res->raw_mode ? "enabled" : "disabled");
4720                 } else
4721                         len = -EINVAL;
4722         } else
4723                 len = -ENXIO;
4724         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4725         return len;
4726 }
4727
4728 static struct device_attribute ipr_raw_mode_attr = {
4729         .attr = {
4730                 .name =         "raw_mode",
4731                 .mode =         S_IRUGO | S_IWUSR,
4732         },
4733         .show = ipr_show_raw_mode,
4734         .store = ipr_store_raw_mode
4735 };
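
/*
 * Usage sketch (hypothetical device address; the store only succeeds
 * for AF DASD devices):
 *
 *	# echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
 */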
4736
4737 static struct device_attribute *ipr_dev_attrs[] = {
4738         &ipr_adapter_handle_attr,
4739         &ipr_resource_path_attr,
4740         &ipr_device_id_attr,
4741         &ipr_resource_type_attr,
4742         &ipr_raw_mode_attr,
4743         NULL,
4744 };
4745
4746 /**
4747  * ipr_biosparam - Return the HSC mapping
4748  * @sdev:                       scsi device struct
4749  * @block_device:       block device pointer
4750  * @capacity:           capacity of the device
4751  * @parm:                       Array containing returned HSC values.
4752  *
4753  * This function generates the HSC parms that fdisk uses.
4754  * We want to make sure we return something that places partitions
4755  * on 4k boundaries for best performance with the IOA.
4756  *
4757  * Return value:
4758  *      0 on success
4759  **/
4760 static int ipr_biosparam(struct scsi_device *sdev,
4761                          struct block_device *block_device,
4762                          sector_t capacity, int *parm)
4763 {
4764         int heads, sectors;
4765         sector_t cylinders;
4766
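        /*
         * 128 heads * 32 sectors = 4096 sectors per cylinder, so with
         * 512-byte blocks every cylinder (and therefore every partition
         * boundary fdisk picks) starts on a 4k multiple.
         */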
4767         heads = 128;
4768         sectors = 32;
4769
4770         cylinders = capacity;
        sector_div(cylinders, (heads * sectors));
4772
4773         /* return result */
4774         parm[0] = heads;
4775         parm[1] = sectors;
4776         parm[2] = cylinders;
4777
4778         return 0;
4779 }
4780
4781 /**
4782  * ipr_find_starget - Find target based on bus/target.
4783  * @starget:    scsi target struct
4784  *
4785  * Return value:
4786  *      resource entry pointer if found / NULL if not found
4787  **/
4788 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4789 {
4790         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4791         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4792         struct ipr_resource_entry *res;
4793
4794         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4795                 if ((res->bus == starget->channel) &&
4796                     (res->target == starget->id)) {
4797                         return res;
4798                 }
4799         }
4800
4801         return NULL;
4802 }
4803
4804 static struct ata_port_info sata_port_info;
4805
4806 /**
4807  * ipr_target_alloc - Prepare for commands to a SCSI target
4808  * @starget:    scsi target struct
4809  *
4810  * If the device is a SATA device, this function allocates an
4811  * ATA port with libata, else it does nothing.
4812  *
4813  * Return value:
4814  *      0 on success / non-0 on failure
4815  **/
4816 static int ipr_target_alloc(struct scsi_target *starget)
4817 {
4818         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4819         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4820         struct ipr_sata_port *sata_port;
4821         struct ata_port *ap;
4822         struct ipr_resource_entry *res;
4823         unsigned long lock_flags;
4824
4825         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4826         res = ipr_find_starget(starget);
4827         starget->hostdata = NULL;
4828
4829         if (res && ipr_is_gata(res)) {
4830                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4831                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4832                 if (!sata_port)
4833                         return -ENOMEM;
4834
4835                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4836                 if (ap) {
4837                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4838                         sata_port->ioa_cfg = ioa_cfg;
4839                         sata_port->ap = ap;
4840                         sata_port->res = res;
4841
4842                         res->sata_port = sata_port;
4843                         ap->private_data = sata_port;
4844                         starget->hostdata = sata_port;
4845                 } else {
4846                         kfree(sata_port);
4847                         return -ENOMEM;
4848                 }
4849         }
4850         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4851
4852         return 0;
4853 }
4854
4855 /**
4856  * ipr_target_destroy - Destroy a SCSI target
4857  * @starget:    scsi target struct
4858  *
4859  * If the device was a SATA device, this function frees the libata
4860  * ATA port, else it does nothing.
4861  *
4862  **/
4863 static void ipr_target_destroy(struct scsi_target *starget)
4864 {
4865         struct ipr_sata_port *sata_port = starget->hostdata;
4866         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4867         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4868
4869         if (ioa_cfg->sis64) {
4870                 if (!ipr_find_starget(starget)) {
4871                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4872                                 clear_bit(starget->id, ioa_cfg->array_ids);
4873                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4874                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4875                         else if (starget->channel == 0)
4876                                 clear_bit(starget->id, ioa_cfg->target_ids);
4877                 }
4878         }
4879
4880         if (sata_port) {
4881                 starget->hostdata = NULL;
4882                 ata_sas_port_destroy(sata_port->ap);
4883                 kfree(sata_port);
4884         }
4885 }
4886
4887 /**
4888  * ipr_find_sdev - Find device based on bus/target/lun.
4889  * @sdev:       scsi device struct
4890  *
4891  * Return value:
4892  *      resource entry pointer if found / NULL if not found
4893  **/
4894 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4895 {
4896         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4897         struct ipr_resource_entry *res;
4898
4899         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4900                 if ((res->bus == sdev->channel) &&
4901                     (res->target == sdev->id) &&
4902                     (res->lun == sdev->lun))
4903                         return res;
4904         }
4905
4906         return NULL;
4907 }
4908
4909 /**
4910  * ipr_slave_destroy - Unconfigure a SCSI device
4911  * @sdev:       scsi device struct
4912  *
4913  * Return value:
4914  *      nothing
4915  **/
4916 static void ipr_slave_destroy(struct scsi_device *sdev)
4917 {
4918         struct ipr_resource_entry *res;
4919         struct ipr_ioa_cfg *ioa_cfg;
4920         unsigned long lock_flags = 0;
4921
4922         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4923
4924         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4925         res = (struct ipr_resource_entry *) sdev->hostdata;
4926         if (res) {
4927                 if (res->sata_port)
4928                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4929                 sdev->hostdata = NULL;
4930                 res->sdev = NULL;
4931                 res->sata_port = NULL;
4932         }
4933         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4934 }
4935
4936 /**
4937  * ipr_slave_configure - Configure a SCSI device
4938  * @sdev:       scsi device struct
4939  *
4940  * This function configures the specified scsi device.
4941  *
4942  * Return value:
4943  *      0 on success
4944  **/
4945 static int ipr_slave_configure(struct scsi_device *sdev)
4946 {
4947         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4948         struct ipr_resource_entry *res;
4949         struct ata_port *ap = NULL;
4950         unsigned long lock_flags = 0;
4951         char buffer[IPR_MAX_RES_PATH_LENGTH];
4952
4953         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4954         res = sdev->hostdata;
4955         if (res) {
4956                 if (ipr_is_af_dasd_device(res))
4957                         sdev->type = TYPE_RAID;
4958                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4959                         sdev->scsi_level = 4;
4960                         sdev->no_uld_attach = 1;
4961                 }
4962                 if (ipr_is_vset_device(res)) {
4963                         sdev->scsi_level = SCSI_SPC_3;
4964                         sdev->no_report_opcodes = 1;
4965                         blk_queue_rq_timeout(sdev->request_queue,
4966                                              IPR_VSET_RW_TIMEOUT);
4967                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4968                 }
4969                 if (ipr_is_gata(res) && res->sata_port)
4970                         ap = res->sata_port->ap;
4971                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4972
4973                 if (ap) {
4974                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4975                         ata_sas_slave_configure(sdev, ap);
4976                 }
4977
4978                 if (ioa_cfg->sis64)
4979                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4980                                     ipr_format_res_path(ioa_cfg,
4981                                 res->res_path, buffer, sizeof(buffer)));
4982                 return 0;
4983         }
4984         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4985         return 0;
4986 }
4987
4988 /**
4989  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4990  * @sdev:       scsi device struct
4991  *
4992  * This function initializes an ATA port so that future commands
4993  * sent through queuecommand will work.
4994  *
4995  * Return value:
4996  *      0 on success
4997  **/
4998 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4999 {
5000         struct ipr_sata_port *sata_port = NULL;
5001         int rc = -ENXIO;
5002
5003         ENTER;
5004         if (sdev->sdev_target)
5005                 sata_port = sdev->sdev_target->hostdata;
5006         if (sata_port) {
5007                 rc = ata_sas_port_init(sata_port->ap);
5008                 if (rc == 0)
5009                         rc = ata_sas_sync_probe(sata_port->ap);
5010         }
5011
5012         if (rc)
5013                 ipr_slave_destroy(sdev);
5014
5015         LEAVE;
5016         return rc;
5017 }
5018
5019 /**
5020  * ipr_slave_alloc - Prepare for commands to a device.
5021  * @sdev:       scsi device struct
5022  *
5023  * This function saves a pointer to the resource entry
5024  * in the scsi device struct if the device exists. We
5025  * can then use this pointer in ipr_queuecommand when
5026  * handling new commands.
5027  *
5028  * Return value:
5029  *      0 on success / -ENXIO if device does not exist
5030  **/
5031 static int ipr_slave_alloc(struct scsi_device *sdev)
5032 {
5033         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5034         struct ipr_resource_entry *res;
5035         unsigned long lock_flags;
5036         int rc = -ENXIO;
5037
5038         sdev->hostdata = NULL;
5039
5040         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5041
5042         res = ipr_find_sdev(sdev);
5043         if (res) {
5044                 res->sdev = sdev;
5045                 res->add_to_ml = 0;
5046                 res->in_erp = 0;
5047                 sdev->hostdata = res;
5048                 if (!ipr_is_naca_model(res))
5049                         res->needs_sync_complete = 1;
5050                 rc = 0;
5051                 if (ipr_is_gata(res)) {
5052                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5053                         return ipr_ata_slave_alloc(sdev);
5054                 }
5055         }
5056
5057         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5058
5059         return rc;
5060 }
5061
5062 /**
5063  * ipr_match_lun - Match function for specified LUN
5064  * @ipr_cmd:    ipr command struct
5065  * @device:             device to match (sdev)
5066  *
5067  * Returns:
5068  *      1 if command matches sdev / 0 if command does not match sdev
5069  **/
5070 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5071 {
5072         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5073                 return 1;
5074         return 0;
5075 }
5076
5077 /**
5078  * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:    ipr command struct
 *
 * Returns:
 *      true if the command is on the free queue / false otherwise
5083  **/
5084 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5085 {
5086         struct ipr_cmnd *loop_cmd;
5087
5088         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5089                 if (loop_cmd == ipr_cmd)
5090                         return true;
5091         }
5092
5093         return false;
5094 }
5095
5096 /**
5097  * ipr_match_res - Match function for specified resource entry
5098  * @ipr_cmd:    ipr command struct
5099  * @resource:   resource entry to match
5100  *
5101  * Returns:
 *      1 if command matches the resource entry / 0 if it does not
5103  **/
5104 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5105 {
5106         struct ipr_resource_entry *res = resource;
5107
5108         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5109                 return 1;
5110         return 0;
5111 }
5112
5113 /**
5114  * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:    ioa config struct
5116  * @device:             device to match (sdev)
5117  * @match:              match function to use
5118  *
5119  * Returns:
5120  *      SUCCESS / FAILED
5121  **/
5122 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5123                             int (*match)(struct ipr_cmnd *, void *))
5124 {
5125         struct ipr_cmnd *ipr_cmd;
5126         int wait, i;
5127         unsigned long flags;
5128         struct ipr_hrr_queue *hrrq;
5129         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5130         DECLARE_COMPLETION_ONSTACK(comp);
5131
5132         ENTER;
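        /*
         * Hook a completion onto every outstanding command that matches,
         * then sleep until they all finish.  If the wait times out, strip
         * the completions back off; anything that still matches at that
         * point never completed, so the wait is failed.
         */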
5133         do {
5134                 wait = 0;
5135
5136                 for_each_hrrq(hrrq, ioa_cfg) {
5137                         spin_lock_irqsave(hrrq->lock, flags);
5138                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5139                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5140                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5141                                         if (match(ipr_cmd, device)) {
5142                                                 ipr_cmd->eh_comp = &comp;
5143                                                 wait++;
5144                                         }
5145                                 }
5146                         }
5147                         spin_unlock_irqrestore(hrrq->lock, flags);
5148                 }
5149
5150                 if (wait) {
5151                         timeout = wait_for_completion_timeout(&comp, timeout);
5152
5153                         if (!timeout) {
5154                                 wait = 0;
5155
5156                                 for_each_hrrq(hrrq, ioa_cfg) {
5157                                         spin_lock_irqsave(hrrq->lock, flags);
5158                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5159                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5160                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5161                                                         if (match(ipr_cmd, device)) {
5162                                                                 ipr_cmd->eh_comp = NULL;
5163                                                                 wait++;
5164                                                         }
5165                                                 }
5166                                         }
5167                                         spin_unlock_irqrestore(hrrq->lock, flags);
5168                                 }
5169
5170                                 if (wait)
5171                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5172                                 LEAVE;
5173                                 return wait ? FAILED : SUCCESS;
5174                         }
5175                 }
5176         } while (wait);
5177
5178         LEAVE;
5179         return SUCCESS;
5180 }
5181
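/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/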
5182 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5183 {
5184         struct ipr_ioa_cfg *ioa_cfg;
5185         unsigned long lock_flags = 0;
5186         int rc = SUCCESS;
5187
5188         ENTER;
5189         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5190         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5191
5192         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5193                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5194                 dev_err(&ioa_cfg->pdev->dev,
5195                         "Adapter being reset as a result of error recovery.\n");
5196
5197                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5198                         ioa_cfg->sdt_state = GET_DUMP;
5199         }
5200
5201         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5202         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5203         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5204
        /* If we got hit with a host reset while we were already resetting
         * the adapter for some reason, and that reset failed, fail the
         * host reset as well.
         */
5207         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5208                 ipr_trace;
5209                 rc = FAILED;
5210         }
5211
5212         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5213         LEAVE;
5214         return rc;
5215 }
5216
5217 /**
5218  * ipr_device_reset - Reset the device
5219  * @ioa_cfg:    ioa config struct
5220  * @res:                resource entry struct
5221  *
5222  * This function issues a device reset to the affected device.
5223  * If the device is a SCSI device, a LUN reset will be sent
5224  * to the device first. If that does not work, a target reset
5225  * will be sent. If the device is a SATA device, a PHY reset will
5226  * be sent.
5227  *
5228  * Return value:
5229  *      0 on success / non-zero on failure
5230  **/
5231 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5232                             struct ipr_resource_entry *res)
5233 {
5234         struct ipr_cmnd *ipr_cmd;
5235         struct ipr_ioarcb *ioarcb;
5236         struct ipr_cmd_pkt *cmd_pkt;
5237         struct ipr_ioarcb_ata_regs *regs;
5238         u32 ioasc;
5239
5240         ENTER;
5241         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5242         ioarcb = &ipr_cmd->ioarcb;
5243         cmd_pkt = &ioarcb->cmd_pkt;
5244
5245         if (ipr_cmd->ioa_cfg->sis64) {
5246                 regs = &ipr_cmd->i.ata_ioadl.regs;
5247                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5248         } else
5249                 regs = &ioarcb->u.add_data.u.regs;
5250
5251         ioarcb->res_handle = res->res_handle;
5252         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5253         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5254         if (ipr_is_gata(res)) {
5255                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5256                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5257                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5258         }
5259
5260         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5261         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5262         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5263         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5264                 if (ipr_cmd->ioa_cfg->sis64)
5265                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5266                                sizeof(struct ipr_ioasa_gata));
5267                 else
5268                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5269                                sizeof(struct ipr_ioasa_gata));
5270         }
5271
5272         LEAVE;
5273         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5274 }
5275
5276 /**
5277  * ipr_sata_reset - Reset the SATA port
5278  * @link:       SATA link to reset
 * @classes:    class of the attached device
 * @deadline:   unused
5280  *
5281  * This function issues a SATA phy reset to the affected ATA link.
5282  *
5283  * Return value:
5284  *      0 on success / non-zero on failure
5285  **/
5286 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5287                                 unsigned long deadline)
5288 {
5289         struct ipr_sata_port *sata_port = link->ap->private_data;
5290         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5291         struct ipr_resource_entry *res;
5292         unsigned long lock_flags = 0;
5293         int rc = -ENXIO, ret;
5294
5295         ENTER;
5296         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5297         while (ioa_cfg->in_reset_reload) {
5298                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5299                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5300                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5301         }
5302
5303         res = sata_port->res;
5304         if (res) {
5305                 rc = ipr_device_reset(ioa_cfg, res);
5306                 *classes = res->ata_class;
5307                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5308
5309                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5310                 if (ret != SUCCESS) {
5311                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5312                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5313                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5314
5315                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5316                 }
5317         } else
5318                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5319
5320         LEAVE;
5321         return rc;
5322 }
5323
5324 /**
 * __ipr_eh_dev_reset - Reset the device
5326  * @scsi_cmd:   scsi command struct
5327  *
5328  * This function issues a device reset to the affected device.
5329  * A LUN reset will be sent to the device first. If that does
5330  * not work, a target reset will be sent.
5331  *
5332  * Return value:
5333  *      SUCCESS / FAILED
5334  **/
5335 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5336 {
5337         struct ipr_cmnd *ipr_cmd;
5338         struct ipr_ioa_cfg *ioa_cfg;
5339         struct ipr_resource_entry *res;
5340         struct ata_port *ap;
5341         int rc = 0, i;
5342         struct ipr_hrr_queue *hrrq;
5343
5344         ENTER;
5345         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5346         res = scsi_cmd->device->hostdata;
5347
5348         /*
         * If we are currently going through reset/reload, return failed.
         * This will force the mid-layer to call ipr_eh_host_reset, which
         * will then go to sleep and wait for the reset to complete.
5352          */
5353         if (ioa_cfg->in_reset_reload)
5354                 return FAILED;
5355         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5356                 return FAILED;
5357
5358         for_each_hrrq(hrrq, ioa_cfg) {
5359                 spin_lock(&hrrq->_lock);
5360                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5361                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5362
5363                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5364                                 if (!ipr_cmd->qc)
5365                                         continue;
5366                                 if (ipr_cmnd_is_free(ipr_cmd))
5367                                         continue;
5368
5369                                 ipr_cmd->done = ipr_sata_eh_done;
5370                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5371                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5372                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5373                                 }
5374                         }
5375                 }
5376                 spin_unlock(&hrrq->_lock);
5377         }
5378         res->resetting_device = 1;
5379         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5380
5381         if (ipr_is_gata(res) && res->sata_port) {
5382                 ap = res->sata_port->ap;
5383                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5384                 ata_std_error_handler(ap);
5385                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5386         } else
5387                 rc = ipr_device_reset(ioa_cfg, res);
5388         res->resetting_device = 0;
5389         res->reset_occurred = 1;
5390
5391         LEAVE;
5392         return rc ? FAILED : SUCCESS;
5393 }
5394
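/**
 * ipr_eh_dev_reset - Reset the device and wait for its outstanding ops
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/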
5395 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5396 {
5397         int rc;
5398         struct ipr_ioa_cfg *ioa_cfg;
5399         struct ipr_resource_entry *res;
5400
5401         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5402         res = cmd->device->hostdata;
5403
5404         if (!res)
5405                 return FAILED;
5406
5407         spin_lock_irq(cmd->device->host->host_lock);
5408         rc = __ipr_eh_dev_reset(cmd);
5409         spin_unlock_irq(cmd->device->host->host_lock);
5410
5411         if (rc == SUCCESS) {
5412                 if (ipr_is_gata(res) && res->sata_port)
5413                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5414                 else
5415                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5416         }
5417
5418         return rc;
5419 }
5420
5421 /**
5422  * ipr_bus_reset_done - Op done function for bus reset.
5423  * @ipr_cmd:    ipr command struct
5424  *
5425  * This function is the op done function for a bus reset
5426  *
5427  * Return value:
5428  *      none
5429  **/
5430 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5431 {
5432         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5433         struct ipr_resource_entry *res;
5434
5435         ENTER;
5436         if (!ioa_cfg->sis64)
5437                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5438                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5439                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5440                                 break;
5441                         }
5442                 }
5443
5444         /*
5445          * If abort has not completed, indicate the reset has, else call the
5446          * abort's done function to wake the sleeping eh thread
5447          */
5448         if (ipr_cmd->sibling->sibling)
5449                 ipr_cmd->sibling->sibling = NULL;
5450         else
5451                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5452
5453         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5454         LEAVE;
5455 }
5456
5457 /**
5458  * ipr_abort_timeout - An abort task has timed out
5459  * @ipr_cmd:    ipr command struct
5460  *
5461  * This function handles when an abort task times out. If this
5462  * happens we issue a bus reset since we have resources tied
5463  * up that must be freed before returning to the midlayer.
5464  *
5465  * Return value:
5466  *      none
5467  **/
5468 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5469 {
5470         struct ipr_cmnd *reset_cmd;
5471         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5472         struct ipr_cmd_pkt *cmd_pkt;
5473         unsigned long lock_flags = 0;
5474
5475         ENTER;
5476         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5477         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5478                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5479                 return;
5480         }
5481
5482         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5483         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5484         ipr_cmd->sibling = reset_cmd;
5485         reset_cmd->sibling = ipr_cmd;
5486         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5487         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5488         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5489         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5490         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5491
5492         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5493         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5494         LEAVE;
5495 }
5496
5497 /**
5498  * ipr_cancel_op - Cancel specified op
5499  * @scsi_cmd:   scsi command struct
5500  *
5501  * This function cancels specified op.
5502  *
5503  * Return value:
5504  *      SUCCESS / FAILED
5505  **/
5506 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5507 {
5508         struct ipr_cmnd *ipr_cmd;
5509         struct ipr_ioa_cfg *ioa_cfg;
5510         struct ipr_resource_entry *res;
5511         struct ipr_cmd_pkt *cmd_pkt;
5512         u32 ioasc, int_reg;
5513         int i, op_found = 0;
5514         struct ipr_hrr_queue *hrrq;
5515
5516         ENTER;
5517         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5518         res = scsi_cmd->device->hostdata;
5519
5520         /* If we are currently going through reset/reload, return failed.
5521          * This will force the mid-layer to call ipr_eh_host_reset,
5522          * which will then go to sleep and wait for the reset to complete
5523          */
5524         if (ioa_cfg->in_reset_reload ||
5525             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5526                 return FAILED;
5527         if (!res)
5528                 return FAILED;
5529
5530         /*
5531          * If we are aborting a timed out op, chances are that the timeout was caused
5532          * by a still not detected EEH error. In such cases, reading a register will
5533          * trigger the EEH recovery infrastructure.
5534          */
5535         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5536
5537         if (!ipr_is_gscsi(res))
5538                 return FAILED;
5539
5540         for_each_hrrq(hrrq, ioa_cfg) {
5541                 spin_lock(&hrrq->_lock);
5542                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5543                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5544                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5545                                         op_found = 1;
5546                                         break;
5547                                 }
5548                         }
5549                 }
5550                 spin_unlock(&hrrq->_lock);
5551         }
5552
5553         if (!op_found)
5554                 return SUCCESS;
5555
5556         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5557         ipr_cmd->ioarcb.res_handle = res->res_handle;
5558         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5559         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5560         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5561         ipr_cmd->u.sdev = scsi_cmd->device;
5562
5563         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5564                     scsi_cmd->cmnd[0]);
5565         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5566         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5567
5568         /*
5569          * If the abort task timed out and we sent a bus reset, we will get
5570  * one of the following responses to the abort
5571          */
5572         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5573                 ioasc = 0;
5574                 ipr_trace;
5575         }
5576
5577         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5578         if (!ipr_is_naca_model(res))
5579                 res->needs_sync_complete = 1;
5580
5581         LEAVE;
5582         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5583 }
5584
5585 /**
5586  * ipr_scan_finished - Check if device scan is complete
5587  * @shost:              scsi host struct
5588  * @elapsed_time:       elapsed time in jiffies
5589  * Return value:
5590  *      0 if scan in progress / 1 if scan is complete
5591  **/
5592 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5593 {
5594         unsigned long lock_flags;
5595         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5596         int rc = 0;
5597
5598         spin_lock_irqsave(shost->host_lock, lock_flags);
5599         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5600                 rc = 1;
5601         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5602                 rc = 1;
5603         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5604         return rc;
5605 }
5606
5607 /**
5608  * ipr_eh_abort - Abort a single op
5609  * @scsi_cmd:   scsi command struct
5610  *
5611  * Return value:
5612  *      SUCCESS / FAILED
5613  **/
5614 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5615 {
5616         unsigned long flags;
5617         int rc;
5618         struct ipr_ioa_cfg *ioa_cfg;
5619
5620         ENTER;
5621
5622         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5623
5624         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5625         rc = ipr_cancel_op(scsi_cmd);
5626         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5627
5628         if (rc == SUCCESS)
5629                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5630         LEAVE;
5631         return rc;
5632 }
5633
5634 /**
5635  * ipr_handle_other_interrupt - Handle "other" interrupts
5636  * @ioa_cfg:    ioa config struct
5637  * @int_reg:    interrupt register
5638  *
5639  * Return value:
5640  *      IRQ_NONE / IRQ_HANDLED
5641  **/
5642 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5643                                               u32 int_reg)
5644 {
5645         irqreturn_t rc = IRQ_HANDLED;
5646         u32 int_mask_reg;
5647
5648         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5649         int_reg &= ~int_mask_reg;
5650
5651         /* If no operational interrupt occurred on the adapter, ignore it.
5652          * On SIS-64, however, first check for a stage change interrupt.
5653          */
5654         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5655                 if (ioa_cfg->sis64) {
5656                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5657                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5658                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5659
5660                                 /* clear stage change */
5661                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5662                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5663                                 list_del(&ioa_cfg->reset_cmd->queue);
5664                                 del_timer(&ioa_cfg->reset_cmd->timer);
5665                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5666                                 return IRQ_HANDLED;
5667                         }
5668                 }
5669
5670                 return IRQ_NONE;
5671         }
5672
5673         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5674                 /* Mask the interrupt */
5675                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5676                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5677
5678                 list_del(&ioa_cfg->reset_cmd->queue);
5679                 del_timer(&ioa_cfg->reset_cmd->timer);
5680                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5681         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5682                 if (ioa_cfg->clear_isr) {
5683                         if (ipr_debug && printk_ratelimit())
5684                                 dev_err(&ioa_cfg->pdev->dev,
5685                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5686                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5687                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5688                         return IRQ_NONE;
5689                 }
5690         } else {
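                /* Anything else is a fatal adapter condition: record it,
                 * capture a dump if one is pending, and reset the IOA.
                 */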
5691                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5692                         ioa_cfg->ioa_unit_checked = 1;
5693                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5694                         dev_err(&ioa_cfg->pdev->dev,
5695                                 "No Host RRQ. 0x%08X\n", int_reg);
5696                 else
5697                         dev_err(&ioa_cfg->pdev->dev,
5698                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5699
5700                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5701                         ioa_cfg->sdt_state = GET_DUMP;
5702
5703                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5704                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5705         }
5706
5707         return rc;
5708 }
5709
5710 /**
5711  * ipr_isr_eh - Interrupt service routine error handler
5712  * @ioa_cfg:    ioa config struct
5713  * @msg:        message to log
5714  * @number:     value to log along with the message
5715  * Return value:
5716  *      none
5717  **/
5718 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5719 {
5720         ioa_cfg->errors_logged++;
5721         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5722
5723         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5724                 ioa_cfg->sdt_state = GET_DUMP;
5725
5726         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5727 }
5728
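/**
 * ipr_process_hrrq - Process responses on a host request response queue
 * @hrr_queue:  hrr queue to process
 * @budget:     maximum number of responses to process (<= 0 means no limit)
 * @doneq:      list head on which completed commands are queued
 *
 * Return value:
 *      number of responses processed
 **/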
5729 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5730                                                 struct list_head *doneq)
5731 {
5732         u32 ioasc;
5733         u16 cmd_index;
5734         struct ipr_cmnd *ipr_cmd;
5735         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5736         int num_hrrq = 0;
5737
5738         /* If interrupts are disabled, ignore the interrupt */
5739         if (!hrr_queue->allow_interrupts)
5740                 return 0;
5741
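        /* An HRRQ entry is valid only while its toggle bit matches ours:
         * the adapter inverts the bit each time it wraps the queue, and we
         * flip our copy when we wrap, so stale entries are never replayed.
         */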
5742         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5743                hrr_queue->toggle_bit) {
5744
5745                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5746                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5747                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5748
5749                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5750                              cmd_index < hrr_queue->min_cmd_id)) {
5751                         ipr_isr_eh(ioa_cfg,
5752                                 "Invalid response handle from IOA: ",
5753                                 cmd_index);
5754                         break;
5755                 }
5756
5757                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5758                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5759
5760                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5761
5762                 list_move_tail(&ipr_cmd->queue, doneq);
5763
5764                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5765                         hrr_queue->hrrq_curr++;
5766                 } else {
5767                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5768                         hrr_queue->toggle_bit ^= 1u;
5769                 }
5770                 num_hrrq++;
5771                 if (budget > 0 && num_hrrq >= budget)
5772                         break;
5773         }
5774
5775         return num_hrrq;
5776 }
5777
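/**
 * ipr_iopoll - irq_poll callback to process HRRQ responses in softirq context
 * @iop:        irq_poll structure embedded in an hrr queue
 * @budget:     maximum number of responses to process per poll
 *
 * Return value:
 *      number of completed ops
 **/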
5778 static int ipr_iopoll(struct irq_poll *iop, int budget)
5779 {
5780         struct ipr_ioa_cfg *ioa_cfg;
5781         struct ipr_hrr_queue *hrrq;
5782         struct ipr_cmnd *ipr_cmd, *temp;
5783         unsigned long hrrq_flags;
5784         int completed_ops;
5785         LIST_HEAD(doneq);
5786
5787         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5788         ioa_cfg = hrrq->ioa_cfg;
5789
5790         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5791         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5792
5793         if (completed_ops < budget)
5794                 irq_poll_complete(iop);
5795         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5796
5797         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5798                 list_del(&ipr_cmd->queue);
5799                 del_timer(&ipr_cmd->timer);
5800                 ipr_cmd->fast_done(ipr_cmd);
5801         }
5802
5803         return completed_ops;
5804 }
5805
5806 /**
5807  * ipr_isr - Interrupt service routine
5808  * @irq:        irq number
5809  * @devp:       pointer to hrr queue struct
5810  *
5811  * Return value:
5812  *      IRQ_NONE / IRQ_HANDLED
5813  **/
5814 static irqreturn_t ipr_isr(int irq, void *devp)
5815 {
5816         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5817         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5818         unsigned long hrrq_flags = 0;
5819         u32 int_reg = 0;
5820         int num_hrrq = 0;
5821         int irq_none = 0;
5822         struct ipr_cmnd *ipr_cmd, *temp;
5823         irqreturn_t rc = IRQ_NONE;
5824         LIST_HEAD(doneq);
5825
5826         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5827         /* If interrupts are disabled, ignore the interrupt */
5828         if (!hrrq->allow_interrupts) {
5829                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5830                 return IRQ_NONE;
5831         }
5832
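        /* Drain the HRRQ, then clear the PCI-side HRRQ interrupt. The
         * clear is retried up to IPR_MAX_HRRQ_RETRIES times, since new
         * responses may arrive while the interrupt is being cleared.
         */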
5833         while (1) {
5834                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5835                         rc =  IRQ_HANDLED;
5836
5837                         if (!ioa_cfg->clear_isr)
5838                                 break;
5839
5840                         /* Clear the PCI interrupt */
5841                         num_hrrq = 0;
5842                         do {
5843                                 writel(IPR_PCII_HRRQ_UPDATED,
5844                                      ioa_cfg->regs.clr_interrupt_reg32);
5845                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5846                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5847                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5848
5849                 } else if (rc == IRQ_NONE && irq_none == 0) {
5850                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5851                         irq_none++;
5852                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5853                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5854                         ipr_isr_eh(ioa_cfg,
5855                                 "Error clearing HRRQ: ", num_hrrq);
5856                         rc = IRQ_HANDLED;
5857                         break;
5858                 } else
5859                         break;
5860         }
5861
5862         if (unlikely(rc == IRQ_NONE))
5863                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5864
5865         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5866         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5867                 list_del(&ipr_cmd->queue);
5868                 del_timer(&ipr_cmd->timer);
5869                 ipr_cmd->fast_done(ipr_cmd);
5870         }
5871         return rc;
5872 }
5873
5874 /**
5875  * ipr_isr_mhrrq - Interrupt service routine for MSI-X HRR queues
5876  * @irq:        irq number
5877  * @devp:       pointer to hrr queue struct
5878  *
5879  * Return value:
5880  *      IRQ_NONE / IRQ_HANDLED
5881  **/
5882 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5883 {
5884         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5885         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5886         unsigned long hrrq_flags = 0;
5887         struct ipr_cmnd *ipr_cmd, *temp;
5888         irqreturn_t rc = IRQ_NONE;
5889         LIST_HEAD(doneq);
5890
5891         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5892
5893         /* If interrupts are disabled, ignore the interrupt */
5894         if (!hrrq->allow_interrupts) {
5895                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5896                 return IRQ_NONE;
5897         }
5898
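        /* With iopoll enabled on SIS-64 and multiple vectors, defer HRRQ
         * processing to irq_poll in softirq context; otherwise process
         * the queue directly here in the interrupt handler.
         */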
5899         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5900                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5901                        hrrq->toggle_bit) {
5902                         irq_poll_sched(&hrrq->iopoll);
5903                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5904                         return IRQ_HANDLED;
5905                 }
5906         } else {
5907                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5908                         hrrq->toggle_bit)
5909
5910                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5911                                 rc =  IRQ_HANDLED;
5912         }
5913
5914         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5915
5916         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5917                 list_del(&ipr_cmd->queue);
5918                 del_timer(&ipr_cmd->timer);
5919                 ipr_cmd->fast_done(ipr_cmd);
5920         }
5921         return rc;
5922 }
5923
5924 /**
5925  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5926  * @ioa_cfg:    ioa config struct
5927  * @ipr_cmd:    ipr command struct
5928  *
5929  * Return value:
5930  *      0 on success / -1 on failure
5931  **/
5932 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5933                              struct ipr_cmnd *ipr_cmd)
5934 {
5935         int i, nseg;
5936         struct scatterlist *sg;
5937         u32 length;
5938         u32 ioadl_flags = 0;
5939         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5940         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5941         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5942
5943         length = scsi_bufflen(scsi_cmd);
5944         if (!length)
5945                 return 0;
5946
5947         nseg = scsi_dma_map(scsi_cmd);
5948         if (nseg < 0) {
5949                 if (printk_ratelimit())
5950                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5951                 return -1;
5952         }
5953
5954         ipr_cmd->dma_use_sg = nseg;
5955
5956         ioarcb->data_transfer_length = cpu_to_be32(length);
5957         ioarcb->ioadl_len =
5958                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5959
5960         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5961                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5962                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5963         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5964                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5965
5966         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5967                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5968                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5969                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5970         }
5971
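        /* Flag the final descriptor to terminate the list */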
5972         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5973         return 0;
5974 }
5975
5976 /**
5977  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5978  * @ioa_cfg:    ioa config struct
5979  * @ipr_cmd:    ipr command struct
5980  *
5981  * Return value:
5982  *      0 on success / -1 on failure
5983  **/
5984 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5985                            struct ipr_cmnd *ipr_cmd)
5986 {
5987         int i, nseg;
5988         struct scatterlist *sg;
5989         u32 length;
5990         u32 ioadl_flags = 0;
5991         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5992         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5993         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5994
5995         length = scsi_bufflen(scsi_cmd);
5996         if (!length)
5997                 return 0;
5998
5999         nseg = scsi_dma_map(scsi_cmd);
6000         if (nseg < 0) {
6001                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6002                 return -1;
6003         }
6004
6005         ipr_cmd->dma_use_sg = nseg;
6006
6007         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6008                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6009                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6010                 ioarcb->data_transfer_length = cpu_to_be32(length);
6011                 ioarcb->ioadl_len =
6012                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6013         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6014                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6015                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6016                 ioarcb->read_ioadl_len =
6017                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6018         }
6019
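        /* If the S/G list is short enough, embed it in the IOARCB itself
         * so the adapter does not have to fetch a separate IOADL.
         */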
6020         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6021                 ioadl = ioarcb->u.add_data.u.ioadl;
6022                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6023                                     offsetof(struct ipr_ioarcb, u.add_data));
6024                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6025         }
6026
6027         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6028                 ioadl[i].flags_and_data_len =
6029                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6030                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6031         }
6032
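        /* Flag the final descriptor to terminate the list */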
6033         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6034         return 0;
6035 }
6036
6037 /**
6038  * __ipr_erp_done - Process completion of ERP for a device
6039  * @ipr_cmd:            ipr command struct
6040  *
6041  * This function copies the sense buffer into the scsi_cmd
6042  * struct and pushes the scsi_done function.
6043  *
6044  * Return value:
6045  *      nothing
6046  **/
6047 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6048 {
6049         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6050         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6051         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6052
6053         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6054                 scsi_cmd->result |= (DID_ERROR << 16);
6055                 scmd_printk(KERN_ERR, scsi_cmd,
6056                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6057         } else {
6058                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6059                        SCSI_SENSE_BUFFERSIZE);
6060         }
6061
6062         if (res) {
6063                 if (!ipr_is_naca_model(res))
6064                         res->needs_sync_complete = 1;
6065                 res->in_erp = 0;
6066         }
6067         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6068         scsi_cmd->scsi_done(scsi_cmd);
6069         if (ipr_cmd->eh_comp)
6070                 complete(ipr_cmd->eh_comp);
6071         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6072 }
6073
6074 /**
6075  * ipr_erp_done - Process completion of ERP for a device
6076  * @ipr_cmd:            ipr command struct
6077  *
6078  * This function copies the sense buffer into the scsi_cmd
6079  * struct and invokes the scsi_done function.
6080  *
6081  * Return value:
6082  *      nothing
6083  **/
6084 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6085 {
6086         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6087         unsigned long hrrq_flags;
6088
6089         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6090         __ipr_erp_done(ipr_cmd);
6091         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6092 }
6093
6094 /**
6095  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6096  * @ipr_cmd:    ipr command struct
6097  *
6098  * Return value:
6099  *      none
6100  **/
6101 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6102 {
6103         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6104         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6105         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6106
6107         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6108         ioarcb->data_transfer_length = 0;
6109         ioarcb->read_data_transfer_length = 0;
6110         ioarcb->ioadl_len = 0;
6111         ioarcb->read_ioadl_len = 0;
6112         ioasa->hdr.ioasc = 0;
6113         ioasa->hdr.residual_data_len = 0;
6114
6115         if (ipr_cmd->ioa_cfg->sis64)
6116                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6117                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6118         else {
6119                 ioarcb->write_ioadl_addr =
6120                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6121                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6122         }
6123 }
6124
6125 /**
6126  * __ipr_erp_request_sense - Send request sense to a device
6127  * @ipr_cmd:    ipr command struct
6128  *
6129  * This function sends a request sense to a device as a result
6130  * of a check condition.
6131  *
6132  * Return value:
6133  *      nothing
6134  **/
6135 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6136 {
6137         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6138         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6139
6140         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6141                 __ipr_erp_done(ipr_cmd);
6142                 return;
6143         }
6144
6145         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6146
6147         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6148         cmd_pkt->cdb[0] = REQUEST_SENSE;
6149         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6150         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6151         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6152         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6153
6154         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6155                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6156
6157         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6158                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6159 }
6160
6161 /**
6162  * ipr_erp_request_sense - Send request sense to a device
6163  * @ipr_cmd:    ipr command struct
6164  *
6165  * This function sends a request sense to a device as a result
6166  * of a check condition.
6167  *
6168  * Return value:
6169  *      nothing
6170  **/
6171 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6172 {
6173         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6174         unsigned long hrrq_flags;
6175
6176         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6177         __ipr_erp_request_sense(ipr_cmd);
6178         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6179 }
6180
6181 /**
6182  * ipr_erp_cancel_all - Send cancel all to a device
6183  * @ipr_cmd:    ipr command struct
6184  *
6185  * This function sends a cancel all to a device to clear the
6186  * queue. If we are running TCQ on the device, QERR is set to 1,
6187  * which means all outstanding ops have been dropped on the floor.
6188  * Cancel all will return them to us.
6189  *
6190  * Return value:
6191  *      nothing
6192  **/
6193 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6194 {
6195         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6196         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6197         struct ipr_cmd_pkt *cmd_pkt;
6198
6199         res->in_erp = 1;
6200
6201         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6202
6203         if (!scsi_cmd->device->simple_tags) {
6204                 __ipr_erp_request_sense(ipr_cmd);
6205                 return;
6206         }
6207
6208         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6209         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6210         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6211
6212         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6213                    IPR_CANCEL_ALL_TIMEOUT);
6214 }
6215
6216 /**
6217  * ipr_dump_ioasa - Dump contents of IOASA
6218  * @ioa_cfg:    ioa config struct
6219  * @ipr_cmd:    ipr command struct
6220  * @res:                resource entry struct
6221  *
6222  * This function is invoked by the interrupt handler when ops
6223  * fail. It will log the IOASA if appropriate. Only called
6224  * for GPDD ops.
6225  *
6226  * Return value:
6227  *      none
6228  **/
6229 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6230                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6231 {
6232         int i;
6233         u16 data_len;
6234         u32 ioasc, fd_ioasc;
6235         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6236         __be32 *ioasa_data = (__be32 *)ioasa;
6237         int error_index;
6238
6239         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6240         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6241
6242         if (0 == ioasc)
6243                 return;
6244
6245         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6246                 return;
6247
6248         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6249                 error_index = ipr_get_error(fd_ioasc);
6250         else
6251                 error_index = ipr_get_error(ioasc);
6252
6253         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6254                 /* Don't log an error if the IOA already logged one */
6255                 if (ioasa->hdr.ilid != 0)
6256                         return;
6257
6258                 if (!ipr_is_gscsi(res))
6259                         return;
6260
6261                 if (ipr_error_table[error_index].log_ioasa == 0)
6262                         return;
6263         }
6264
6265         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6266
6267         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6268         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6269                 data_len = sizeof(struct ipr_ioasa64);
6270         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6271                 data_len = sizeof(struct ipr_ioasa);
6272
6273         ipr_err("IOASA Dump:\n");
6274
6275         for (i = 0; i < data_len / 4; i += 4) {
6276                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6277                         be32_to_cpu(ioasa_data[i]),
6278                         be32_to_cpu(ioasa_data[i+1]),
6279                         be32_to_cpu(ioasa_data[i+2]),
6280                         be32_to_cpu(ioasa_data[i+3]));
6281         }
6282 }
6283
6284 /**
6285  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6286  * @ipr_cmd:    ipr command struct; supplies the IOASA and the
6287  *              scsi_cmd sense buffer to fill
6288  *
6289  * Return value:
6290  *      none
6291  **/
6292 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6293 {
6294         u32 failing_lba;
6295         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6296         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6297         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6298         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6299
6300         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6301
6302         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6303                 return;
6304
6305         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6306
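        /* A failing LBA wider than 32 bits only fits in descriptor-format
         * sense data (response code 0x72); all other cases use the fixed
         * format (response code 0x70) below.
         */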
6307         if (ipr_is_vset_device(res) &&
6308             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6309             ioasa->u.vset.failing_lba_hi != 0) {
6310                 sense_buf[0] = 0x72;
6311                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6312                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6313                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6314
6315                 sense_buf[7] = 12;
6316                 sense_buf[8] = 0;
6317                 sense_buf[9] = 0x0A;
6318                 sense_buf[10] = 0x80;
6319
6320                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6321
6322                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6323                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6324                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6325                 sense_buf[15] = failing_lba & 0x000000ff;
6326
6327                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6328
6329                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6330                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6331                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6332                 sense_buf[19] = failing_lba & 0x000000ff;
6333         } else {
6334                 sense_buf[0] = 0x70;
6335                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6336                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6337                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6338
6339                 /* Illegal request */
6340                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6341                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6342                         sense_buf[7] = 10;      /* additional length */
6343
6344                         /* IOARCB was in error */
6345                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6346                                 sense_buf[15] = 0xC0;
6347                         else    /* Parameter data was invalid */
6348                                 sense_buf[15] = 0x80;
6349
6350                         sense_buf[16] =
6351                             ((IPR_FIELD_POINTER_MASK &
6352                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6353                         sense_buf[17] =
6354                             (IPR_FIELD_POINTER_MASK &
6355                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6356                 } else {
6357                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6358                                 if (ipr_is_vset_device(res))
6359                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6360                                 else
6361                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6362
6363                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6364                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6365                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6366                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6367                                 sense_buf[6] = failing_lba & 0x000000ff;
6368                         }
6369
6370                         sense_buf[7] = 6;       /* additional length */
6371                 }
6372         }
6373 }
6374
6375 /**
6376  * ipr_get_autosense - Copy autosense data to sense buffer
6377  * @ipr_cmd:    ipr command struct
6378  *
6379  * This function copies the autosense buffer to the buffer
6380  * in the scsi_cmd, if there is autosense available.
6381  *
6382  * Return value:
6383  *      1 if autosense was available / 0 if not
6384  **/
6385 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6386 {
6387         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6388         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6389
6390         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6391                 return 0;
6392
6393         if (ipr_cmd->ioa_cfg->sis64)
6394                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6395                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6396                            SCSI_SENSE_BUFFERSIZE));
6397         else
6398                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6399                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6400                            SCSI_SENSE_BUFFERSIZE));
6401         return 1;
6402 }
6403
6404 /**
6405  * ipr_erp_start - Process an error response for a SCSI op
6406  * @ioa_cfg:    ioa config struct
6407  * @ipr_cmd:    ipr command struct
6408  *
6409  * This function determines whether or not to initiate ERP
6410  * on the affected device.
6411  *
6412  * Return value:
6413  *      nothing
6414  **/
6415 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6416                               struct ipr_cmnd *ipr_cmd)
6417 {
6418         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6419         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6420         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6421         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6422
6423         if (!res) {
6424                 __ipr_scsi_eh_done(ipr_cmd);
6425                 return;
6426         }
6427
6428         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6429                 ipr_gen_sense(ipr_cmd);
6430
6431         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6432
6433         switch (masked_ioasc) {
6434         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6435                 if (ipr_is_naca_model(res))
6436                         scsi_cmd->result |= (DID_ABORT << 16);
6437                 else
6438                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6439                 break;
6440         case IPR_IOASC_IR_RESOURCE_HANDLE:
6441         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6442                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6443                 break;
6444         case IPR_IOASC_HW_SEL_TIMEOUT:
6445                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6446                 if (!ipr_is_naca_model(res))
6447                         res->needs_sync_complete = 1;
6448                 break;
6449         case IPR_IOASC_SYNC_REQUIRED:
6450                 if (!res->in_erp)
6451                         res->needs_sync_complete = 1;
6452                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6453                 break;
6454         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6455         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6456                 /*
6457                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6458                  * so SCSI mid-layer and upper layers handle it accordingly.
6459                  */
6460                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6461                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6462                 break;
6463         case IPR_IOASC_BUS_WAS_RESET:
6464         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6465                 /*
6466                  * Report the bus reset and ask for a retry. The device
6467                  * will return CC/UA on the next command.
6468                  */
6469                 if (!res->resetting_device)
6470                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6471                 scsi_cmd->result |= (DID_ERROR << 16);
6472                 if (!ipr_is_naca_model(res))
6473                         res->needs_sync_complete = 1;
6474                 break;
6475         case IPR_IOASC_HW_DEV_BUS_STATUS:
6476                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6477                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6478                         if (!ipr_get_autosense(ipr_cmd)) {
6479                                 if (!ipr_is_naca_model(res)) {
6480                                         ipr_erp_cancel_all(ipr_cmd);
6481                                         return;
6482                                 }
6483                         }
6484                 }
6485                 if (!ipr_is_naca_model(res))
6486                         res->needs_sync_complete = 1;
6487                 break;
6488         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6489                 break;
6490         case IPR_IOASC_IR_NON_OPTIMIZED:
6491                 if (res->raw_mode) {
6492                         res->raw_mode = 0;
6493                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6494                 } else
6495                         scsi_cmd->result |= (DID_ERROR << 16);
6496                 break;
6497         default:
6498                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6499                         scsi_cmd->result |= (DID_ERROR << 16);
6500                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6501                         res->needs_sync_complete = 1;
6502                 break;
6503         }
6504
6505         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6506         scsi_cmd->scsi_done(scsi_cmd);
6507         if (ipr_cmd->eh_comp)
6508                 complete(ipr_cmd->eh_comp);
6509         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6510 }
6511
6512 /**
6513  * ipr_scsi_done - mid-layer done function
6514  * @ipr_cmd:    ipr command struct
6515  *
6516  * This function is invoked by the interrupt handler for
6517  * ops generated by the SCSI mid-layer
6518  *
6519  * Return value:
6520  *      none
6521  **/
6522 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6523 {
6524         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6525         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6526         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6527         unsigned long lock_flags;
6528
6529         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6530
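        /* Fast path: successful ops complete under the hrrq lock alone;
         * failed ops take the host lock as well and go through ERP.
         */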
6531         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6532                 scsi_dma_unmap(scsi_cmd);
6533
6534                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6535                 scsi_cmd->scsi_done(scsi_cmd);
6536                 if (ipr_cmd->eh_comp)
6537                         complete(ipr_cmd->eh_comp);
6538                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6539                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6540         } else {
6541                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6542                 spin_lock(&ipr_cmd->hrrq->_lock);
6543                 ipr_erp_start(ioa_cfg, ipr_cmd);
6544                 spin_unlock(&ipr_cmd->hrrq->_lock);
6545                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6546         }
6547 }
6548
6549 /**
6550  * ipr_queuecommand - Queue a mid-layer request
6551  * @shost:              scsi host struct
6552  * @scsi_cmd:   scsi command struct
6553  *
6554  * This function queues a request generated by the mid-layer.
6555  *
6556  * Return value:
6557  *      0 on success
6558  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6559  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6560  **/
6561 static int ipr_queuecommand(struct Scsi_Host *shost,
6562                             struct scsi_cmnd *scsi_cmd)
6563 {
6564         struct ipr_ioa_cfg *ioa_cfg;
6565         struct ipr_resource_entry *res;
6566         struct ipr_ioarcb *ioarcb;
6567         struct ipr_cmnd *ipr_cmd;
6568         unsigned long hrrq_flags, lock_flags;
6569         int rc;
6570         struct ipr_hrr_queue *hrrq;
6571         int hrrq_id;
6572
6573         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6574
6575         scsi_cmd->result = (DID_OK << 16);
6576         res = scsi_cmd->device->hostdata;
6577
6578         if (ipr_is_gata(res) && res->sata_port) {
6579                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6580                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6581                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6582                 return rc;
6583         }
6584
6585         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6586         hrrq = &ioa_cfg->hrrq[hrrq_id];
6587
6588         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6589         /*
6590          * We are currently blocking all devices due to a host reset.
6591          * We have told the host to stop giving us new requests, but
6592          * ERP ops don't count. FIXME
6593          */
6594         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6595                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6596                 return SCSI_MLQUEUE_HOST_BUSY;
6597         }
6598
6599         /*
6600          * FIXME - Create scsi_set_host_offline interface
6601          *  and the ioa_is_dead check can be removed
6602          */
6603         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6604                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6605                 goto err_nodev;
6606         }
6607
6608         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6609         if (ipr_cmd == NULL) {
6610                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6611                 return SCSI_MLQUEUE_HOST_BUSY;
6612         }
6613         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6614
6615         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6616         ioarcb = &ipr_cmd->ioarcb;
6617
6618         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6619         ipr_cmd->scsi_cmd = scsi_cmd;
6620         ipr_cmd->done = ipr_scsi_eh_done;
6621
6622         if (ipr_is_gscsi(res)) {
6623                 if (scsi_cmd->underflow == 0)
6624                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6625
6626                 if (res->reset_occurred) {
6627                         res->reset_occurred = 0;
6628                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6629                 }
6630         }
6631
6632         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6633                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6634
6635                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6636                 if (scsi_cmd->flags & SCMD_TAGGED)
6637                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6638                 else
6639                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6640         }
6641
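        /* Opcodes 0xC0 and above are vendor specific. Route them to the
         * IOA itself unless the device is generic SCSI (with the query
         * resource state command as the one exception).
         */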
6642         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6643             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6644                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6645         }
6646         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6647                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6648
6649                 if (scsi_cmd->underflow == 0)
6650                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6651         }
6652
6653         if (ioa_cfg->sis64)
6654                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6655         else
6656                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6657
6658         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6659         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6660                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6661                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6662                 if (!rc)
6663                         scsi_dma_unmap(scsi_cmd);
6664                 return SCSI_MLQUEUE_HOST_BUSY;
6665         }
6666
6667         if (unlikely(hrrq->ioa_is_dead)) {
6668                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6669                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6670                 scsi_dma_unmap(scsi_cmd);
6671                 goto err_nodev;
6672         }
6673
6674         ioarcb->res_handle = res->res_handle;
6675         if (res->needs_sync_complete) {
6676                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6677                 res->needs_sync_complete = 0;
6678         }
6679         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6680         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6681         ipr_send_command(ipr_cmd);
6682         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6683         return 0;
6684
6685 err_nodev:
6686         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6687         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6688         scsi_cmd->result = (DID_NO_CONNECT << 16);
6689         scsi_cmd->scsi_done(scsi_cmd);
6690         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6691         return 0;
6692 }
6693
6694 /**
6695  * ipr_ioctl - IOCTL handler
6696  * @sdev:       scsi device struct
6697  * @cmd:        IOCTL cmd
6698  * @arg:        IOCTL arg
6699  *
6700  * Return value:
6701  *      0 on success / other on failure
6702  **/
6703 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6704 {
6705         struct ipr_resource_entry *res;
6706
6707         res = (struct ipr_resource_entry *)sdev->hostdata;
6708         if (res && ipr_is_gata(res)) {
6709                 if (cmd == HDIO_GET_IDENTITY)
6710                         return -ENOTTY;
6711                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6712         }
6713
6714         return -EINVAL;
6715 }
6716
6717 /**
6718  * ipr_info - Get information about the card/driver
6719  * @scsi_host:  scsi host struct
6720  *
6721  * Return value:
6722  *      pointer to buffer with description string
6723  **/
6724 static const char *ipr_ioa_info(struct Scsi_Host *host)
6725 {
6726         static char buffer[512];
6727         struct ipr_ioa_cfg *ioa_cfg;
6728         unsigned long lock_flags = 0;
6729
6730         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6731
6732         spin_lock_irqsave(host->host_lock, lock_flags);
6733         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6734         spin_unlock_irqrestore(host->host_lock, lock_flags);
6735
6736         return buffer;
6737 }
6738
6739 static struct scsi_host_template driver_template = {
6740         .module = THIS_MODULE,
6741         .name = "IPR",
6742         .info = ipr_ioa_info,
6743         .ioctl = ipr_ioctl,
6744         .queuecommand = ipr_queuecommand,
6745         .eh_abort_handler = ipr_eh_abort,
6746         .eh_device_reset_handler = ipr_eh_dev_reset,
6747         .eh_host_reset_handler = ipr_eh_host_reset,
6748         .slave_alloc = ipr_slave_alloc,
6749         .slave_configure = ipr_slave_configure,
6750         .slave_destroy = ipr_slave_destroy,
6751         .scan_finished = ipr_scan_finished,
6752         .target_alloc = ipr_target_alloc,
6753         .target_destroy = ipr_target_destroy,
6754         .change_queue_depth = ipr_change_queue_depth,
6755         .bios_param = ipr_biosparam,
6756         .can_queue = IPR_MAX_COMMANDS,
6757         .this_id = -1,
6758         .sg_tablesize = IPR_MAX_SGLIST,
6759         .max_sectors = IPR_IOA_MAX_SECTORS,
6760         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6761         .use_clustering = ENABLE_CLUSTERING,
6762         .shost_attrs = ipr_ioa_attrs,
6763         .sdev_attrs = ipr_dev_attrs,
6764         .proc_name = IPR_NAME,
6765 };
6766
6767 /**
6768  * ipr_ata_phy_reset - libata phy_reset handler
6769  * @ap:         ata port to reset
6770  *
6771  **/
6772 static void ipr_ata_phy_reset(struct ata_port *ap)
6773 {
6774         unsigned long flags;
6775         struct ipr_sata_port *sata_port = ap->private_data;
6776         struct ipr_resource_entry *res = sata_port->res;
6777         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6778         int rc;
6779
6780         ENTER;
6781         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6782         while (ioa_cfg->in_reset_reload) {
6783                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6784                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6785                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6786         }
6787
6788         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6789                 goto out_unlock;
6790
6791         rc = ipr_device_reset(ioa_cfg, res);
6792
6793         if (rc) {
6794                 ap->link.device[0].class = ATA_DEV_NONE;
6795                 goto out_unlock;
6796         }
6797
6798         ap->link.device[0].class = res->ata_class;
6799         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6800                 ap->link.device[0].class = ATA_DEV_NONE;
6801
6802 out_unlock:
6803         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6804         LEAVE;
6805 }
6806
6807 /**
6808  * ipr_ata_post_internal - Cleanup after an internal command
6809  * @qc: ATA queued command
6810  *
6811  * Return value:
6812  *      none
6813  **/
6814 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6815 {
6816         struct ipr_sata_port *sata_port = qc->ap->private_data;
6817         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6818         struct ipr_cmnd *ipr_cmd;
6819         struct ipr_hrr_queue *hrrq;
6820         unsigned long flags;
6821
6822         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6823         while (ioa_cfg->in_reset_reload) {
6824                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6825                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6826                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6827         }
6828
6829         for_each_hrrq(hrrq, ioa_cfg) {
6830                 spin_lock(&hrrq->_lock);
6831                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6832                         if (ipr_cmd->qc == qc) {
6833                                 ipr_device_reset(ioa_cfg, sata_port->res);
6834                                 break;
6835                         }
6836                 }
6837                 spin_unlock(&hrrq->_lock);
6838         }
6839         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6840 }
6841
6842 /**
6843  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6844  * @regs:       destination
6845  * @tf: source ATA taskfile
6846  *
6847  * Return value:
6848  *      none
6849  **/
6850 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6851                              struct ata_taskfile *tf)
6852 {
6853         regs->feature = tf->feature;
6854         regs->nsect = tf->nsect;
6855         regs->lbal = tf->lbal;
6856         regs->lbam = tf->lbam;
6857         regs->lbah = tf->lbah;
6858         regs->device = tf->device;
6859         regs->command = tf->command;
6860         regs->hob_feature = tf->hob_feature;
6861         regs->hob_nsect = tf->hob_nsect;
6862         regs->hob_lbal = tf->hob_lbal;
6863         regs->hob_lbam = tf->hob_lbam;
6864         regs->hob_lbah = tf->hob_lbah;
6865         regs->ctl = tf->ctl;
6866 }
6867
6868 /**
6869  * ipr_sata_done - done function for SATA commands
6870  * @ipr_cmd:    ipr command struct
6871  *
6872  * This function is invoked by the interrupt handler for
6873  * ops generated by the SCSI mid-layer to SATA devices
6874  *
6875  * Return value:
6876  *      none
6877  **/
6878 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6879 {
6880         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6881         struct ata_queued_cmd *qc = ipr_cmd->qc;
6882         struct ipr_sata_port *sata_port = qc->ap->private_data;
6883         struct ipr_resource_entry *res = sata_port->res;
6884         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6885
6886         spin_lock(&ipr_cmd->hrrq->_lock);
6887         if (ipr_cmd->ioa_cfg->sis64)
6888                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6889                        sizeof(struct ipr_ioasa_gata));
6890         else
6891                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6892                        sizeof(struct ipr_ioasa_gata));
6893         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6894
6895         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6896                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6897
6898         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6899                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6900         else
6901                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6902         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6903         spin_unlock(&ipr_cmd->hrrq->_lock);
6904         ata_qc_complete(qc);
6905 }
6906
6907 /**
6908  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6909  * @ipr_cmd:    ipr command struct
6910  * @qc:         ATA queued command
6911  *
6912  **/
6913 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6914                                   struct ata_queued_cmd *qc)
6915 {
6916         u32 ioadl_flags = 0;
6917         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6918         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6919         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6920         int len = qc->nbytes;
6921         struct scatterlist *sg;
6922         unsigned int si;
6923         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6924
6925         if (len == 0)
6926                 return;
6927
6928         if (qc->dma_dir == DMA_TO_DEVICE) {
6929                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6930                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6931         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6932                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6933
6934         ioarcb->data_transfer_length = cpu_to_be32(len);
6935         ioarcb->ioadl_len =
6936                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6937         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6938                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6939
6940         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6941                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6942                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6943                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6944
6945                 last_ioadl64 = ioadl64;
6946                 ioadl64++;
6947         }
6948
6949         if (likely(last_ioadl64))
6950                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6951 }
6952
6953 /**
6954  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6955  * @ipr_cmd:    ipr command struct
6956  * @qc:         ATA queued command
6957  *
6958  **/
6959 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6960                                 struct ata_queued_cmd *qc)
6961 {
6962         u32 ioadl_flags = 0;
6963         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6964         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6965         struct ipr_ioadl_desc *last_ioadl = NULL;
6966         int len = qc->nbytes;
6967         struct scatterlist *sg;
6968         unsigned int si;
6969
6970         if (len == 0)
6971                 return;
6972
6973         if (qc->dma_dir == DMA_TO_DEVICE) {
6974                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6975                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6976                 ioarcb->data_transfer_length = cpu_to_be32(len);
6977                 ioarcb->ioadl_len =
6978                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6979         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6980                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6981                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6982                 ioarcb->read_ioadl_len =
6983                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6984         }
6985
6986         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6987                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6988                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6989
6990                 last_ioadl = ioadl;
6991                 ioadl++;
6992         }
6993
6994         if (likely(last_ioadl))
6995                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6996 }
6997
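#if 0
/*
 * Editor's illustrative sketch, not driver code: both IOADL builders
 * above follow the same pattern -- copy each scatterlist element into
 * a descriptor, remember the last descriptor written, then OR the
 * LAST flag into that final descriptor so the adapter knows where the
 * list ends. A minimal standalone model of that pattern; all ex_*
 * names are made up, and byte-swapping (cpu_to_be32) is omitted.
 */
#include <stdint.h>
#include <stddef.h>

#define EX_IOADL_FLAGS_LAST	0x80000000u

struct ex_ioadl_desc {
	uint32_t flags_and_len;
	uint32_t address;
};

struct ex_sg_elem {
	uint32_t dma_addr;
	uint32_t len;
};

static void ex_build_ioadl(struct ex_ioadl_desc *ioadl,
			   const struct ex_sg_elem *sg, size_t n_elem,
			   uint32_t flags)
{
	struct ex_ioadl_desc *last = NULL;
	size_t i;

	for (i = 0; i < n_elem; i++) {
		ioadl[i].flags_and_len = flags | sg[i].len;
		ioadl[i].address = sg[i].dma_addr;
		last = &ioadl[i];
	}

	/* Only the final descriptor carries the LAST flag. */
	if (last)
		last->flags_and_len |= EX_IOADL_FLAGS_LAST;
}
#endif
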
6998 /**
6999  * ipr_qc_defer - Get a free ipr_cmd
7000  * @qc: queued command
7001  *
7002  * Return value:
7003  *      0 if success / ATA_DEFER_LINK if the command must be deferred
7004  **/
7005 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7006 {
7007         struct ata_port *ap = qc->ap;
7008         struct ipr_sata_port *sata_port = ap->private_data;
7009         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7010         struct ipr_cmnd *ipr_cmd;
7011         struct ipr_hrr_queue *hrrq;
7012         int hrrq_id;
7013
7014         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7015         hrrq = &ioa_cfg->hrrq[hrrq_id];
7016
7017         qc->lldd_task = NULL;
7018         spin_lock(&hrrq->_lock);
7019         if (unlikely(hrrq->ioa_is_dead)) {
7020                 spin_unlock(&hrrq->_lock);
7021                 return 0;
7022         }
7023
7024         if (unlikely(!hrrq->allow_cmds)) {
7025                 spin_unlock(&hrrq->_lock);
7026                 return ATA_DEFER_LINK;
7027         }
7028
7029         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7030         if (ipr_cmd == NULL) {
7031                 spin_unlock(&hrrq->_lock);
7032                 return ATA_DEFER_LINK;
7033         }
7034
7035         qc->lldd_task = ipr_cmd;
7036         spin_unlock(&hrrq->_lock);
7037         return 0;
7038 }
7039
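#if 0
/*
 * Editor's illustrative sketch, not driver code: ipr_qc_defer() is a
 * "reserve now, issue later" helper -- it grabs a command block under
 * the queue lock and parks it in qc->lldd_task, or tells libata to
 * defer the command. A standalone model of that decision, with
 * made-up ex_* types and the locking elided:
 */
#include <stddef.h>

#define EX_DEFER	1

struct ex_cmd {
	struct ex_cmd *next;
};

struct ex_queue {
	struct ex_cmd *free_list;	/* protected by a lock in reality */
	int allow_cmds;
};

/*
 * Returns 0 and stores the reserved block in *out on success, or
 * EX_DEFER when the caller should retry later (queue quiesced or no
 * free command blocks).
 */
static int ex_reserve_cmd(struct ex_queue *q, struct ex_cmd **out)
{
	*out = NULL;

	if (!q->allow_cmds || !q->free_list)
		return EX_DEFER;

	*out = q->free_list;
	q->free_list = q->free_list->next;
	return 0;
}
#endif
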
7040 /**
7041  * ipr_qc_issue - Issue a SATA qc to a device
7042  * @qc: queued command
7043  *
7044  * Return value:
7045  *      0 if success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
7046  **/
7047 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7048 {
7049         struct ata_port *ap = qc->ap;
7050         struct ipr_sata_port *sata_port = ap->private_data;
7051         struct ipr_resource_entry *res = sata_port->res;
7052         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7053         struct ipr_cmnd *ipr_cmd;
7054         struct ipr_ioarcb *ioarcb;
7055         struct ipr_ioarcb_ata_regs *regs;
7056
7057         if (qc->lldd_task == NULL)
7058                 ipr_qc_defer(qc);
7059
7060         ipr_cmd = qc->lldd_task;
7061         if (ipr_cmd == NULL)
7062                 return AC_ERR_SYSTEM;
7063
7064         qc->lldd_task = NULL;
7065         spin_lock(&ipr_cmd->hrrq->_lock);
7066         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7067                         ipr_cmd->hrrq->ioa_is_dead)) {
7068                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7069                 spin_unlock(&ipr_cmd->hrrq->_lock);
7070                 return AC_ERR_SYSTEM;
7071         }
7072
7073         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7074         ioarcb = &ipr_cmd->ioarcb;
7075
7076         if (ioa_cfg->sis64) {
7077                 regs = &ipr_cmd->i.ata_ioadl.regs;
7078                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7079         } else
7080                 regs = &ioarcb->u.add_data.u.regs;
7081
7082         memset(regs, 0, sizeof(*regs));
7083         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7084
7085         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7086         ipr_cmd->qc = qc;
7087         ipr_cmd->done = ipr_sata_done;
7088         ipr_cmd->ioarcb.res_handle = res->res_handle;
7089         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7090         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7091         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7092         ipr_cmd->dma_use_sg = qc->n_elem;
7093
7094         if (ioa_cfg->sis64)
7095                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7096         else
7097                 ipr_build_ata_ioadl(ipr_cmd, qc);
7098
7099         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7100         ipr_copy_sata_tf(regs, &qc->tf);
7101         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7102         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7103
7104         switch (qc->tf.protocol) {
7105         case ATA_PROT_NODATA:
7106         case ATA_PROT_PIO:
7107                 break;
7108
7109         case ATA_PROT_DMA:
7110                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7111                 break;
7112
7113         case ATAPI_PROT_PIO:
7114         case ATAPI_PROT_NODATA:
7115                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7116                 break;
7117
7118         case ATAPI_PROT_DMA:
7119                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7120                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7121                 break;
7122
7123         default:
7124                 WARN_ON(1);
7125                 spin_unlock(&ipr_cmd->hrrq->_lock);
7126                 return AC_ERR_INVALID;
7127         }
7128
7129         ipr_send_command(ipr_cmd);
7130         spin_unlock(&ipr_cmd->hrrq->_lock);
7131
7132         return 0;
7133 }
7134
7135 /**
7136  * ipr_qc_fill_rtf - Read result TF
7137  * @qc: ATA queued command
7138  *
7139  * Return value:
7140  *      true
7141  **/
7142 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7143 {
7144         struct ipr_sata_port *sata_port = qc->ap->private_data;
7145         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7146         struct ata_taskfile *tf = &qc->result_tf;
7147
7148         tf->feature = g->error;
7149         tf->nsect = g->nsect;
7150         tf->lbal = g->lbal;
7151         tf->lbam = g->lbam;
7152         tf->lbah = g->lbah;
7153         tf->device = g->device;
7154         tf->command = g->status;
7155         tf->hob_nsect = g->hob_nsect;
7156         tf->hob_lbal = g->hob_lbal;
7157         tf->hob_lbam = g->hob_lbam;
7158         tf->hob_lbah = g->hob_lbah;
7159
7160         return true;
7161 }
7162
7163 static struct ata_port_operations ipr_sata_ops = {
7164         .phy_reset = ipr_ata_phy_reset,
7165         .hardreset = ipr_sata_reset,
7166         .post_internal_cmd = ipr_ata_post_internal,
7167         .qc_prep = ata_noop_qc_prep,
7168         .qc_defer = ipr_qc_defer,
7169         .qc_issue = ipr_qc_issue,
7170         .qc_fill_rtf = ipr_qc_fill_rtf,
7171         .port_start = ata_sas_port_start,
7172         .port_stop = ata_sas_port_stop
7173 };
7174
7175 static struct ata_port_info sata_port_info = {
7176         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7177                           ATA_FLAG_SAS_HOST,
7178         .pio_mask       = ATA_PIO4_ONLY,
7179         .mwdma_mask     = ATA_MWDMA2,
7180         .udma_mask      = ATA_UDMA6,
7181         .port_ops       = &ipr_sata_ops
7182 };
7183
7184 #ifdef CONFIG_PPC_PSERIES
7185 static const u16 ipr_blocked_processors[] = {
7186         PVR_NORTHSTAR,
7187         PVR_PULSAR,
7188         PVR_POWER4,
7189         PVR_ICESTAR,
7190         PVR_SSTAR,
7191         PVR_POWER4p,
7192         PVR_630,
7193         PVR_630p
7194 };
7195
7196 /**
7197  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7198  * @ioa_cfg:    ioa cfg struct
7199  *
7200  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7201  * certain pSeries hardware. This function determines if the given
7202  * adapter is in one of these configurations or not.
7203  *
7204  * Return value:
7205  *      1 if adapter is not supported / 0 if adapter is supported
7206  **/
7207 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7208 {
7209         int i;
7210
7211         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7212                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7213                         if (pvr_version_is(ipr_blocked_processors[i]))
7214                                 return 1;
7215                 }
7216         }
7217         return 0;
7218 }
7219 #else
7220 #define ipr_invalid_adapter(ioa_cfg) 0
7221 #endif
7222
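#if 0
/*
 * Editor's illustrative sketch, not driver code: the check above is a
 * linear scan of a small blocklist, gated on adapter type and PCI
 * revision. The same scan in standalone form; the ex_* names and PVR
 * values below are invented for illustration.
 */
#include <stddef.h>

static const unsigned short ex_blocked_pvrs[] = { 0x0033, 0x0034, 0x0035 };

static int ex_pvr_is_blocked(unsigned short pvr)
{
	size_t i;

	for (i = 0; i < sizeof(ex_blocked_pvrs) / sizeof(ex_blocked_pvrs[0]); i++)
		if (ex_blocked_pvrs[i] == pvr)
			return 1;
	return 0;
}
#endif
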
7223 /**
7224  * ipr_ioa_bringdown_done - IOA bring down completion.
7225  * @ipr_cmd:    ipr command struct
7226  *
7227  * This function processes the completion of an adapter bring down.
7228  * It wakes any reset sleepers.
7229  *
7230  * Return value:
7231  *      IPR_RC_JOB_RETURN
7232  **/
7233 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7234 {
7235         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7236         int i;
7237
7238         ENTER;
7239         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7240                 ipr_trace;
7241                 ioa_cfg->scsi_unblock = 1;
7242                 schedule_work(&ioa_cfg->work_q);
7243         }
7244
7245         ioa_cfg->in_reset_reload = 0;
7246         ioa_cfg->reset_retries = 0;
7247         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7248                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7249                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7250                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7251         }
7252         wmb();
7253
7254         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7255         wake_up_all(&ioa_cfg->reset_wait_q);
7256         LEAVE;
7257
7258         return IPR_RC_JOB_RETURN;
7259 }
7260
7261 /**
7262  * ipr_ioa_reset_done - IOA reset completion.
7263  * @ipr_cmd:    ipr command struct
7264  *
7265  * This function processes the completion of an adapter reset.
7266  * It schedules any necessary mid-layer add/removes and
7267  * wakes any reset sleepers.
7268  *
7269  * Return value:
7270  *      IPR_RC_JOB_RETURN
7271  **/
7272 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7273 {
7274         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7275         struct ipr_resource_entry *res;
7276         int j;
7277
7278         ENTER;
7279         ioa_cfg->in_reset_reload = 0;
7280         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7281                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7282                 ioa_cfg->hrrq[j].allow_cmds = 1;
7283                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7284         }
7285         wmb();
7286         ioa_cfg->reset_cmd = NULL;
7287         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7288
7289         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7290                 if (res->add_to_ml || res->del_from_ml) {
7291                         ipr_trace;
7292                         break;
7293                 }
7294         }
7295         schedule_work(&ioa_cfg->work_q);
7296
7297         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7298                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7299                 if (j < IPR_NUM_LOG_HCAMS)
7300                         ipr_send_hcam(ioa_cfg,
7301                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7302                                 ioa_cfg->hostrcb[j]);
7303                 else
7304                         ipr_send_hcam(ioa_cfg,
7305                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7306                                 ioa_cfg->hostrcb[j]);
7307         }
7308
7309         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7310         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7311
7312         ioa_cfg->reset_retries = 0;
7313         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7314         wake_up_all(&ioa_cfg->reset_wait_q);
7315
7316         ioa_cfg->scsi_unblock = 1;
7317         schedule_work(&ioa_cfg->work_q);
7318         LEAVE;
7319         return IPR_RC_JOB_RETURN;
7320 }
7321
7322 /**
7323  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7324  * @supported_dev:      supported device struct
7325  * @vpids:          vendor product id struct
7326  *
7327  * Return value:
7328  *      none
7329  **/
7330 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7331                                  struct ipr_std_inq_vpids *vpids)
7332 {
7333         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7334         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7335         supported_dev->num_records = 1;
7336         supported_dev->data_length =
7337                 cpu_to_be16(sizeof(struct ipr_supported_device));
7338         supported_dev->reserved = 0;
7339 }
7340
7341 /**
7342  * ipr_set_supported_devs - Send Set Supported Devices for a device
7343  * @ipr_cmd:    ipr command struct
7344  *
7345  * This function sends a Set Supported Devices to the adapter
7346  *
7347  * Return value:
7348  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7349  **/
7350 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7351 {
7352         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7353         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7354         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7355         struct ipr_resource_entry *res = ipr_cmd->u.res;
7356
7357         ipr_cmd->job_step = ipr_ioa_reset_done;
7358
7359         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7360                 if (!ipr_is_scsi_disk(res))
7361                         continue;
7362
7363                 ipr_cmd->u.res = res;
7364                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7365
7366                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7367                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7368                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7369
7370                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7371                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7372                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7373                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7374
7375                 ipr_init_ioadl(ipr_cmd,
7376                                ioa_cfg->vpd_cbs_dma +
7377                                  offsetof(struct ipr_misc_cbs, supp_dev),
7378                                sizeof(struct ipr_supported_device),
7379                                IPR_IOADL_FLAGS_WRITE_LAST);
7380
7381                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7382                            IPR_SET_SUP_DEVICE_TIMEOUT);
7383
7384                 if (!ioa_cfg->sis64)
7385                         ipr_cmd->job_step = ipr_set_supported_devs;
7386                 LEAVE;
7387                 return IPR_RC_JOB_RETURN;
7388         }
7389
7390         LEAVE;
7391         return IPR_RC_JOB_CONTINUE;
7392 }
7393
7394 /**
7395  * ipr_get_mode_page - Locate specified mode page
7396  * @mode_pages: mode page buffer
7397  * @page_code:  page code to find
7398  * @len:                minimum required length for mode page
7399  *
7400  * Return value:
7401  *      pointer to mode page / NULL on failure
7402  **/
7403 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7404                                u32 page_code, u32 len)
7405 {
7406         struct ipr_mode_page_hdr *mode_hdr;
7407         u32 page_length;
7408         u32 length;
7409
7410         if (!mode_pages || (mode_pages->hdr.length == 0))
7411                 return NULL;
7412
7413         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7414         mode_hdr = (struct ipr_mode_page_hdr *)
7415                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7416
7417         while (length) {
7418                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7419                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7420                                 return mode_hdr;
7421                         break;
7422                 } else {
7423                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7424                                        mode_hdr->page_length);
7425                         length -= page_length;
7426                         mode_hdr = (struct ipr_mode_page_hdr *)
7427                                 ((unsigned long)mode_hdr + page_length);
7428                 }
7429         }
7430         return NULL;
7431 }
7432
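#if 0
/*
 * Editor's illustrative sketch, not driver code: ipr_get_mode_page()
 * walks the variable-length mode pages that follow the mode parameter
 * header and block descriptors. The same walk over a raw MODE
 * SENSE(6) buffer in standalone form, assuming the SPC layout (byte 0
 * is the length of the data that follows it, byte 3 is the block
 * descriptor length, pages start after the block descriptors):
 */
#include <stdint.h>
#include <stddef.h>

static const uint8_t *ex_find_mode_page(const uint8_t *buf, size_t buf_len,
					uint8_t page_code)
{
	size_t data_len, off;

	if (buf_len < 4 || buf[0] == 0)
		return NULL;

	data_len = buf[0] + 1;		/* total bytes incl. the length byte */
	if (data_len > buf_len)
		data_len = buf_len;	/* never trust the device blindly */

	off = 4 + buf[3];		/* skip header + block descriptors */
	while (off + 2 <= data_len) {
		uint8_t code = buf[off] & 0x3f;

		if (code == page_code)
			return &buf[off];
		off += 2 + buf[off + 1];	/* header + page_length bytes */
	}
	return NULL;
}
#endif
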
7433 /**
7434  * ipr_check_term_power - Check for term power errors
7435  * @ioa_cfg:    ioa config struct
7436  * @mode_pages: IOAFP mode pages buffer
7437  *
7438  * Check the IOAFP's mode page 28 for term power errors
7439  *
7440  * Return value:
7441  *      nothing
7442  **/
7443 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7444                                  struct ipr_mode_pages *mode_pages)
7445 {
7446         int i;
7447         int entry_length;
7448         struct ipr_dev_bus_entry *bus;
7449         struct ipr_mode_page28 *mode_page;
7450
7451         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7452                                       sizeof(struct ipr_mode_page28));
7453
7454         entry_length = mode_page->entry_length;
7455
7456         bus = mode_page->bus;
7457
7458         for (i = 0; i < mode_page->num_entries; i++) {
7459                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7460                         dev_err(&ioa_cfg->pdev->dev,
7461                                 "Term power is absent on scsi bus %d\n",
7462                                 bus->res_addr.bus);
7463                 }
7464
7465                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7466         }
7467 }
7468
7469 /**
7470  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7471  * @ioa_cfg:    ioa config struct
7472  *
7473  * Looks through the config table checking for SES devices. If
7474  * the SES device is in the SES table indicating a maximum SCSI
7475  * bus speed, the speed is limited for the bus.
7476  *
7477  * Return value:
7478  *      none
7479  **/
7480 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7481 {
7482         u32 max_xfer_rate;
7483         int i;
7484
7485         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7486                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7487                                                        ioa_cfg->bus_attr[i].bus_width);
7488
7489                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7490                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7491         }
7492 }
7493
7494 /**
7495  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7496  * @ioa_cfg:    ioa config struct
7497  * @mode_pages: mode page 28 buffer
7498  *
7499  * Updates mode page 28 based on driver configuration
7500  *
7501  * Return value:
7502  *      none
7503  **/
7504 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7505                                           struct ipr_mode_pages *mode_pages)
7506 {
7507         int i, entry_length;
7508         struct ipr_dev_bus_entry *bus;
7509         struct ipr_bus_attributes *bus_attr;
7510         struct ipr_mode_page28 *mode_page;
7511
7512         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7513                                       sizeof(struct ipr_mode_page28));
7514
7515         entry_length = mode_page->entry_length;
7516
7517         /* Loop for each device bus entry */
7518         for (i = 0, bus = mode_page->bus;
7519              i < mode_page->num_entries;
7520              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7521                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7522                         dev_err(&ioa_cfg->pdev->dev,
7523                                 "Invalid resource address reported: 0x%08X\n",
7524                                 IPR_GET_PHYS_LOC(bus->res_addr));
7525                         continue;
7526                 }
7527
7528                 bus_attr = &ioa_cfg->bus_attr[i];
7529                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7530                 bus->bus_width = bus_attr->bus_width;
7531                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7532                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7533                 if (bus_attr->qas_enabled)
7534                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7535                 else
7536                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7537         }
7538 }
7539
7540 /**
7541  * ipr_build_mode_select - Build a mode select command
7542  * @ipr_cmd:    ipr command struct
7543  * @res_handle: resource handle to send command to
7544  * @parm:               Byte 1 of the Mode Select CDB (PF/SP bits)
7545  * @dma_addr:   DMA buffer address
7546  * @xfer_len:   data transfer length
7547  *
7548  * Return value:
7549  *      none
7550  **/
7551 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7552                                   __be32 res_handle, u8 parm,
7553                                   dma_addr_t dma_addr, u8 xfer_len)
7554 {
7555         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7556
7557         ioarcb->res_handle = res_handle;
7558         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7559         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7560         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7561         ioarcb->cmd_pkt.cdb[1] = parm;
7562         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7563
7564         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7565 }
7566
7567 /**
7568  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7569  * @ipr_cmd:    ipr command struct
7570  *
7571  * This function sets up the SCSI bus attributes and sends
7572  * a Mode Select for Page 28 to activate them.
7573  *
7574  * Return value:
7575  *      IPR_RC_JOB_RETURN
7576  **/
7577 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7578 {
7579         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7580         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7581         int length;
7582
7583         ENTER;
7584         ipr_scsi_bus_speed_limit(ioa_cfg);
7585         ipr_check_term_power(ioa_cfg, mode_pages);
7586         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7587         length = mode_pages->hdr.length + 1;
7588         mode_pages->hdr.length = 0;
7589
7590         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7591                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7592                               length);
7593
7594         ipr_cmd->job_step = ipr_set_supported_devs;
7595         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7596                                     struct ipr_resource_entry, queue);
7597         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7598
7599         LEAVE;
7600         return IPR_RC_JOB_RETURN;
7601 }
7602
7603 /**
7604  * ipr_build_mode_sense - Builds a mode sense command
7605  * @ipr_cmd:    ipr command struct
7606  * @res_handle:         resource handle to send command to
7607  * @parm:               Byte 2 of mode sense command
7608  * @dma_addr:   DMA address of mode sense buffer
7609  * @xfer_len:   Size of DMA buffer
7610  *
7611  * Return value:
7612  *      none
7613  **/
7614 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7615                                  __be32 res_handle,
7616                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7617 {
7618         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7619
7620         ioarcb->res_handle = res_handle;
7621         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7622         ioarcb->cmd_pkt.cdb[2] = parm;
7623         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7624         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7625
7626         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7627 }
7628
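#if 0
/*
 * Editor's illustrative sketch, not driver code: the two builders
 * above fill fixed byte positions of 6-byte MODE SELECT / MODE SENSE
 * CDBs. The same byte layout in standalone form (the EX_* opcodes are
 * the standard SCSI values):
 */
#include <stdint.h>
#include <string.h>

#define EX_MODE_SELECT	0x15
#define EX_MODE_SENSE	0x1a

static void ex_build_mode_sense_cdb(uint8_t cdb[6], uint8_t page_code,
				    uint8_t alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = EX_MODE_SENSE;
	cdb[2] = page_code;	/* byte 2: page code, as in the driver */
	cdb[4] = alloc_len;	/* byte 4: allocation length */
}

static void ex_build_mode_select_cdb(uint8_t cdb[6], uint8_t parm,
				     uint8_t param_list_len)
{
	memset(cdb, 0, 6);
	cdb[0] = EX_MODE_SELECT;
	cdb[1] = parm;		/* byte 1: PF/SP bits (driver passes 0x11) */
	cdb[4] = param_list_len;
}
#endif
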
7629 /**
7630  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7631  * @ipr_cmd:    ipr command struct
7632  *
7633  * This function handles the failure of an IOA bringup command.
7634  *
7635  * Return value:
7636  *      IPR_RC_JOB_RETURN
7637  **/
7638 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7639 {
7640         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7641         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7642
7643         dev_err(&ioa_cfg->pdev->dev,
7644                 "0x%02X failed with IOASC: 0x%08X\n",
7645                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7646
7647         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7648         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7649         return IPR_RC_JOB_RETURN;
7650 }
7651
7652 /**
7653  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7654  * @ipr_cmd:    ipr command struct
7655  *
7656  * This function handles the failure of a Mode Sense to the IOAFP.
7657  * Some adapters do not handle all mode pages.
7658  *
7659  * Return value:
7660  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7661  **/
7662 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7663 {
7664         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7665         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7666
7667         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7668                 ipr_cmd->job_step = ipr_set_supported_devs;
7669                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7670                                             struct ipr_resource_entry, queue);
7671                 return IPR_RC_JOB_CONTINUE;
7672         }
7673
7674         return ipr_reset_cmd_failed(ipr_cmd);
7675 }
7676
7677 /**
7678  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7679  * @ipr_cmd:    ipr command struct
7680  *
7681  * This function sends a Page 28 mode sense to the IOA to
7682  * retrieve SCSI bus attributes.
7683  *
7684  * Return value:
7685  *      IPR_RC_JOB_RETURN
7686  **/
7687 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7688 {
7689         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7690
7691         ENTER;
7692         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7693                              0x28, ioa_cfg->vpd_cbs_dma +
7694                              offsetof(struct ipr_misc_cbs, mode_pages),
7695                              sizeof(struct ipr_mode_pages));
7696
7697         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7698         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7699
7700         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7701
7702         LEAVE;
7703         return IPR_RC_JOB_RETURN;
7704 }
7705
7706 /**
7707  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7708  * @ipr_cmd:    ipr command struct
7709  *
7710  * This function enables dual IOA RAID support if possible.
7711  *
7712  * Return value:
7713  *      IPR_RC_JOB_RETURN
7714  **/
7715 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7716 {
7717         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7718         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7719         struct ipr_mode_page24 *mode_page;
7720         int length;
7721
7722         ENTER;
7723         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7724                                       sizeof(struct ipr_mode_page24));
7725
7726         if (mode_page)
7727                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7728
7729         length = mode_pages->hdr.length + 1;
7730         mode_pages->hdr.length = 0;
7731
7732         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7733                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7734                               length);
7735
7736         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7737         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7738
7739         LEAVE;
7740         return IPR_RC_JOB_RETURN;
7741 }
7742
7743 /**
7744  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7745  * @ipr_cmd:    ipr command struct
7746  *
7747  * This function handles the failure of a Mode Sense to the IOAFP.
7748  * Some adapters do not handle all mode pages.
7749  *
7750  * Return value:
7751  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7752  **/
7753 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7754 {
7755         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7756
7757         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7758                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7759                 return IPR_RC_JOB_CONTINUE;
7760         }
7761
7762         return ipr_reset_cmd_failed(ipr_cmd);
7763 }
7764
7765 /**
7766  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7767  * @ipr_cmd:    ipr command struct
7768  *
7769  * This function sends a mode sense to the IOA to retrieve
7770  * the IOA Advanced Function Control mode page.
7771  *
7772  * Return value:
7773  *      IPR_RC_JOB_RETURN
7774  **/
7775 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7776 {
7777         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7778
7779         ENTER;
7780         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7781                              0x24, ioa_cfg->vpd_cbs_dma +
7782                              offsetof(struct ipr_misc_cbs, mode_pages),
7783                              sizeof(struct ipr_mode_pages));
7784
7785         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7786         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7787
7788         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7789
7790         LEAVE;
7791         return IPR_RC_JOB_RETURN;
7792 }
7793
7794 /**
7795  * ipr_init_res_table - Initialize the resource table
7796  * @ipr_cmd:    ipr command struct
7797  *
7798  * This function looks through the existing resource table, comparing
7799  * it with the config table. This function will take care of old/new
7800  * devices and schedule adding/removing them from the mid-layer
7801  * as appropriate.
7802  *
7803  * Return value:
7804  *      IPR_RC_JOB_CONTINUE
7805  **/
7806 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7807 {
7808         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7809         struct ipr_resource_entry *res, *temp;
7810         struct ipr_config_table_entry_wrapper cfgtew;
7811         int entries, found, flag, i;
7812         LIST_HEAD(old_res);
7813
7814         ENTER;
7815         if (ioa_cfg->sis64)
7816                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7817         else
7818                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7819
7820         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7821                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7822
7823         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7824                 list_move_tail(&res->queue, &old_res);
7825
7826         if (ioa_cfg->sis64)
7827                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7828         else
7829                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7830
7831         for (i = 0; i < entries; i++) {
7832                 if (ioa_cfg->sis64)
7833                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7834                 else
7835                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7836                 found = 0;
7837
7838                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7839                         if (ipr_is_same_device(res, &cfgtew)) {
7840                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7841                                 found = 1;
7842                                 break;
7843                         }
7844                 }
7845
7846                 if (!found) {
7847                         if (list_empty(&ioa_cfg->free_res_q)) {
7848                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7849                                 break;
7850                         }
7851
7852                         found = 1;
7853                         res = list_entry(ioa_cfg->free_res_q.next,
7854                                          struct ipr_resource_entry, queue);
7855                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7856                         ipr_init_res_entry(res, &cfgtew);
7857                         res->add_to_ml = 1;
7858                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7859                         res->sdev->allow_restart = 1;
7860
7861                 if (found)
7862                         ipr_update_res_entry(res, &cfgtew);
7863         }
7864
7865         list_for_each_entry_safe(res, temp, &old_res, queue) {
7866                 if (res->sdev) {
7867                         res->del_from_ml = 1;
7868                         res->res_handle = IPR_INVALID_RES_HANDLE;
7869                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7870                 }
7871         }
7872
7873         list_for_each_entry_safe(res, temp, &old_res, queue) {
7874                 ipr_clear_res_target(res);
7875                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7876         }
7877
7878         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7879                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7880         else
7881                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7882
7883         LEAVE;
7884         return IPR_RC_JOB_CONTINUE;
7885 }
7886
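#if 0
/*
 * Editor's illustrative sketch, not driver code: ipr_init_res_table()
 * is a list reconciliation -- park every known resource on a temporary
 * list, let each config table entry either reclaim its match or
 * allocate a fresh entry (flagged add_to_ml), and flag whatever is
 * left over as del_from_ml. A compact array-based model of that diff,
 * with made-up ex_* types:
 */
#include <stddef.h>

struct ex_res {
	int id;
	int in_use;
	int add_to_ml;
	int del_from_ml;
};

static void ex_reconcile(struct ex_res *res, size_t nres,
			 const int *cfg_ids, size_t ncfg)
{
	size_t i, j;
	int found;

	/* Match each config entry against the existing resources. */
	for (i = 0; i < ncfg; i++) {
		found = 0;
		for (j = 0; j < nres; j++) {
			if (res[j].in_use && res[j].id == cfg_ids[i]) {
				found = 1;	/* still present: keep it */
				break;
			}
		}
		if (found)
			continue;
		for (j = 0; j < nres; j++) {	/* new device: claim a slot */
			if (!res[j].in_use) {
				res[j].in_use = 1;
				res[j].id = cfg_ids[i];
				res[j].add_to_ml = 1;
				break;
			}
		}
	}

	/* Anything in use but absent from the config table goes away. */
	for (j = 0; j < nres; j++) {
		if (!res[j].in_use)
			continue;
		found = 0;
		for (i = 0; i < ncfg; i++)
			if (res[j].id == cfg_ids[i])
				found = 1;
		if (!found)
			res[j].del_from_ml = 1;
	}
}
#endif
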
7887 /**
7888  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7889  * @ipr_cmd:    ipr command struct
7890  *
7891  * This function sends a Query IOA Configuration command
7892  * to the adapter to retrieve the IOA configuration table.
7893  *
7894  * Return value:
7895  *      IPR_RC_JOB_RETURN
7896  **/
7897 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7898 {
7899         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7900         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7901         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7902         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7903
7904         ENTER;
7905         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7906                 ioa_cfg->dual_raid = 1;
7907         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7908                  ucode_vpd->major_release, ucode_vpd->card_type,
7909                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7910         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7911         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7912
7913         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7914         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7915         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7916         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7917
7918         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7919                        IPR_IOADL_FLAGS_READ_LAST);
7920
7921         ipr_cmd->job_step = ipr_init_res_table;
7922
7923         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7924
7925         LEAVE;
7926         return IPR_RC_JOB_RETURN;
7927 }
7928
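/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd:    ipr command struct
 *
 * Treats an invalid request IOASC as "service action not supported by
 * this adapter" and continues the job; any other failure goes through
 * the normal reset command failure path.
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/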
7929 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7930 {
7931         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7932
7933         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7934                 return IPR_RC_JOB_CONTINUE;
7935
7936         return ipr_reset_cmd_failed(ipr_cmd);
7937 }
7938
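/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/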
7939 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7940                                          __be32 res_handle, u8 sa_code)
7941 {
7942         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7943
7944         ioarcb->res_handle = res_handle;
7945         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7946         ioarcb->cmd_pkt.cdb[1] = sa_code;
7947         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7948 }
7949
7950 /**
7951  * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service action
7952  * @ipr_cmd:    ipr command struct
7953  *
7954  * Return value:
7955  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7956  **/
7957 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7958 {
7959         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7960         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7961         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7962
7963         ENTER;
7964
7965         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7966
7967         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7968                 ipr_build_ioa_service_action(ipr_cmd,
7969                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7970                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7971
7972                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7973
7974                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7975                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7976                            IPR_SET_SUP_DEVICE_TIMEOUT);
7977
7978                 LEAVE;
7979                 return IPR_RC_JOB_RETURN;
7980         }
7981
7982         LEAVE;
7983         return IPR_RC_JOB_CONTINUE;
7984 }
7985
7986 /**
7987  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7988  * @ipr_cmd:    ipr command struct
 * @flags:      CDB byte 1 flags (e.g. EVPD)
 * @page:       inquiry page code
 * @dma_addr:   DMA address of the response buffer
 * @xfer_len:   size of the response buffer
7989  *
7990  * This utility function sends an inquiry to the adapter.
7991  *
7992  * Return value:
7993  *      none
7994  **/
7995 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7996                               dma_addr_t dma_addr, u8 xfer_len)
7997 {
7998         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7999
8000         ENTER;
8001         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8002         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8003
8004         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8005         ioarcb->cmd_pkt.cdb[1] = flags;
8006         ioarcb->cmd_pkt.cdb[2] = page;
8007         ioarcb->cmd_pkt.cdb[4] = xfer_len;
8008
8009         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8010
8011         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8012         LEAVE;
8013 }
8014
8015 /**
8016  * ipr_inquiry_page_supported - Is the given inquiry page supported
8017  * @page0:              inquiry page 0 buffer
8018  * @page:               page code.
8019  *
8020  * This function determines if the specified inquiry page is supported.
8021  *
8022  * Return value:
8023  *      1 if page is supported / 0 if not
8024  **/
8025 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8026 {
8027         int i;
8028
8029         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8030                 if (page0->page[i] == page)
8031                         return 1;
8032
8033         return 0;
8034 }
8035
8036 /**
8037  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8038  * @ipr_cmd:    ipr command struct
8039  *
8040  * This function sends a Page 0xC4 inquiry to the adapter
8041  * to retrieve software VPD information.
8042  *
8043  * Return value:
8044  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8045  **/
8046 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8047 {
8048         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8049         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8050         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8051
8052         ENTER;
8053         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8054         memset(pageC4, 0, sizeof(*pageC4));
8055
8056         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8057                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8058                                   (ioa_cfg->vpd_cbs_dma
8059                                    + offsetof(struct ipr_misc_cbs,
8060                                               pageC4_data)),
8061                                   sizeof(struct ipr_inquiry_pageC4));
8062                 return IPR_RC_JOB_RETURN;
8063         }
8064
8065         LEAVE;
8066         return IPR_RC_JOB_CONTINUE;
8067 }
8068
8069 /**
8070  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8071  * @ipr_cmd:    ipr command struct
8072  *
8073  * This function sends a Page 0xD0 inquiry to the adapter
8074  * to retrieve adapter capabilities.
8075  *
8076  * Return value:
8077  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8078  **/
8079 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8080 {
8081         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8082         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8083         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8084
8085         ENTER;
8086         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8087         memset(cap, 0, sizeof(*cap));
8088
8089         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8090                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8091                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8092                                   sizeof(struct ipr_inquiry_cap));
8093                 return IPR_RC_JOB_RETURN;
8094         }
8095
8096         LEAVE;
8097         return IPR_RC_JOB_CONTINUE;
8098 }
8099
8100 /**
8101  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8102  * @ipr_cmd:    ipr command struct
8103  *
8104  * This function sends a Page 3 inquiry to the adapter
8105  * to retrieve software VPD information.
8106  *
8107  * Return value:
8108  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8109  **/
8110 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8111 {
8112         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8113
8114         ENTER;
8115
8116         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8117
8118         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8119                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8120                           sizeof(struct ipr_inquiry_page3));
8121
8122         LEAVE;
8123         return IPR_RC_JOB_RETURN;
8124 }
8125
8126 /**
8127  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8128  * @ipr_cmd:    ipr command struct
8129  *
8130  * This function sends a Page 0 inquiry to the adapter
8131  * to retrieve supported inquiry pages.
8132  *
8133  * Return value:
8134  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8135  **/
8136 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8137 {
8138         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8139         char type[5];
8140
8141         ENTER;
8142
8143         /* Grab the type out of the VPD and store it away */
8144         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8145         type[4] = '\0';
8146         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8147
8148         if (ipr_invalid_adapter(ioa_cfg)) {
8149                 dev_err(&ioa_cfg->pdev->dev,
8150                         "Adapter not supported in this hardware configuration.\n");
8151
8152                 if (!ipr_testmode) {
8153                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8154                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8155                         list_add_tail(&ipr_cmd->queue,
8156                                         &ioa_cfg->hrrq->hrrq_free_q);
8157                         return IPR_RC_JOB_RETURN;
8158                 }
8159         }
8160
8161         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8162
8163         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8164                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8165                           sizeof(struct ipr_inquiry_page0));
8166
8167         LEAVE;
8168         return IPR_RC_JOB_RETURN;
8169 }
8170
8171 /**
8172  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8173  * @ipr_cmd:    ipr command struct
8174  *
8175  * This function sends a standard inquiry to the adapter.
8176  *
8177  * Return value:
8178  *      IPR_RC_JOB_RETURN
8179  **/
8180 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8181 {
8182         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8183
8184         ENTER;
8185         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8186
8187         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8188                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8189                           sizeof(struct ipr_ioa_vpd));
8190
8191         LEAVE;
8192         return IPR_RC_JOB_RETURN;
8193 }
8194
8195 /**
8196  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8197  * @ipr_cmd:    ipr command struct
8198  *
8199  * This function sends an Identify Host Request Response Queue
8200  * command to establish the HRRQ with the adapter.
8201  *
8202  * Return value:
8203  *      IPR_RC_JOB_RETURN
8204  **/
8205 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8206 {
8207         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8208         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8209         struct ipr_hrr_queue *hrrq;
8210
8211         ENTER;
8212         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8213         if (ioa_cfg->identify_hrrq_index == 0)
8214                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8215
8216         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8217                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8218
8219                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8220                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8221
8222                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8223                 if (ioa_cfg->sis64)
8224                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8225
8226                 if (ioa_cfg->nvectors == 1)
8227                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8228                 else
8229                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8230
8231                 ioarcb->cmd_pkt.cdb[2] =
8232                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8233                 ioarcb->cmd_pkt.cdb[3] =
8234                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8235                 ioarcb->cmd_pkt.cdb[4] =
8236                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8237                 ioarcb->cmd_pkt.cdb[5] =
8238                         ((u64) hrrq->host_rrq_dma) & 0xff;
8239                 ioarcb->cmd_pkt.cdb[7] =
8240                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8241                 ioarcb->cmd_pkt.cdb[8] =
8242                         (sizeof(u32) * hrrq->size) & 0xff;
8243
8244                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8245                         ioarcb->cmd_pkt.cdb[9] =
8246                                         ioa_cfg->identify_hrrq_index;
8247
8248                 if (ioa_cfg->sis64) {
8249                         ioarcb->cmd_pkt.cdb[10] =
8250                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8251                         ioarcb->cmd_pkt.cdb[11] =
8252                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8253                         ioarcb->cmd_pkt.cdb[12] =
8254                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8255                         ioarcb->cmd_pkt.cdb[13] =
8256                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8257                 }
8258
8259                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8260                         ioarcb->cmd_pkt.cdb[14] =
8261                                         ioa_cfg->identify_hrrq_index;
8262
8263                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8264                            IPR_INTERNAL_TIMEOUT);
8265
8266                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8267                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8268
8269                 LEAVE;
8270                 return IPR_RC_JOB_RETURN;
8271         }
8272
8273         LEAVE;
8274         return IPR_RC_JOB_CONTINUE;
8275 }
8276
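#if 0
/*
 * Editor's illustrative sketch, not driver code: the Identify Host RRQ
 * CDB above hand-packs a 64-bit DMA address big-endian across two
 * groups of CDB bytes -- 2..5 carry bits 31..0, and on SIS-64 bytes
 * 10..13 carry bits 63..32. The same packing in standalone form:
 */
#include <stdint.h>

static void ex_pack_hrrq_addr(uint8_t cdb[16], uint64_t dma, int sis64)
{
	cdb[2] = (dma >> 24) & 0xff;
	cdb[3] = (dma >> 16) & 0xff;
	cdb[4] = (dma >> 8) & 0xff;
	cdb[5] = dma & 0xff;

	if (sis64) {
		cdb[10] = (dma >> 56) & 0xff;
		cdb[11] = (dma >> 48) & 0xff;
		cdb[12] = (dma >> 40) & 0xff;
		cdb[13] = (dma >> 32) & 0xff;
	}
}
#endif
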
8277 /**
8278  * ipr_reset_timer_done - Adapter reset timer function
8279  * @ipr_cmd:    ipr command struct
8280  *
8281  * Description: This function is used in adapter reset processing
8282  * for timing events. If the reset_cmd pointer in the IOA
8283  * config struct is not this command's, we are doing nested
8284  * resets and fail_all_ops will take care of freeing the
8285  * command block.
8286  *
8287  * Return value:
8288  *      none
8289  **/
8290 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8291 {
8292         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8293         unsigned long lock_flags = 0;
8294
8295         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8296
8297         if (ioa_cfg->reset_cmd == ipr_cmd) {
8298                 list_del(&ipr_cmd->queue);
8299                 ipr_cmd->done(ipr_cmd);
8300         }
8301
8302         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8303 }
8304
8305 /**
8306  * ipr_reset_start_timer - Start a timer for adapter reset job
8307  * @ipr_cmd:    ipr command struct
8308  * @timeout:    timeout value
8309  *
8310  * Description: This function is used in adapter reset processing
8311  * for timing events. If the reset_cmd pointer in the IOA
8312  * config struct is not this command's, we are doing nested
8313  * resets and fail_all_ops will take care of freeing the
8314  * command block.
8315  *
8316  * Return value:
8317  *      none
8318  **/
8319 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8320                                   unsigned long timeout)
8321 {
8322
8323         ENTER;
8324         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8325         ipr_cmd->done = ipr_reset_ioa_job;
8326
8327         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8328         ipr_cmd->timer.expires = jiffies + timeout;
8329         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8330         add_timer(&ipr_cmd->timer);
8331 }
8332
8333 /**
8334  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8335  * @ioa_cfg:    ioa cfg struct
8336  *
8337  * Return value:
8338  *      nothing
8339  **/
8340 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8341 {
8342         struct ipr_hrr_queue *hrrq;
8343
8344         for_each_hrrq(hrrq, ioa_cfg) {
8345                 spin_lock(&hrrq->_lock);
8346                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8347
8348                 /* Initialize Host RRQ pointers */
8349                 hrrq->hrrq_start = hrrq->host_rrq;
8350                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8351                 hrrq->hrrq_curr = hrrq->hrrq_start;
8352                 hrrq->toggle_bit = 1;
8353                 spin_unlock(&hrrq->_lock);
8354         }
8355         wmb();
8356
8357         ioa_cfg->identify_hrrq_index = 0;
8358         if (ioa_cfg->hrrq_num == 1)
8359                 atomic_set(&ioa_cfg->hrrq_index, 0);
8360         else
8361                 atomic_set(&ioa_cfg->hrrq_index, 1);
8362
8363         /* Zero out config table */
8364         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8365 }
8366
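#if 0
/*
 * Editor's illustrative sketch, not driver code: the host RRQ
 * initialized above is a circular buffer of u32 response words whose
 * ownership is tracked by a toggle bit rather than head/tail indices.
 * The host consumes entries whose toggle bit matches its expected
 * value and flips that expectation each time the cursor wraps. A
 * standalone model of the consumer side, with made-up ex_* types:
 */
#include <stdint.h>

#define EX_TOGGLE_BIT	0x1u

struct ex_hrrq {
	uint32_t *start, *end, *curr;	/* ring bounds and cursor */
	uint32_t toggle;		/* expected owner bit, starts at 1 */
};

/* Returns 1 and stores the entry if the adapter has posted one. */
static int ex_hrrq_pop(struct ex_hrrq *q, uint32_t *entry)
{
	if ((*q->curr & EX_TOGGLE_BIT) != q->toggle)
		return 0;	/* slot still owned by the adapter */

	*entry = *q->curr;
	if (q->curr == q->end) {
		q->curr = q->start;
		q->toggle ^= EX_TOGGLE_BIT;	/* flip on wrap */
	} else {
		q->curr++;
	}
	return 1;
}
#endif
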
8367 /**
8368  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8369  * @ipr_cmd:    ipr command struct
8370  *
8371  * Return value:
8372  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8373  **/
8374 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8375 {
8376         unsigned long stage, stage_time;
8377         u32 feedback;
8378         volatile u32 int_reg;
8379         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8380         u64 maskval = 0;
8381
8382         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8383         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8384         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8385
8386         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8387
8388         /* sanity check the stage_time value */
8389         if (stage_time == 0)
8390                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8391         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8392                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8393         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8394                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8395
8396         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8397                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8398                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8399                 stage_time = ioa_cfg->transop_timeout;
8400                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8401         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8402                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8403                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8404                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8405                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8406                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8407                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8408                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8409                         return IPR_RC_JOB_CONTINUE;
8410                 }
8411         }
8412
8413         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8414         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8415         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8416         ipr_cmd->done = ipr_reset_ioa_job;
8417         add_timer(&ipr_cmd->timer);
8418
8419         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8420
8421         return IPR_RC_JOB_RETURN;
8422 }
8423
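#if 0
/*
 * Editor's illustrative sketch, not driver code: the stage handling
 * above is a mask-extract-clamp over a single feedback register. The
 * same shape in standalone form; the EX_* masks and limits are
 * invented stand-ins for the IPR_IPL_* constants.
 */
#include <stdint.h>

#define EX_STAGE_TIME_MASK	0x0000ffffu
#define EX_MIN_STAGE_TIME	5
#define EX_MAX_STAGE_TIME	600
#define EX_DEFAULT_STAGE_TIME	30

static unsigned long ex_stage_time(uint32_t feedback)
{
	unsigned long t = feedback & EX_STAGE_TIME_MASK;

	if (t == 0)
		t = EX_DEFAULT_STAGE_TIME;	/* unset: use the default */
	else if (t < EX_MIN_STAGE_TIME)
		t = EX_MIN_STAGE_TIME;		/* clamp to sane bounds */
	else if (t > EX_MAX_STAGE_TIME)
		t = EX_MAX_STAGE_TIME;
	return t;
}
#endif
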
8424 /**
8425  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8426  * @ipr_cmd:    ipr command struct
8427  *
8428  * This function reinitializes some control blocks and
8429  * enables destructive diagnostics on the adapter.
8430  *
8431  * Return value:
8432  *      IPR_RC_JOB_RETURN
8433  **/
8434 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8435 {
8436         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8437         volatile u32 int_reg;
8438         volatile u64 maskval;
8439         int i;
8440
8441         ENTER;
8442         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8443         ipr_init_ioa_mem(ioa_cfg);
8444
8445         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8446                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8447                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8448                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8449         }
8450         wmb();
8451         if (ioa_cfg->sis64) {
8452                 /* Set the adapter to the correct endian mode. */
8453                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8454                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8455         }
8456
8457         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8458
8459         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8460                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8461                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8462                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8463                 return IPR_RC_JOB_CONTINUE;
8464         }
8465
8466         /* Enable destructive diagnostics on IOA */
8467         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8468
8469         if (ioa_cfg->sis64) {
8470                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8471                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8472                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8473         } else
8474                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8475
8476         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8477
8478         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8479
8480         if (ioa_cfg->sis64) {
8481                 ipr_cmd->job_step = ipr_reset_next_stage;
8482                 return IPR_RC_JOB_CONTINUE;
8483         }
8484
8485         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8486         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8487         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8488         ipr_cmd->done = ipr_reset_ioa_job;
8489         add_timer(&ipr_cmd->timer);
8490         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8491
8492         LEAVE;
8493         return IPR_RC_JOB_RETURN;
8494 }
8495
8496 /**
8497  * ipr_reset_wait_for_dump - Wait for a dump to time out.
8498  * @ipr_cmd:    ipr command struct
8499  *
8500  * This function is invoked when an adapter dump has run out
8501  * of processing time.
8502  *
8503  * Return value:
8504  *      IPR_RC_JOB_CONTINUE
8505  **/
8506 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8507 {
8508         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8509
8510         if (ioa_cfg->sdt_state == GET_DUMP)
8511                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8512         else if (ioa_cfg->sdt_state == READ_DUMP)
8513                 ioa_cfg->sdt_state = ABORT_DUMP;
8514
8515         ioa_cfg->dump_timeout = 1;
8516         ipr_cmd->job_step = ipr_reset_alert;
8517
8518         return IPR_RC_JOB_CONTINUE;
8519 }
8520
8521 /**
8522  * ipr_unit_check_no_data - Log a unit check/no data error log
8523  * @ioa_cfg:            ioa config struct
8524  *
8525  * Logs an error indicating the adapter unit checked, but for some
8526  * reason, we were unable to fetch the unit check buffer.
8527  *
8528  * Return value:
8529  *      nothing
8530  **/
8531 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8532 {
8533         ioa_cfg->errors_logged++;
8534         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8535 }
8536
8537 /**
8538  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8539  * @ioa_cfg:            ioa config struct
8540  *
8541  * Fetches the unit check buffer from the adapter by clocking the data
8542  * through the mailbox register.
8543  *
8544  * Return value:
8545  *      nothing
8546  **/
8547 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8548 {
8549         unsigned long mailbox;
8550         struct ipr_hostrcb *hostrcb;
8551         struct ipr_uc_sdt sdt;
8552         int rc, length;
8553         u32 ioasc;
8554
8555         mailbox = readl(ioa_cfg->ioa_mailbox);
8556
8557         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8558                 ipr_unit_check_no_data(ioa_cfg);
8559                 return;
8560         }
8561
8562         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8563         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8564                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8565
8566         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8567             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8568             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8569                 ipr_unit_check_no_data(ioa_cfg);
8570                 return;
8571         }
8572
8573         /* Find length of the first sdt entry (UC buffer) */
8574         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8575                 length = be32_to_cpu(sdt.entry[0].end_token);
8576         else
8577                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8578                           be32_to_cpu(sdt.entry[0].start_token)) &
8579                           IPR_FMT2_MBX_ADDR_MASK;
8580
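        /*
         * Borrow a free hostrcb and clock the unit check buffer into
         * it, capping the copy at the size of the HCAM buffer.
         */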
8581         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8582                              struct ipr_hostrcb, queue);
8583         list_del_init(&hostrcb->queue);
8584         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8585
8586         rc = ipr_get_ldump_data_section(ioa_cfg,
8587                                         be32_to_cpu(sdt.entry[0].start_token),
8588                                         (__be32 *)&hostrcb->hcam,
8589                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8590
8591         if (!rc) {
8592                 ipr_handle_log_data(ioa_cfg, hostrcb);
8593                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8594                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8595                     ioa_cfg->sdt_state == GET_DUMP)
8596                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8597         } else
8598                 ipr_unit_check_no_data(ioa_cfg);
8599
8600         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8601 }
8602
8603 /**
8604  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8605  * @ipr_cmd:    ipr command struct
8606  *
8607  * Description: This function retrieves the unit check buffer from the adapter.
8608  *
8609  * Return value:
8610  *      IPR_RC_JOB_RETURN
8611  **/
8612 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8613 {
8614         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8615
8616         ENTER;
8617         ioa_cfg->ioa_unit_checked = 0;
8618         ipr_get_unit_check_buffer(ioa_cfg);
8619         ipr_cmd->job_step = ipr_reset_alert;
8620         ipr_reset_start_timer(ipr_cmd, 0);
8621
8622         LEAVE;
8623         return IPR_RC_JOB_RETURN;
8624 }
8625
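/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to become valid
 * @ipr_cmd:    ipr command struct
 *
 * Description: On SIS-64 adapters this polls until the mailbox register
 * is stable or the wait times out, then starts the dump and waits for it
 * to complete. On other adapters the dump is started immediately.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/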
8626 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8627 {
8628         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8629
8630         ENTER;
8631
8632         if (ioa_cfg->sdt_state != GET_DUMP)
8633                 return IPR_RC_JOB_RETURN;
8634
8635         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8636             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8637              IPR_PCII_MAILBOX_STABLE)) {
8638
8639                 if (!ipr_cmd->u.time_left)
8640                         dev_err(&ioa_cfg->pdev->dev,
8641                                 "Timed out waiting for Mailbox register.\n");
8642
8643                 ioa_cfg->sdt_state = READ_DUMP;
8644                 ioa_cfg->dump_timeout = 0;
8645                 if (ioa_cfg->sis64)
8646                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8647                 else
8648                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8649                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8650                 schedule_work(&ioa_cfg->work_q);
8651
8652         } else {
8653                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8654                 ipr_reset_start_timer(ipr_cmd,
8655                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8656         }
8657
8658         LEAVE;
8659         return IPR_RC_JOB_RETURN;
8660 }
8661
8662 /**
8663  * ipr_reset_restore_cfg_space - Restore PCI config space.
8664  * @ipr_cmd:    ipr command struct
8665  *
8666  * Description: This function restores the saved PCI config space of
8667  * the adapter, fails all outstanding ops back to the callers, and
8668  * fetches the dump/unit check if applicable to this reset.
8669  *
8670  * Return value:
8671  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8672  **/
8673 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8674 {
8675         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8676         u32 int_reg;
8677
8678         ENTER;
8679         ioa_cfg->pdev->state_saved = true;
8680         pci_restore_state(ioa_cfg->pdev);
8681
8682         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8683                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8684                 return IPR_RC_JOB_CONTINUE;
8685         }
8686
8687         ipr_fail_all_ops(ioa_cfg);
8688
8689         if (ioa_cfg->sis64) {
8690                 /* Set the adapter to the correct endian mode. */
8691                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8692                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8693         }
8694
8695         if (ioa_cfg->ioa_unit_checked) {
8696                 if (ioa_cfg->sis64) {
8697                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8698                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8699                         return IPR_RC_JOB_RETURN;
8700                 } else {
8701                         ioa_cfg->ioa_unit_checked = 0;
8702                         ipr_get_unit_check_buffer(ioa_cfg);
8703                         ipr_cmd->job_step = ipr_reset_alert;
8704                         ipr_reset_start_timer(ipr_cmd, 0);
8705                         return IPR_RC_JOB_RETURN;
8706                 }
8707         }
8708
8709         if (ioa_cfg->in_ioa_bringdown) {
8710                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8711         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8712                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8713                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8714         } else {
8715                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8716         }
8717
8718         LEAVE;
8719         return IPR_RC_JOB_CONTINUE;
8720 }
8721
8722 /**
8723  * ipr_reset_bist_done - BIST has completed on the adapter.
8724  * @ipr_cmd:    ipr command struct
8725  *
8726  * Description: Unblock config space and resume the reset process.
8727  *
8728  * Return value:
8729  *      IPR_RC_JOB_CONTINUE
8730  **/
8731 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8732 {
8733         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8734
8735         ENTER;
8736         if (ioa_cfg->cfg_locked)
8737                 pci_cfg_access_unlock(ioa_cfg->pdev);
8738         ioa_cfg->cfg_locked = 0;
8739         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8740         LEAVE;
8741         return IPR_RC_JOB_CONTINUE;
8742 }
8743
8744 /**
8745  * ipr_reset_start_bist - Run BIST on the adapter.
8746  * @ipr_cmd:    ipr command struct
8747  *
8748  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8749  *
8750  * Return value:
8751  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8752  **/
8753 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8754 {
8755         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8756         int rc = PCIBIOS_SUCCESSFUL;
8757
8758         ENTER;
8759         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8760                 writel(IPR_UPROCI_SIS64_START_BIST,
8761                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8762         else
8763                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8764
8765         if (rc == PCIBIOS_SUCCESSFUL) {
8766                 ipr_cmd->job_step = ipr_reset_bist_done;
8767                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8768                 rc = IPR_RC_JOB_RETURN;
8769         } else {
8770                 if (ioa_cfg->cfg_locked)
8771                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8772                 ioa_cfg->cfg_locked = 0;
8773                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8774                 rc = IPR_RC_JOB_CONTINUE;
8775         }
8776
8777         LEAVE;
8778         return rc;
8779 }
8780
8781 /**
8782  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8783  * @ipr_cmd:    ipr command struct
8784  *
8785  * Description: This clears PCI reset to the adapter and delays two seconds.
8786  *
8787  * Return value:
8788  *      IPR_RC_JOB_RETURN
8789  **/
8790 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8791 {
8792         ENTER;
8793         ipr_cmd->job_step = ipr_reset_bist_done;
8794         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8795         LEAVE;
8796         return IPR_RC_JOB_RETURN;
8797 }
8798
8799 /**
8800  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8801  * @work:       work struct
8802  *
8803  * Description: This pulses a warm reset to the slot.
8804  *
8805  **/
8806 static void ipr_reset_reset_work(struct work_struct *work)
8807 {
8808         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8809         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8810         struct pci_dev *pdev = ioa_cfg->pdev;
8811         unsigned long lock_flags = 0;
8812
8813         ENTER;
8814         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8815         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8816         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8817
8818         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8819         if (ioa_cfg->reset_cmd == ipr_cmd)
8820                 ipr_reset_ioa_job(ipr_cmd);
8821         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8822         LEAVE;
8823 }
8824
8825 /**
8826  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8827  * @ipr_cmd:    ipr command struct
8828  *
8829  * Description: This asserts PCI reset to the adapter.
8830  *
8831  * Return value:
8832  *      IPR_RC_JOB_RETURN
8833  **/
8834 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8835 {
8836         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8837
8838         ENTER;
8839         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8840         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8841         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8842         LEAVE;
8843         return IPR_RC_JOB_RETURN;
8844 }
8845
8846 /**
8847  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8848  * @ipr_cmd:    ipr command struct
8849  *
8850  * Description: This attempts to block config access to the IOA.
8851  *
8852  * Return value:
8853  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8854  **/
8855 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8856 {
8857         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8858         int rc = IPR_RC_JOB_CONTINUE;
8859
8860         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8861                 ioa_cfg->cfg_locked = 1;
8862                 ipr_cmd->job_step = ioa_cfg->reset;
8863         } else {
8864                 if (ipr_cmd->u.time_left) {
8865                         rc = IPR_RC_JOB_RETURN;
8866                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8867                         ipr_reset_start_timer(ipr_cmd,
8868                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8869                 } else {
8870                         ipr_cmd->job_step = ioa_cfg->reset;
8871                         dev_err(&ioa_cfg->pdev->dev,
8872                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8873                 }
8874         }
8875
8876         return rc;
8877 }
8878
8879 /**
8880  * ipr_reset_block_config_access - Block config access to the IOA
8881  * @ipr_cmd:    ipr command struct
8882  *
8883  * Description: This attempts to block config access to the IOA
8884  *
8885  * Return value:
8886  *      IPR_RC_JOB_CONTINUE
8887  **/
8888 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8889 {
8890         ipr_cmd->ioa_cfg->cfg_locked = 0;
8891         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8892         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8893         return IPR_RC_JOB_CONTINUE;
8894 }
8895
8896 /**
8897  * ipr_reset_allowed - Query whether or not IOA can be reset
8898  * @ioa_cfg:    ioa config struct
8899  *
8900  * Return value:
8901  *      0 if reset not allowed / non-zero if reset is allowed
8902  **/
8903 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8904 {
8905         volatile u32 temp_reg;
8906
8907         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8908         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8909 }
8910
8911 /**
8912  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8913  * @ipr_cmd:    ipr command struct
8914  *
8915  * Description: This function waits for adapter permission to run BIST,
8916  * then runs BIST. If the adapter does not give permission after a
8917  * reasonable time, we will reset the adapter anyway. The impact of
8918  * resetting the adapter without warning it is the risk of losing
8919  * its persistent error log: if the adapter is reset while it is
8920  * writing to its flash, the affected flash segment will have bad
8921  * ECC and be zeroed.
8922  *
8923  * Return value:
8924  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8925  **/
8926 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8927 {
8928         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8929         int rc = IPR_RC_JOB_RETURN;
8930
8931         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8932                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8933                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8934         } else {
8935                 ipr_cmd->job_step = ipr_reset_block_config_access;
8936                 rc = IPR_RC_JOB_CONTINUE;
8937         }
8938
8939         return rc;
8940 }
8941
8942 /**
8943  * ipr_reset_alert - Alert the adapter of a pending reset
8944  * @ipr_cmd:    ipr command struct
8945  *
8946  * Description: This function alerts the adapter that it will be reset.
8947  * If memory space is not currently enabled, proceed directly
8948  * to running BIST on the adapter. The timer must always be started
8949  * so we guarantee we do not run BIST from ipr_isr.
8950  *
8951  * Return value:
8952  *      IPR_RC_JOB_RETURN
8953  **/
8954 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8955 {
8956         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8957         u16 cmd_reg;
8958         int rc;
8959
8960         ENTER;
8961         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8962
8963         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8964                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8965                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8966                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8967         } else {
8968                 ipr_cmd->job_step = ipr_reset_block_config_access;
8969         }
8970
8971         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8972         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8973
8974         LEAVE;
8975         return IPR_RC_JOB_RETURN;
8976 }
8977
8978 /**
8979  * ipr_reset_quiesce_done - Complete IOA disconnect
8980  * @ipr_cmd:    ipr command struct
8981  *
8982  * Description: Freeze the adapter to complete quiesce processing
8983  *
8984  * Return value:
8985  *      IPR_RC_JOB_CONTINUE
8986  **/
8987 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8988 {
8989         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8990
8991         ENTER;
8992         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8993         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8994         LEAVE;
8995         return IPR_RC_JOB_CONTINUE;
8996 }
8997
8998 /**
8999  * ipr_reset_cancel_hcam_done - Check for outstanding commands
9000  * @ipr_cmd:    ipr command struct
9001  *
9002  * Description: Ensure nothing is outstanding to the IOA and
9003  * proceed with IOA disconnect. Otherwise reset the IOA.
9004  *
9005  * Return value:
9006  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9007  **/
9008 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9009 {
9010         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9011         struct ipr_cmnd *loop_cmd;
9012         struct ipr_hrr_queue *hrrq;
9013         int rc = IPR_RC_JOB_CONTINUE;
9014         int count = 0;
9015
9016         ENTER;
9017         ipr_cmd->job_step = ipr_reset_quiesce_done;
9018
9019         for_each_hrrq(hrrq, ioa_cfg) {
9020                 spin_lock(&hrrq->_lock);
9021                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9022                         count++;
9023                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9024                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9025                         rc = IPR_RC_JOB_RETURN;
9026                         break;
9027                 }
9028                 spin_unlock(&hrrq->_lock);
9029
9030                 if (count)
9031                         break;
9032         }
9033
9034         LEAVE;
9035         return rc;
9036 }
9037
9038 /**
9039  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9040  * @ipr_cmd:    ipr command struct
9041  *
9042  * Description: Cancel any outstanding HCAMs to the IOA.
9043  *
9044  * Return value:
9045  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9046  **/
9047 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9048 {
9049         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9050         int rc = IPR_RC_JOB_CONTINUE;
9051         struct ipr_cmd_pkt *cmd_pkt;
9052         struct ipr_cmnd *hcam_cmd;
9053         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9054
9055         ENTER;
9056         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9057
9058         if (!hrrq->ioa_is_dead) {
9059                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9060                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9061                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9062                                         continue;
9063
9064                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9066                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9067                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9068                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9069                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
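                                /*
                                 * The 64-bit IOARCB address of the HCAM
                                 * being cancelled is scattered across the
                                 * CDB: bytes 10-13 carry the upper 32 bits
                                 * and bytes 2-5 the lower 32 bits, each
                                 * most-significant byte first.
                                 */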
9070                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9071                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9072                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9073                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9074                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9075                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9076                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9077                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9078
9079                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9080                                            IPR_CANCEL_TIMEOUT);
9081
9082                                 rc = IPR_RC_JOB_RETURN;
9083                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9084                                 break;
9085                         }
9086                 }
9087         } else
9088                 ipr_cmd->job_step = ipr_reset_alert;
9089
9090         LEAVE;
9091         return rc;
9092 }
9093
9094 /**
9095  * ipr_reset_ucode_download_done - Microcode download completion
9096  * @ipr_cmd:    ipr command struct
9097  *
9098  * Description: This function unmaps the microcode download buffer.
9099  *
9100  * Return value:
9101  *      IPR_RC_JOB_CONTINUE
9102  **/
9103 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9104 {
9105         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9106         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9107
9108         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9109                      sglist->num_sg, DMA_TO_DEVICE);
9110
9111         ipr_cmd->job_step = ipr_reset_alert;
9112         return IPR_RC_JOB_CONTINUE;
9113 }
9114
9115 /**
9116  * ipr_reset_ucode_download - Download microcode to the adapter
9117  * @ipr_cmd:    ipr command struct
9118  *
9119  * Description: This function checks to see if there is microcode
9120  * to download to the adapter. If there is, a download is performed.
9121  *
9122  * Return value:
9123  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9124  **/
9125 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9126 {
9127         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9128         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9129
9130         ENTER;
9131         ipr_cmd->job_step = ipr_reset_alert;
9132
9133         if (!sglist)
9134                 return IPR_RC_JOB_CONTINUE;
9135
9136         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9137         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9138         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9139         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
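        /*
         * Per the SCSI WRITE BUFFER command, CDB bytes 6-8 carry the
         * 24-bit parameter list length (the microcode image size),
         * most-significant byte first.
         */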
9140         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9141         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9142         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9143
9144         if (ioa_cfg->sis64)
9145                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9146         else
9147                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9148         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9149
9150         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9151                    IPR_WRITE_BUFFER_TIMEOUT);
9152
9153         LEAVE;
9154         return IPR_RC_JOB_RETURN;
9155 }
9156
9157 /**
9158  * ipr_reset_shutdown_ioa - Shutdown the adapter
9159  * @ipr_cmd:    ipr command struct
9160  *
9161  * Description: This function issues an adapter shutdown of the
9162  * specified type to the specified adapter as part of the
9163  * adapter reset job.
9164  *
9165  * Return value:
9166  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9167  **/
9168 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9169 {
9170         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9171         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9172         unsigned long timeout;
9173         int rc = IPR_RC_JOB_CONTINUE;
9174
9175         ENTER;
9176         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9177                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9178         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9179                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9180                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9181                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9182                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9183                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9184
9185                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9186                         timeout = IPR_SHUTDOWN_TIMEOUT;
9187                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9188                         timeout = IPR_INTERNAL_TIMEOUT;
9189                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9190                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9191                 else
9192                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9193
9194                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9195
9196                 rc = IPR_RC_JOB_RETURN;
9197                 ipr_cmd->job_step = ipr_reset_ucode_download;
9198         } else
9199                 ipr_cmd->job_step = ipr_reset_alert;
9200
9201         LEAVE;
9202         return rc;
9203 }
9204
9205 /**
9206  * ipr_reset_ioa_job - Adapter reset job
9207  * @ipr_cmd:    ipr command struct
9208  *
9209  * Description: This function is the job router for the adapter reset job.
9210  *
9211  * Return value:
9212  *      none
9213  **/
9214 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9215 {
9216         u32 rc, ioasc;
9217         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9218
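        /*
         * Job steps run back-to-back while they return
         * IPR_RC_JOB_CONTINUE; a step that has queued asynchronous work
         * (a timer or an adapter command) returns IPR_RC_JOB_RETURN and
         * this routine is re-entered via ipr_cmd->done when that work
         * completes.
         */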
9219         do {
9220                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9221
9222                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9223                         /*
9224                          * We are doing nested adapter resets and this is
9225                          * not the current reset job.
9226                          */
9227                         list_add_tail(&ipr_cmd->queue,
9228                                         &ipr_cmd->hrrq->hrrq_free_q);
9229                         return;
9230                 }
9231
9232                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9233                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9234                         if (rc == IPR_RC_JOB_RETURN)
9235                                 return;
9236                 }
9237
9238                 ipr_reinit_ipr_cmnd(ipr_cmd);
9239                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9240                 rc = ipr_cmd->job_step(ipr_cmd);
9241         } while (rc == IPR_RC_JOB_CONTINUE);
9242 }
9243
9244 /**
9245  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9246  * @ioa_cfg:            ioa config struct
9247  * @job_step:           first job step of reset job
9248  * @shutdown_type:      shutdown type
9249  *
9250  * Description: This function will initiate the reset of the given adapter
9251  * starting at the selected job step.
9252  * If the caller needs to wait on the completion of the reset,
9253  * the caller must sleep on the reset_wait_q.
9254  *
9255  * Return value:
9256  *      none
9257  **/
9258 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9259                                     int (*job_step) (struct ipr_cmnd *),
9260                                     enum ipr_shutdown_type shutdown_type)
9261 {
9262         struct ipr_cmnd *ipr_cmd;
9263         int i;
9264
9265         ioa_cfg->in_reset_reload = 1;
9266         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9267                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9268                 ioa_cfg->hrrq[i].allow_cmds = 0;
9269                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9270         }
9271         wmb();
9272         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9273                 ioa_cfg->scsi_unblock = 0;
9274                 ioa_cfg->scsi_blocked = 1;
9275                 scsi_block_requests(ioa_cfg->host);
9276         }
9277
9278         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9279         ioa_cfg->reset_cmd = ipr_cmd;
9280         ipr_cmd->job_step = job_step;
9281         ipr_cmd->u.shutdown_type = shutdown_type;
9282
9283         ipr_reset_ioa_job(ipr_cmd);
9284 }
9285
9286 /**
9287  * ipr_initiate_ioa_reset - Initiate an adapter reset
9288  * @ioa_cfg:            ioa config struct
9289  * @shutdown_type:      shutdown type
9290  *
9291  * Description: This function will initiate the reset of the given adapter.
9292  * If the caller needs to wait on the completion of the reset,
9293  * the caller must sleep on the reset_wait_q.
9294  *
9295  * Return value:
9296  *      none
9297  **/
9298 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9299                                    enum ipr_shutdown_type shutdown_type)
9300 {
9301         int i;
9302
9303         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9304                 return;
9305
9306         if (ioa_cfg->in_reset_reload) {
9307                 if (ioa_cfg->sdt_state == GET_DUMP)
9308                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9309                 else if (ioa_cfg->sdt_state == READ_DUMP)
9310                         ioa_cfg->sdt_state = ABORT_DUMP;
9311         }
9312
9313         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9314                 dev_err(&ioa_cfg->pdev->dev,
9315                         "IOA taken offline - error recovery failed\n");
9316
9317                 ioa_cfg->reset_retries = 0;
9318                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9319                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9320                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9321                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9322                 }
9323                 wmb();
9324
9325                 if (ioa_cfg->in_ioa_bringdown) {
9326                         ioa_cfg->reset_cmd = NULL;
9327                         ioa_cfg->in_reset_reload = 0;
9328                         ipr_fail_all_ops(ioa_cfg);
9329                         wake_up_all(&ioa_cfg->reset_wait_q);
9330
9331                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9332                                 ioa_cfg->scsi_unblock = 1;
9333                                 schedule_work(&ioa_cfg->work_q);
9334                         }
9335                         return;
9336                 } else {
9337                         ioa_cfg->in_ioa_bringdown = 1;
9338                         shutdown_type = IPR_SHUTDOWN_NONE;
9339                 }
9340         }
9341
9342         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9343                                 shutdown_type);
9344 }
9345
9346 /**
9347  * ipr_reset_freeze - Hold off all I/O activity
9348  * @ipr_cmd:    ipr command struct
9349  *
9350  * Description: If the PCI slot is frozen, hold off all I/O
9351  * activity; then, as soon as the slot is available again,
9352  * initiate an adapter reset.
9353  *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/
9354 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9355 {
9356         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9357         int i;
9358
9359         /* Disallow new interrupts, avoid loop */
9360         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9361                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9362                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9363                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9364         }
9365         wmb();
9366         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9367         ipr_cmd->done = ipr_reset_ioa_job;
9368         return IPR_RC_JOB_RETURN;
9369 }
9370
9371 /**
9372  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9373  * @pdev:       PCI device struct
9374  *
9375  * Description: This routine is called to tell us that the MMIO
9376  * access to the IOA has been restored
9377  */
9378 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9379 {
9380         unsigned long flags = 0;
9381         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9382
9383         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9384         if (!ioa_cfg->probe_done)
9385                 pci_save_state(pdev);
9386         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9387         return PCI_ERS_RESULT_NEED_RESET;
9388 }
9389
9390 /**
9391  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9392  * @pdev:       PCI device struct
9393  *
9394  * Description: This routine is called to tell us that the PCI bus
9395  * is down. Can't do anything here, except put the device driver
9396  * into a holding pattern, waiting for the PCI bus to come back.
9397  */
9398 static void ipr_pci_frozen(struct pci_dev *pdev)
9399 {
9400         unsigned long flags = 0;
9401         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9402
9403         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9404         if (ioa_cfg->probe_done)
9405                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9406         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9407 }
9408
9409 /**
9410  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9411  * @pdev:       PCI device struct
9412  *
9413  * Description: This routine is called by the pci error recovery
9414  * code after the PCI slot has been reset, just before we
9415  * should resume normal operations.
9416  */
9417 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9418 {
9419         unsigned long flags = 0;
9420         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9421
9422         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9423         if (ioa_cfg->probe_done) {
9424                 if (ioa_cfg->needs_warm_reset)
9425                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9426                 else
9427                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9428                                                 IPR_SHUTDOWN_NONE);
9429         } else
9430                 wake_up_all(&ioa_cfg->eeh_wait_q);
9431         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9432         return PCI_ERS_RESULT_RECOVERED;
9433 }
9434
9435 /**
9436  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9437  * @pdev:       PCI device struct
9438  *
9439  * Description: This routine is called when the PCI bus has
9440  * permanently failed.
9441  */
9442 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9443 {
9444         unsigned long flags = 0;
9445         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9446         int i;
9447
9448         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9449         if (ioa_cfg->probe_done) {
9450                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9451                         ioa_cfg->sdt_state = ABORT_DUMP;
9452                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9453                 ioa_cfg->in_ioa_bringdown = 1;
9454                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9455                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9456                         ioa_cfg->hrrq[i].allow_cmds = 0;
9457                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9458                 }
9459                 wmb();
9460                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9461         } else
9462                 wake_up_all(&ioa_cfg->eeh_wait_q);
9463         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9464 }
9465
9466 /**
9467  * ipr_pci_error_detected - Called when a PCI error is detected.
9468  * @pdev:       PCI device struct
9469  * @state:      PCI channel state
9470  *
9471  * Description: Called when a PCI error is detected.
9472  *
9473  * Return value:
9474  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9475  */
9476 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9477                                                pci_channel_state_t state)
9478 {
9479         switch (state) {
9480         case pci_channel_io_frozen:
9481                 ipr_pci_frozen(pdev);
9482                 return PCI_ERS_RESULT_CAN_RECOVER;
9483         case pci_channel_io_perm_failure:
9484                 ipr_pci_perm_failure(pdev);
9485                 return PCI_ERS_RESULT_DISCONNECT;
9487         default:
9488                 break;
9489         }
9490         return PCI_ERS_RESULT_NEED_RESET;
9491 }
9492
9493 /**
9494  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9495  * @ioa_cfg:    ioa cfg struct
9496  *
9497  * Description: This is the second phase of adapter initialization.
9498  * This function takes care of initializing the adapter to the point
9499  * where it can accept new commands.
9500  *
9501  * Return value:
9502  *      0 on success / -EIO on failure
9503  **/
9504 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9505 {
9506         int rc = 0;
9507         unsigned long host_lock_flags = 0;
9508
9509         ENTER;
9510         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9511         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9512         ioa_cfg->probe_done = 1;
9513         if (ioa_cfg->needs_hard_reset) {
9514                 ioa_cfg->needs_hard_reset = 0;
9515                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9516         } else
9517                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9518                                         IPR_SHUTDOWN_NONE);
9519         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9520
9521         LEAVE;
9522         return rc;
9523 }
9524
9525 /**
9526  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9527  * @ioa_cfg:    ioa config struct
9528  *
9529  * Return value:
9530  *      none
9531  **/
9532 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9533 {
9534         int i;
9535
9536         if (ioa_cfg->ipr_cmnd_list) {
9537                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9538                         if (ioa_cfg->ipr_cmnd_list[i])
9539                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9540                                               ioa_cfg->ipr_cmnd_list[i],
9541                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9542
9543                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9544                 }
9545         }
9546
9547         if (ioa_cfg->ipr_cmd_pool)
9548                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9549
9550         kfree(ioa_cfg->ipr_cmnd_list);
9551         kfree(ioa_cfg->ipr_cmnd_list_dma);
9552         ioa_cfg->ipr_cmnd_list = NULL;
9553         ioa_cfg->ipr_cmnd_list_dma = NULL;
9554         ioa_cfg->ipr_cmd_pool = NULL;
9555 }
9556
9557 /**
9558  * ipr_free_mem - Frees memory allocated for an adapter
9559  * @ioa_cfg:    ioa cfg struct
9560  *
9561  * Return value:
9562  *      nothing
9563  **/
9564 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9565 {
9566         int i;
9567
9568         kfree(ioa_cfg->res_entries);
9569         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9570                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9571         ipr_free_cmd_blks(ioa_cfg);
9572
9573         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9574                 dma_free_coherent(&ioa_cfg->pdev->dev,
9575                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9576                                   ioa_cfg->hrrq[i].host_rrq,
9577                                   ioa_cfg->hrrq[i].host_rrq_dma);
9578
9579         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9580                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9581
9582         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9583                 dma_free_coherent(&ioa_cfg->pdev->dev,
9584                                   sizeof(struct ipr_hostrcb),
9585                                   ioa_cfg->hostrcb[i],
9586                                   ioa_cfg->hostrcb_dma[i]);
9587         }
9588
9589         ipr_free_dump(ioa_cfg);
9590         kfree(ioa_cfg->trace);
9591 }
9592
9593 /**
9594  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9595  * @ioa_cfg:    ioa config struct
9596  *
9597  * This function frees all allocated IRQs for the
9598  * specified adapter.
9599  *
9600  * Return value:
9601  *      none
9602  **/
9603 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9604 {
9605         struct pci_dev *pdev = ioa_cfg->pdev;
9606         int i;
9607
9608         for (i = 0; i < ioa_cfg->nvectors; i++)
9609                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9610         pci_free_irq_vectors(pdev);
9611 }
9612
9613 /**
9614  * ipr_free_all_resources - Free all allocated resources for an adapter.
9615  * @ioa_cfg:    ioa config struct
9616  *
9617  * This function frees all allocated resources for the
9618  * specified adapter.
9619  *
9620  * Return value:
9621  *      none
9622  **/
9623 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9624 {
9625         struct pci_dev *pdev = ioa_cfg->pdev;
9626
9627         ENTER;
9628         ipr_free_irqs(ioa_cfg);
9629         if (ioa_cfg->reset_work_q)
9630                 destroy_workqueue(ioa_cfg->reset_work_q);
9631         iounmap(ioa_cfg->hdw_dma_regs);
9632         pci_release_regions(pdev);
9633         ipr_free_mem(ioa_cfg);
9634         scsi_host_put(ioa_cfg->host);
9635         pci_disable_device(pdev);
9636         LEAVE;
9637 }
9638
9639 /**
9640  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9641  * @ioa_cfg:    ioa config struct
9642  *
9643  * Return value:
9644  *      0 on success / -ENOMEM on allocation failure
9645  **/
9646 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9647 {
9648         struct ipr_cmnd *ipr_cmd;
9649         struct ipr_ioarcb *ioarcb;
9650         dma_addr_t dma_addr;
9651         int i, entries_each_hrrq, hrrq_id = 0;
9652
9653         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9654                                                 sizeof(struct ipr_cmnd), 512, 0);
9655
9656         if (!ioa_cfg->ipr_cmd_pool)
9657                 return -ENOMEM;
9658
9659         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9660         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9661
9662         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9663                 ipr_free_cmd_blks(ioa_cfg);
9664                 return -ENOMEM;
9665         }
9666
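        /*
         * Partition the command blocks among the HRRQs: queue 0 keeps
         * IPR_NUM_INTERNAL_CMD_BLKS for internal use and the remaining
         * IPR_NUM_BASE_CMD_BLKS are split evenly among the other queues.
         * For illustration (hypothetical sizes): with 3 internal blocks,
         * 100 base blocks and hrrq_num == 3, queue 0 would get ids 0-2
         * and queues 1 and 2 would get 50 ids each, starting at id 3.
         */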
9667         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9668                 if (ioa_cfg->hrrq_num > 1) {
9669                         if (i == 0) {
9670                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9671                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9672                                 ioa_cfg->hrrq[i].max_cmd_id =
9673                                         (entries_each_hrrq - 1);
9674                         } else {
9675                                 entries_each_hrrq =
9676                                         IPR_NUM_BASE_CMD_BLKS/
9677                                         (ioa_cfg->hrrq_num - 1);
9678                                 ioa_cfg->hrrq[i].min_cmd_id =
9679                                         IPR_NUM_INTERNAL_CMD_BLKS +
9680                                         (i - 1) * entries_each_hrrq;
9681                                 ioa_cfg->hrrq[i].max_cmd_id =
9682                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9683                                         i * entries_each_hrrq - 1);
9684                         }
9685                 } else {
9686                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9687                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9688                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9689                 }
9690                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9691         }
9692
9693         BUG_ON(ioa_cfg->hrrq_num == 0);
9694
9695         i = IPR_NUM_CMD_BLKS -
9696                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9697         if (i > 0) {
9698                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9699                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9700         }
9701
9702         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9703                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9704
9705                 if (!ipr_cmd) {
9706                         ipr_free_cmd_blks(ioa_cfg);
9707                         return -ENOMEM;
9708                 }
9709
9710                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9711                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9712                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9713
9714                 ioarcb = &ipr_cmd->ioarcb;
9715                 ipr_cmd->dma_addr = dma_addr;
9716                 if (ioa_cfg->sis64)
9717                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9718                 else
9719                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9720
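                /*
                 * The command index is stored shifted left by two; the
                 * low two bits of each host RRQ entry are reserved for
                 * response flags (toggle/valid) when completions are
                 * posted.
                 */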
9721                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9722                 if (ioa_cfg->sis64) {
9723                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9724                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9725                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9726                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9727                 } else {
9728                         ioarcb->write_ioadl_addr =
9729                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9730                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9731                         ioarcb->ioasa_host_pci_addr =
9732                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9733                 }
9734                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9735                 ipr_cmd->cmd_index = i;
9736                 ipr_cmd->ioa_cfg = ioa_cfg;
9737                 ipr_cmd->sense_buffer_dma = dma_addr +
9738                         offsetof(struct ipr_cmnd, sense_buffer);
9739
9740                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9741                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9742                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9743                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9744                         hrrq_id++;
9745         }
9746
9747         return 0;
9748 }
9749
9750 /**
9751  * ipr_alloc_mem - Allocate memory for an adapter
9752  * @ioa_cfg:    ioa config struct
9753  *
9754  * Return value:
9755  *      0 on success / non-zero for error
9756  **/
9757 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9758 {
9759         struct pci_dev *pdev = ioa_cfg->pdev;
9760         int i, rc = -ENOMEM;
9761
9762         ENTER;
9763         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9764                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9765
9766         if (!ioa_cfg->res_entries)
9767                 goto out;
9768
9769         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9770                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9771                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9772         }
9773
9774         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9775                                               sizeof(struct ipr_misc_cbs),
9776                                               &ioa_cfg->vpd_cbs_dma,
9777                                               GFP_KERNEL);
9778
9779         if (!ioa_cfg->vpd_cbs)
9780                 goto out_free_res_entries;
9781
9782         if (ipr_alloc_cmd_blks(ioa_cfg))
9783                 goto out_free_vpd_cbs;
9784
9785         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9786                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9787                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9788                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9789                                         GFP_KERNEL);
9790
9791                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9792                         while (--i >= 0)
9793                                 dma_free_coherent(&pdev->dev,
9794                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9795                                         ioa_cfg->hrrq[i].host_rrq,
9796                                         ioa_cfg->hrrq[i].host_rrq_dma);
9797                         goto out_ipr_free_cmd_blocks;
9798                 }
9799                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9800         }
9801
9802         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9803                                                   ioa_cfg->cfg_table_size,
9804                                                   &ioa_cfg->cfg_table_dma,
9805                                                   GFP_KERNEL);
9806
9807         if (!ioa_cfg->u.cfg_table)
9808                 goto out_free_host_rrq;
9809
9810         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9811                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9812                                                          sizeof(struct ipr_hostrcb),
9813                                                          &ioa_cfg->hostrcb_dma[i],
9814                                                          GFP_KERNEL);
9815
9816                 if (!ioa_cfg->hostrcb[i])
9817                         goto out_free_hostrcb_dma;
9818
9819                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9820                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9821                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9822                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9823         }
9824
9825         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9826                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9827
9828         if (!ioa_cfg->trace)
9829                 goto out_free_hostrcb_dma;
9830
9831         rc = 0;
9832 out:
9833         LEAVE;
9834         return rc;
9835
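/*
 * Error unwind: on entry to out_free_hostrcb_dma, i is the number of
 * hostrcbs successfully allocated so far (IPR_MAX_HCAMS if it was the
 * trace buffer allocation that failed).
 */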
9836 out_free_hostrcb_dma:
9837         while (i-- > 0) {
9838                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9839                                   ioa_cfg->hostrcb[i],
9840                                   ioa_cfg->hostrcb_dma[i]);
9841         }
9842         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9843                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9844 out_free_host_rrq:
9845         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9846                 dma_free_coherent(&pdev->dev,
9847                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9848                                   ioa_cfg->hrrq[i].host_rrq,
9849                                   ioa_cfg->hrrq[i].host_rrq_dma);
9850         }
9851 out_ipr_free_cmd_blocks:
9852         ipr_free_cmd_blks(ioa_cfg);
9853 out_free_vpd_cbs:
9854         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9855                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9856 out_free_res_entries:
9857         kfree(ioa_cfg->res_entries);
9858         goto out;
9859 }
9860
9861 /**
9862  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9863  * @ioa_cfg:    ioa config struct
9864  *
9865  * Return value:
9866  *      none
9867  **/
9868 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9869 {
9870         int i;
9871
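        /*
         * The ipr_max_speed module parameter indexes ipr_max_bus_speeds[];
         * out-of-range values fall back to the U160 rate.
         */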
9872         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9873                 ioa_cfg->bus_attr[i].bus = i;
9874                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9875                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9876                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9877                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9878                 else
9879                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9880         }
9881 }
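/*
 * The ARRAY_SIZE() check above is the standard guard when a module
 * parameter indexes a fixed table: out-of-range values fall back to a
 * safe default instead of reading past the end of the array.  An
 * illustrative fragment (names hypothetical):
 *
 *	static const u32 rates[] = { 80, 160, 320 };
 *	u32 rate = idx < ARRAY_SIZE(rates) ? rates[idx] : rates[0];
 */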
9882
9883 /**
9884  * ipr_init_regs - Initialize IOA registers
9885  * @ioa_cfg:    ioa config struct
9886  *
9887  * Return value:
9888  *      none
9889  **/
9890 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9891 {
9892         const struct ipr_interrupt_offsets *p;
9893         struct ipr_interrupts *t;
9894         void __iomem *base;
9895
9896         p = &ioa_cfg->chip_cfg->regs;
9897         t = &ioa_cfg->regs;
9898         base = ioa_cfg->hdw_dma_regs;
9899
9900         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9901         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9902         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9903         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9904         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9905         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9906         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9907         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9908         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9909         t->ioarrin_reg = base + p->ioarrin_reg;
9910         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9911         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9912         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9913         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9914         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9915         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9916
9917         if (ioa_cfg->sis64) {
9918                 t->init_feedback_reg = base + p->init_feedback_reg;
9919                 t->dump_addr_reg = base + p->dump_addr_reg;
9920                 t->dump_data_reg = base + p->dump_data_reg;
9921                 t->endian_swap_reg = base + p->endian_swap_reg;
9922         }
9923 }
9924
9925 /**
9926  * ipr_init_ioa_cfg - Initialize IOA config struct
9927  * @ioa_cfg:    ioa config struct
9928  * @host:               scsi host struct
9929  * @pdev:               PCI dev struct
9930  *
9931  * Return value:
9932  *      none
9933  **/
9934 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9935                              struct Scsi_Host *host, struct pci_dev *pdev)
9936 {
9937         int i;
9938
9939         ioa_cfg->host = host;
9940         ioa_cfg->pdev = pdev;
9941         ioa_cfg->log_level = ipr_log_level;
9942         ioa_cfg->doorbell = IPR_DOORBELL;
9943         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9944         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9945         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9946         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9947         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9948         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9949
9950         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9951         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9952         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9953         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9954         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9955         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9956         INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9957         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9958         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9959         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9960         ioa_cfg->sdt_state = INACTIVE;
9961
9962         ipr_initialize_bus_attr(ioa_cfg);
9963         ioa_cfg->max_devs_supported = ipr_max_devs;
9964
9965         if (ioa_cfg->sis64) {
9966                 host->max_channel = IPR_MAX_SIS64_BUSES;
9967                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9968                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9969                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9970                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9971                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9972                                            + ((sizeof(struct ipr_config_table_entry64)
9973                                                * ioa_cfg->max_devs_supported)));
9974         } else {
9975                 host->max_channel = IPR_VSET_BUS;
9976                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9977                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9978                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9979                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9980                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9981                                            + ((sizeof(struct ipr_config_table_entry)
9982                                                * ioa_cfg->max_devs_supported)));
9983         }
9984
9985         host->unique_id = host->host_no;
9986         host->max_cmd_len = IPR_MAX_CDB_LEN;
9987         host->can_queue = ioa_cfg->max_cmds;
9988         pci_set_drvdata(pdev, ioa_cfg);
9989
9990         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9991                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9992                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9993                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9994                 if (i == 0)
9995                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9996                 else
9997                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9998         }
9999 }
10000
10001 /**
10002  * ipr_get_chip_info - Find adapter chip information
10003  * @dev_id:             PCI device id struct
10004  *
10005  * Return value:
10006  *      ptr to chip information on success / NULL on failure
10007  **/
10008 static const struct ipr_chip_t *
10009 ipr_get_chip_info(const struct pci_device_id *dev_id)
10010 {
10011         int i;
10012
10013         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10014                 if (ipr_chip[i].vendor == dev_id->vendor &&
10015                     ipr_chip[i].device == dev_id->device)
10016                         return &ipr_chip[i];
10017         return NULL;
10018 }
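/*
 * Callers must handle a NULL return from the linear scan above, e.g.:
 *
 *	const struct ipr_chip_t *chip = ipr_get_chip_info(dev_id);
 *	if (!chip)
 *		return -ENODEV;
 */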
10019
10020 /**
10021  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10022  *                                              during probe time
10023  * @ioa_cfg:    ioa config struct
10024  *
10025  * Return value:
10026  *      None
10027  **/
10028 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10029 {
10030         struct pci_dev *pdev = ioa_cfg->pdev;
10031
10032         if (pci_channel_offline(pdev)) {
10033                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10034                                    !pci_channel_offline(pdev),
10035                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10036                 pci_restore_state(pdev);
10037         }
10038 }
10039
10040 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10041 {
10042         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10043
10044         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10045                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10046                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10047                 ioa_cfg->vectors_info[vec_idx].
10048                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10049         }
10050 }
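/*
 * Note: snprintf() always NUL-terminates its output when the size
 * argument is non-zero, so the explicit termination above is
 * belt-and-braces rather than strictly required.
 */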
10051
10052 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10053                 struct pci_dev *pdev)
10054 {
10055         int i, rc;
10056
10057         for (i = 1; i < ioa_cfg->nvectors; i++) {
10058                 rc = request_irq(pci_irq_vector(pdev, i),
10059                         ipr_isr_mhrrq,
10060                         0,
10061                         ioa_cfg->vectors_info[i].desc,
10062                         &ioa_cfg->hrrq[i]);
10063                 if (rc) {
10064                         while (--i > 0)
10065                                 free_irq(pci_irq_vector(pdev, i),
10066                                         &ioa_cfg->hrrq[i]);
10067                         return rc;
10068                 }
10069         }
10070         return 0;
10071 }
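/*
 * Vector 0 is requested separately in ipr_probe_ioa(), so the loop
 * above starts at 1 and the failure path only unwinds vectors
 * 1..i-1; the "--i > 0" test deliberately leaves vector 0 alone.
 */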
10072
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:                interrupt number
 * @devp:               ioa config struct pointer
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *      IRQ_HANDLED
 **/
10083 static irqreturn_t ipr_test_intr(int irq, void *devp)
10084 {
10085         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10086         unsigned long lock_flags = 0;
10087         irqreturn_t rc = IRQ_HANDLED;
10088
        dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10090         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10091
10092         ioa_cfg->msi_received = 1;
10093         wake_up(&ioa_cfg->msi_wait_q);
10094
10095         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10096         return rc;
10097 }
10098
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10110 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10111 {
10112         int rc;
10113         volatile u32 int_reg;
10114         unsigned long lock_flags = 0;
10115         int irq = pci_irq_vector(pdev, 0);
10116
10117         ENTER;
10118
10119         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10120         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10121         ioa_cfg->msi_received = 0;
10122         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10123         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10124         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10125         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10126
10127         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10128         if (rc) {
10129                 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10130                 return rc;
10131         } else if (ipr_debug)
10132                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10133
10134         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10135         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10136         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10137         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10138         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10139
10140         if (!ioa_cfg->msi_received) {
10141                 /* MSI test failed */
10142                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10143                 rc = -EOPNOTSUPP;
10144         } else if (ipr_debug)
10145                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10146
10147         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10148
10149         free_irq(irq, ioa_cfg);
10150
10151         LEAVE;
10152
10153         return rc;
10154 }
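/*
 * The test above masks the adapter interrupts, unmasks only
 * IPR_PCII_IO_DEBUG_ACKNOWLEDGE, hooks ipr_test_intr() on vector 0,
 * raises the debug-acknowledge condition, and then sleeps up to HZ
 * jiffies for msi_received to be set.  The readl() of
 * sense_interrupt_reg after each writel() flushes the posted MMIO
 * write so the adapter sees it before the wait begins.
 */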
10155
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:               PCI device struct
 * @dev_id:             PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10163 static int ipr_probe_ioa(struct pci_dev *pdev,
10164                          const struct pci_device_id *dev_id)
10165 {
10166         struct ipr_ioa_cfg *ioa_cfg;
10167         struct Scsi_Host *host;
10168         unsigned long ipr_regs_pci;
10169         void __iomem *ipr_regs;
10170         int rc = PCIBIOS_SUCCESSFUL;
10171         volatile u32 mask, uproc, interrupts;
10172         unsigned long lock_flags, driver_lock_flags;
10173         unsigned int irq_flag;
10174
10175         ENTER;
10176
10177         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10178         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10179
10180         if (!host) {
10181                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10182                 rc = -ENOMEM;
10183                 goto out;
10184         }
10185
10186         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10187         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10188         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10189
10190         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10191
        if (!ioa_cfg->ipr_chip) {
                dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
                        dev_id->vendor, dev_id->device);
                rc = -ENODEV;
                goto out_scsi_host_put;
        }
10197
10198         /* set SIS 32 or SIS 64 */
10199         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10200         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10201         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10202         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10203
10204         if (ipr_transop_timeout)
10205                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10206         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10207                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10208         else
10209                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10210
10211         ioa_cfg->revid = pdev->revision;
10212
10213         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10214
10215         ipr_regs_pci = pci_resource_start(pdev, 0);
10216
10217         rc = pci_request_regions(pdev, IPR_NAME);
10218         if (rc < 0) {
10219                 dev_err(&pdev->dev,
10220                         "Couldn't register memory range of registers\n");
10221                 goto out_scsi_host_put;
10222         }
10223
10224         rc = pci_enable_device(pdev);
10225
10226         if (rc || pci_channel_offline(pdev)) {
10227                 if (pci_channel_offline(pdev)) {
10228                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10229                         rc = pci_enable_device(pdev);
10230                 }
10231
10232                 if (rc) {
10233                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10234                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10235                         goto out_release_regions;
10236                 }
10237         }
10238
10239         ipr_regs = pci_ioremap_bar(pdev, 0);
10240
10241         if (!ipr_regs) {
10242                 dev_err(&pdev->dev,
10243                         "Couldn't map memory range of registers\n");
10244                 rc = -ENOMEM;
10245                 goto out_disable;
10246         }
10247
10248         ioa_cfg->hdw_dma_regs = ipr_regs;
10249         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10250         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10251
10252         ipr_init_regs(ioa_cfg);
10253
10254         if (ioa_cfg->sis64) {
10255                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10256                 if (rc < 0) {
10257                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10258                         rc = dma_set_mask_and_coherent(&pdev->dev,
10259                                                        DMA_BIT_MASK(32));
10260                 }
10261         } else
10262                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10263
10264         if (rc < 0) {
10265                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10266                 goto cleanup_nomem;
10267         }
10268
10269         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10270                                    ioa_cfg->chip_cfg->cache_line_size);
10271
10272         if (rc != PCIBIOS_SUCCESSFUL) {
10273                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10274                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10275                 rc = -EIO;
10276                 goto cleanup_nomem;
10277         }
10278
10279         /* Issue MMIO read to ensure card is not in EEH */
10280         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10281         ipr_wait_for_pci_err_recovery(ioa_cfg);
10282
10283         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10284                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10285                         IPR_MAX_MSIX_VECTORS);
10286                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10287         }
10288
10289         irq_flag = PCI_IRQ_LEGACY;
10290         if (ioa_cfg->ipr_chip->has_msi)
10291                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10292         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10293         if (rc < 0) {
10294                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10295                 goto cleanup_nomem;
10296         }
10297         ioa_cfg->nvectors = rc;
10298
10299         if (!pdev->msi_enabled && !pdev->msix_enabled)
10300                 ioa_cfg->clear_isr = 1;
10301
10302         pci_set_master(pdev);
10303
10304         if (pci_channel_offline(pdev)) {
10305                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10306                 pci_set_master(pdev);
10307                 if (pci_channel_offline(pdev)) {
10308                         rc = -EIO;
10309                         goto out_msi_disable;
10310                 }
10311         }
10312
10313         if (pdev->msi_enabled || pdev->msix_enabled) {
10314                 rc = ipr_test_msi(ioa_cfg, pdev);
10315                 switch (rc) {
10316                 case 0:
10317                         dev_info(&pdev->dev,
                                "Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10319                                 pdev->msix_enabled ? "-X" : "");
10320                         break;
10321                 case -EOPNOTSUPP:
10322                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10323                         pci_free_irq_vectors(pdev);
10324
10325                         ioa_cfg->nvectors = 1;
10326                         ioa_cfg->clear_isr = 1;
10327                         break;
10328                 default:
10329                         goto out_msi_disable;
10330                 }
10331         }
10332
10333         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10334                                 (unsigned int)num_online_cpus(),
10335                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10336
10337         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10338                 goto out_msi_disable;
10339
10340         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10341                 goto out_msi_disable;
10342
10343         rc = ipr_alloc_mem(ioa_cfg);
10344         if (rc < 0) {
10345                 dev_err(&pdev->dev,
10346                         "Couldn't allocate enough memory for device driver!\n");
10347                 goto out_msi_disable;
10348         }
10349
10350         /* Save away PCI config space for use following IOA reset */
10351         rc = pci_save_state(pdev);
10352
10353         if (rc != PCIBIOS_SUCCESSFUL) {
10354                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10355                 rc = -EIO;
10356                 goto cleanup_nolog;
10357         }
10358
10359         /*
10360          * If HRRQ updated interrupt is not masked, or reset alert is set,
10361          * the card is in an unknown state and needs a hard reset
10362          */
10363         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10364         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10365         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10366         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10367                 ioa_cfg->needs_hard_reset = 1;
10368         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10369                 ioa_cfg->needs_hard_reset = 1;
10370         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10371                 ioa_cfg->ioa_unit_checked = 1;
10372
10373         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10374         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10375         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10376
10377         if (pdev->msi_enabled || pdev->msix_enabled) {
10378                 name_msi_vectors(ioa_cfg);
10379                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10380                         ioa_cfg->vectors_info[0].desc,
10381                         &ioa_cfg->hrrq[0]);
10382                 if (!rc)
10383                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10384         } else {
10385                 rc = request_irq(pdev->irq, ipr_isr,
10386                          IRQF_SHARED,
10387                          IPR_NAME, &ioa_cfg->hrrq[0]);
10388         }
10389         if (rc) {
10390                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10391                         pdev->irq, rc);
10392                 goto cleanup_nolog;
10393         }
10394
10395         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10396             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10397                 ioa_cfg->needs_warm_reset = 1;
10398                 ioa_cfg->reset = ipr_reset_slot_reset;
10399
10400                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10401                                                                 WQ_MEM_RECLAIM, host->host_no);
10402
10403                 if (!ioa_cfg->reset_work_q) {
10404                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10405                         rc = -ENOMEM;
10406                         goto out_free_irq;
10407                 }
10408         } else
10409                 ioa_cfg->reset = ipr_reset_start_bist;
10410
10411         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10412         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10413         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10414
10415         LEAVE;
10416 out:
10417         return rc;
10418
10419 out_free_irq:
10420         ipr_free_irqs(ioa_cfg);
10421 cleanup_nolog:
10422         ipr_free_mem(ioa_cfg);
10423 out_msi_disable:
10424         ipr_wait_for_pci_err_recovery(ioa_cfg);
10425         pci_free_irq_vectors(pdev);
10426 cleanup_nomem:
10427         iounmap(ipr_regs);
10428 out_disable:
10429         pci_disable_device(pdev);
10430 out_release_regions:
10431         pci_release_regions(pdev);
10432 out_scsi_host_put:
10433         scsi_host_put(host);
10434         goto out;
10435 }
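/*
 * Cleanup in ipr_probe_ioa() mirrors setup order: IRQs, then driver
 * memory, then the MSI/MSI-X vectors, then the register mapping, then
 * the PCI device, then the BAR regions, and finally the SCSI host
 * reference, with each error label falling through to the ones below.
 */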
10436
10437 /**
10438  * ipr_initiate_ioa_bringdown - Bring down an adapter
10439  * @ioa_cfg:            ioa config struct
10440  * @shutdown_type:      shutdown type
10441  *
10442  * Description: This function will initiate bringing down the adapter.
10443  * This consists of issuing an IOA shutdown to the adapter
10444  * to flush the cache, and running BIST.
10445  * If the caller needs to wait on the completion of the reset,
10446  * the caller must sleep on the reset_wait_q.
10447  *
10448  * Return value:
10449  *      none
10450  **/
10451 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10452                                        enum ipr_shutdown_type shutdown_type)
10453 {
10454         ENTER;
10455         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10456                 ioa_cfg->sdt_state = ABORT_DUMP;
10457         ioa_cfg->reset_retries = 0;
10458         ioa_cfg->in_ioa_bringdown = 1;
10459         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10460         LEAVE;
10461 }
10462
10463 /**
10464  * __ipr_remove - Remove a single adapter
10465  * @pdev:       pci device struct
10466  *
10467  * Adapter hot plug remove entry point.
10468  *
10469  * Return value:
10470  *      none
10471  **/
10472 static void __ipr_remove(struct pci_dev *pdev)
10473 {
10474         unsigned long host_lock_flags = 0;
10475         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10476         int i;
10477         unsigned long driver_lock_flags;
10478         ENTER;
10479
10480         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10481         while (ioa_cfg->in_reset_reload) {
10482                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10483                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10484                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10485         }
10486
10487         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10488                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10489                 ioa_cfg->hrrq[i].removing_ioa = 1;
10490                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10491         }
10492         wmb();
10493         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10494
10495         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10496         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10497         flush_work(&ioa_cfg->work_q);
10498         if (ioa_cfg->reset_work_q)
10499                 flush_workqueue(ioa_cfg->reset_work_q);
10500         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10501         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10502
10503         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10504         list_del(&ioa_cfg->queue);
10505         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10506
10507         if (ioa_cfg->sdt_state == ABORT_DUMP)
10508                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10509         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10510
10511         ipr_free_all_resources(ioa_cfg);
10512
10513         LEAVE;
10514 }
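/*
 * The per-queue removing_ioa flags are raised under each hrrq _lock
 * and ordered with wmb() before the bringdown is initiated, so the
 * submission and completion paths observe the flag before the
 * adapter starts coming down.
 */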
10515
10516 /**
10517  * ipr_remove - IOA hot plug remove entry point
10518  * @pdev:       pci device struct
10519  *
10520  * Adapter hot plug remove entry point.
10521  *
10522  * Return value:
10523  *      none
10524  **/
10525 static void ipr_remove(struct pci_dev *pdev)
10526 {
10527         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10528
10529         ENTER;
10530
10531         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10532                               &ipr_trace_attr);
10533         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10534                              &ipr_dump_attr);
10535         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10536                         &ipr_ioa_async_err_log);
10537         scsi_remove_host(ioa_cfg->host);
10538
10539         __ipr_remove(pdev);
10540
10541         LEAVE;
10542 }
10543
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10550 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10551 {
10552         struct ipr_ioa_cfg *ioa_cfg;
10553         unsigned long flags;
10554         int rc, i;
10555
10556         rc = ipr_probe_ioa(pdev, dev_id);
10557
10558         if (rc)
10559                 return rc;
10560
10561         ioa_cfg = pci_get_drvdata(pdev);
10562         rc = ipr_probe_ioa_part2(ioa_cfg);
10563
10564         if (rc) {
10565                 __ipr_remove(pdev);
10566                 return rc;
10567         }
10568
10569         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10570
10571         if (rc) {
10572                 __ipr_remove(pdev);
10573                 return rc;
10574         }
10575
10576         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10577                                    &ipr_trace_attr);
10578
10579         if (rc) {
10580                 scsi_remove_host(ioa_cfg->host);
10581                 __ipr_remove(pdev);
10582                 return rc;
10583         }
10584
10585         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10586                         &ipr_ioa_async_err_log);
10587
        if (rc) {
                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                &ipr_trace_attr);
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }
10597
10598         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10599                                    &ipr_dump_attr);
10600
10601         if (rc) {
10602                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10603                                       &ipr_ioa_async_err_log);
10604                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10605                                       &ipr_trace_attr);
10606                 scsi_remove_host(ioa_cfg->host);
10607                 __ipr_remove(pdev);
10608                 return rc;
10609         }
10610         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10611         ioa_cfg->scan_enabled = 1;
10612         schedule_work(&ioa_cfg->work_q);
10613         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10614
10615         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10616
10617         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10618                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10619                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10620                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10621                 }
10622         }
10623
10624         scsi_scan_host(ioa_cfg->host);
10625
10626         return 0;
10627 }
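/*
 * Probe is two-stage: ipr_probe_ioa() allocates and maps everything,
 * then ipr_probe_ioa_part2() brings the adapter operational.  Every
 * failure after that point unwinds through __ipr_remove(), which
 * tears down in reverse whatever had been registered before the
 * error.
 */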
10628
10629 /**
10630  * ipr_shutdown - Shutdown handler.
10631  * @pdev:       pci device struct
10632  *
10633  * This function is invoked upon system shutdown/reboot. It will issue
10634  * an adapter shutdown to the adapter to flush the write cache.
10635  *
10636  * Return value:
10637  *      none
10638  **/
10639 static void ipr_shutdown(struct pci_dev *pdev)
10640 {
10641         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10642         unsigned long lock_flags = 0;
10643         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10644         int i;
10645
10646         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10647         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10648                 ioa_cfg->iopoll_weight = 0;
10649                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10650                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10651         }
10652
10653         while (ioa_cfg->in_reset_reload) {
10654                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10655                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10656                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10657         }
10658
10659         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10660                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10661
10662         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10663         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10664         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10665         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10666                 ipr_free_irqs(ioa_cfg);
10667                 pci_disable_device(ioa_cfg->pdev);
10668         }
10669 }
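/*
 * With ipr_fast_reboot set on a SIS64 adapter, a restart takes the
 * quicker IPR_SHUTDOWN_QUIESCE bringdown and then frees the IRQs and
 * disables the PCI device directly, rather than the ordinary
 * IPR_SHUTDOWN_NORMAL path used for halt and power-off.
 */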
10670
static const struct pci_device_id ipr_pci_table[] = {
10672         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10673                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10674         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10675                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10676         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10677                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10678         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10679                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10680         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10681                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10682         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10683                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10684         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10685                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10686         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10687                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10688                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10689         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10690               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10691         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10692               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10693               IPR_USE_LONG_TRANSOP_TIMEOUT },
10694         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10695               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10696               IPR_USE_LONG_TRANSOP_TIMEOUT },
10697         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10698               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10699         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10700               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10701               IPR_USE_LONG_TRANSOP_TIMEOUT},
10702         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10703               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10704               IPR_USE_LONG_TRANSOP_TIMEOUT },
10705         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10706               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10707               IPR_USE_LONG_TRANSOP_TIMEOUT },
10708         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10709               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10710         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10711               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10712         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10713               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10714               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10715         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10716                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10717         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10718                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10719         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10720                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10721                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10722         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10723                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10724                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10725         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10726                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10727         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10728                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10729         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10730                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10731         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10732                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10733         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10734                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10735         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10736                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10737         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10739         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10741         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10743         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10745         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10747         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10749         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10751         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10753         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10755         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10757         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10759         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10760                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10761         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10762                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10763         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10764                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10765         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10766                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10767         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10768                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10769         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10770                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10771         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10772                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10773         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10774                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10775         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10776                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10777         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10778                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10779         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10780                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10781         { }
10782 };
10783 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
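/*
 * MODULE_DEVICE_TABLE() exports the id table above in the module's
 * .modinfo section, which is how udev/modprobe autoload ipr when a
 * matching PCI function (vendor/device plus IBM subsystem id)
 * appears.
 */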
10784
10785 static const struct pci_error_handlers ipr_err_handler = {
10786         .error_detected = ipr_pci_error_detected,
10787         .mmio_enabled = ipr_pci_mmio_enabled,
10788         .slot_reset = ipr_pci_slot_reset,
10789 };
10790
10791 static struct pci_driver ipr_driver = {
10792         .name = IPR_NAME,
10793         .id_table = ipr_pci_table,
10794         .probe = ipr_probe,
10795         .remove = ipr_remove,
10796         .shutdown = ipr_shutdown,
10797         .err_handler = &ipr_err_handler,
10798 };
10799
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
10806 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10807 {
10808         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10809 }
10810
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         notifier block
 * @event:      system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:        unused
 *
 * Return value:
 *      NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
10817 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10818 {
10819         struct ipr_cmnd *ipr_cmd;
10820         struct ipr_ioa_cfg *ioa_cfg;
10821         unsigned long flags = 0, driver_lock_flags;
10822
10823         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10824                 return NOTIFY_DONE;
10825
10826         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10827
10828         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10829                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10830                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10831                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10832                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10833                         continue;
10834                 }
10835
10836                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10837                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10838                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10839                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10840                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10841
10842                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10843                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10844         }
10845         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10846
10847         return NOTIFY_OK;
10848 }
10849
static struct notifier_block ipr_notifier = {
        .notifier_call = ipr_halt,
};
10853
10854 /**
10855  * ipr_init - Module entry point
10856  *
10857  * Return value:
10858  *      0 on success / negative value on failure
10859  **/
10860 static int __init ipr_init(void)
10861 {
10862         int rc;
10863
10864         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10865                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10866
10867         register_reboot_notifier(&ipr_notifier);
10868         rc = pci_register_driver(&ipr_driver);
10869         if (rc) {
10870                 unregister_reboot_notifier(&ipr_notifier);
10871                 return rc;
10872         }
10873
10874         return 0;
10875 }
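/*
 * The reboot notifier is registered before the PCI driver and torn
 * back down if pci_register_driver() fails, so a loaded module always
 * has ipr_halt() armed for the adapters it is driving.
 */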
10876
10877 /**
10878  * ipr_exit - Module unload
10879  *
10880  * Module unload entry point.
10881  *
10882  * Return value:
10883  *      none
10884  **/
10885 static void __exit ipr_exit(void)
10886 {
10887         unregister_reboot_notifier(&ipr_notifier);
10888         pci_unregister_driver(&ipr_driver);
10889 }
10890
10891 module_init(ipr_init);
10892 module_exit(ipr_exit);