/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};
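
/*
 * Editorial note (not in the original source): in each entry above the
 * unnamed nested initializer fills the trailing interrupt register
 * offset block of struct ipr_chip_cfg_t declared in ipr.h; only the
 * offsets differ between the three chip families.
 */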

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
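
/*
 * Illustrative example (editorial note, not part of the original source):
 * the module parameters declared above are given at load time, e.g.
 *
 *   modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * which selects U320 as the maximum bus speed, raises driver log
 * verbosity, and enables reduced timeouts and retries, per the
 * MODULE_PARM_DESC strings above.
 */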

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:               trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

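        /*
         * Editorial note: the next statement advances a wrap-around index
         * into the circular driver trace buffer; IPR_TRACE_INDEX_MASK keeps
         * the atomically incremented counter within ioa_cfg->trace[].
         */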
        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
        unsigned long hrrq_flags;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_sata_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = __ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:                       done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

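/**
 * ipr_get_hrrq_index - Pick the HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Editorial note: this comment was added for clarity and is not in the
 * original source. With a single queue, index 0 is always used; with
 * multiple queues, index 0 (IPR_INIT_HRRQ) is left to internally
 * generated commands and indexes 1..hrrq_num-1 are handed out
 * round-robin.
 *
 * Return value:
 *      hrrq index
 **/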
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:               HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
1182 }
1183
1184 /**
1185  * ipr_init_res_entry - Initialize a resource entry struct.
1186  * @res:        resource entry struct
1187  * @cfgtew:     config table entry wrapper struct
1188  *
1189  * Return value:
1190  *      none
1191  **/
1192 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1193                                struct ipr_config_table_entry_wrapper *cfgtew)
1194 {
1195         int found = 0;
1196         unsigned int proto;
1197         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1198         struct ipr_resource_entry *gscsi_res = NULL;
1199
1200         res->needs_sync_complete = 0;
1201         res->in_erp = 0;
1202         res->add_to_ml = 0;
1203         res->del_from_ml = 0;
1204         res->resetting_device = 0;
1205         res->reset_occurred = 0;
1206         res->sdev = NULL;
1207         res->sata_port = NULL;
1208
1209         if (ioa_cfg->sis64) {
1210                 proto = cfgtew->u.cfgte64->proto;
1211                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1212                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1213                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1214                 res->type = cfgtew->u.cfgte64->res_type;
1215
1216                 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1217                         sizeof(res->res_path));
1218
1219                 res->bus = 0;
1220                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1221                         sizeof(res->dev_lun.scsi_lun));
1222                 res->lun = scsilun_to_int(&res->dev_lun);
1223
1224                 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1225                         list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1226                                 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1227                                         found = 1;
1228                                         res->target = gscsi_res->target;
1229                                         break;
1230                                 }
1231                         }
1232                         if (!found) {
1233                                 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1234                                                                   ioa_cfg->max_devs_supported);
1235                                 set_bit(res->target, ioa_cfg->target_ids);
1236                         }
1237                 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1238                         res->bus = IPR_IOAFP_VIRTUAL_BUS;
1239                         res->target = 0;
1240                 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1241                         res->bus = IPR_ARRAY_VIRTUAL_BUS;
1242                         res->target = find_first_zero_bit(ioa_cfg->array_ids,
1243                                                           ioa_cfg->max_devs_supported);
1244                         set_bit(res->target, ioa_cfg->array_ids);
1245                 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1246                         res->bus = IPR_VSET_VIRTUAL_BUS;
1247                         res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1248                                                           ioa_cfg->max_devs_supported);
1249                         set_bit(res->target, ioa_cfg->vset_ids);
1250                 } else {
1251                         res->target = find_first_zero_bit(ioa_cfg->target_ids,
1252                                                           ioa_cfg->max_devs_supported);
1253                         set_bit(res->target, ioa_cfg->target_ids);
1254                 }
1255         } else {
1256                 proto = cfgtew->u.cfgte->proto;
1257                 res->qmodel = IPR_QUEUEING_MODEL(res);
1258                 res->flags = cfgtew->u.cfgte->flags;
1259                 if (res->flags & IPR_IS_IOA_RESOURCE)
1260                         res->type = IPR_RES_TYPE_IOAFP;
1261                 else
1262                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1263
1264                 res->bus = cfgtew->u.cfgte->res_addr.bus;
1265                 res->target = cfgtew->u.cfgte->res_addr.target;
1266                 res->lun = cfgtew->u.cfgte->res_addr.lun;
1267                 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1268         }
1269
1270         ipr_update_ata_class(res, proto);
1271 }
1272
1273 /**
1274  * ipr_is_same_device - Determine if two devices are the same.
1275  * @res:        resource entry struct
1276  * @cfgtew:     config table entry wrapper struct
1277  *
1278  * Return value:
1279  *      1 if the devices are the same / 0 otherwise
1280  **/
1281 static int ipr_is_same_device(struct ipr_resource_entry *res,
1282                               struct ipr_config_table_entry_wrapper *cfgtew)
1283 {
1284         if (res->ioa_cfg->sis64) {
1285                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1286                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1287                         !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1288                                         sizeof(cfgtew->u.cfgte64->lun))) {
1289                         return 1;
1290                 }
1291         } else {
1292                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1293                     res->target == cfgtew->u.cfgte->res_addr.target &&
1294                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1295                         return 1;
1296         }
1297
1298         return 0;
1299 }
1300
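/*
 * Usage sketch (illustrative only): when a config table entry arrives,
 * the driver walks its resource list with this helper to decide between
 * updating an existing entry and allocating a new one, e.g.:
 *
 *	list_for_each_entry(res, &ioa_cfg->used_res_q, queue)
 *		if (ipr_is_same_device(res, &cfgtew))
 *			break;
 *
 * On SIS64, identity is the 64-bit dev_id plus the LUN; on SIS32 it is
 * the bus/target/lun resource address.
 */
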
1301 /**
1302  * __ipr_format_res_path - Format the resource path for printing.
1303  * @res_path:   resource path
1304  * @buffer:     buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
1315         *p = '\0';
1316         p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1317         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318                 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1319
1320         return buffer;
1321 }
1322
1323 /**
1324  * ipr_format_res_path - Format the resource path for printing.
1325  * @ioa_cfg:    ioa config struct
1326  * @res_path:   resource path
1327  * @buffer:     buffer
1328  * @len:        length of buffer provided
1329  *
1330  * Return value:
1331  *      pointer to buffer
1332  **/
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334                                  u8 *res_path, char *buffer, int len)
1335 {
1336         char *p = buffer;
1337
1338         *p = '\0';
1339         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340         __ipr_format_res_path(res_path, p, len - (p - buffer));
1341         return buffer;
1342 }
1343
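/*
 * Usage sketch (illustrative only): both formatters fill a caller
 * supplied buffer and return it, so the result can be passed straight to
 * printk.  Assuming a stack buffer of IPR_MAX_RES_PATH_LENGTH bytes:
 *
 *	char buffer[IPR_MAX_RES_PATH_LENGTH];
 *
 *	sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
 *		    ipr_format_res_path(ioa_cfg, res->res_path,
 *					buffer, sizeof(buffer)));
 *
 * which yields e.g. "2/00-0E-01": the host number, then the path bytes
 * up to the 0xff terminator, joined with '-'.
 */
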
1344 /**
1345  * ipr_update_res_entry - Update the resource entry.
1346  * @res:        resource entry struct
1347  * @cfgtew:     config table entry wrapper struct
1348  *
1349  * Return value:
1350  *      none
1351  **/
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353                                  struct ipr_config_table_entry_wrapper *cfgtew)
1354 {
1355         char buffer[IPR_MAX_RES_PATH_LENGTH];
1356         unsigned int proto;
1357         int new_path = 0;
1358
1359         if (res->ioa_cfg->sis64) {
1360                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362                 res->type = cfgtew->u.cfgte64->res_type;
1363
1364                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365                         sizeof(struct ipr_std_inq_data));
1366
1367                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368                 proto = cfgtew->u.cfgte64->proto;
1369                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373                         sizeof(res->dev_lun.scsi_lun));
1374
1375                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376                                         sizeof(res->res_path))) {
1377                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378                                 sizeof(res->res_path));
1379                         new_path = 1;
1380                 }
1381
1382                 if (res->sdev && new_path)
1383                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384                                     ipr_format_res_path(res->ioa_cfg,
1385                                         res->res_path, buffer, sizeof(buffer)));
1386         } else {
1387                 res->flags = cfgtew->u.cfgte->flags;
1388                 if (res->flags & IPR_IS_IOA_RESOURCE)
1389                         res->type = IPR_RES_TYPE_IOAFP;
1390                 else
1391                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394                         sizeof(struct ipr_std_inq_data));
1395
1396                 res->qmodel = IPR_QUEUEING_MODEL(res);
1397                 proto = cfgtew->u.cfgte->proto;
1398                 res->res_handle = cfgtew->u.cfgte->res_handle;
1399         }
1400
1401         ipr_update_ata_class(res, proto);
1402 }
1403
1404 /**
1405  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406  *                        for the resource.
1407  * @res:        resource entry struct
1409  *
1410  * Return value:
1411  *      none
1412  **/
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414 {
1415         struct ipr_resource_entry *gscsi_res = NULL;
1416         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418         if (!ioa_cfg->sis64)
1419                 return;
1420
1421         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422                 clear_bit(res->target, ioa_cfg->array_ids);
1423         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424                 clear_bit(res->target, ioa_cfg->vset_ids);
1425         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428                                 return;
1429                 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431         } else if (res->bus == 0)
1432                 clear_bit(res->target, ioa_cfg->target_ids);
1433 }
1434
1435 /**
1436  * ipr_handle_config_change - Handle a config change from the adapter
1437  * @ioa_cfg:    ioa config struct
1438  * @hostrcb:    hostrcb
1439  *
1440  * Return value:
1441  *      none
1442  **/
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444                                      struct ipr_hostrcb *hostrcb)
1445 {
1446         struct ipr_resource_entry *res = NULL;
1447         struct ipr_config_table_entry_wrapper cfgtew;
1448         __be32 cc_res_handle;
1449
1450         u32 is_ndn = 1;
1451
1452         if (ioa_cfg->sis64) {
1453                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455         } else {
1456                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458         }
1459
1460         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461                 if (res->res_handle == cc_res_handle) {
1462                         is_ndn = 0;
1463                         break;
1464                 }
1465         }
1466
1467         if (is_ndn) {
1468                 if (list_empty(&ioa_cfg->free_res_q)) {
1469                         ipr_send_hcam(ioa_cfg,
1470                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471                                       hostrcb);
1472                         return;
1473                 }
1474
1475                 res = list_entry(ioa_cfg->free_res_q.next,
1476                                  struct ipr_resource_entry, queue);
1477
1478                 list_del(&res->queue);
1479                 ipr_init_res_entry(res, &cfgtew);
1480                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481         }
1482
1483         ipr_update_res_entry(res, &cfgtew);
1484
1485         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486                 if (res->sdev) {
1487                         res->del_from_ml = 1;
1488                         res->res_handle = IPR_INVALID_RES_HANDLE;
1489                         schedule_work(&ioa_cfg->work_q);
1490                 } else {
1491                         ipr_clear_res_target(res);
1492                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1493                 }
1494         } else if (!res->sdev || res->del_from_ml) {
1495                 res->add_to_ml = 1;
1496                 schedule_work(&ioa_cfg->work_q);
1497         }
1498
1499         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500 }
1501
1502 /**
1503  * ipr_process_ccn - Op done function for a CCN.
1504  * @ipr_cmd:    ipr command struct
1505  *
1506  * This function is the op done function for a configuration change
1507  * notification (CCN) HCAM (host controlled async message) from the adapter.
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513 {
1514         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1517
1518         list_del_init(&hostrcb->queue);
1519         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1520
1521         if (ioasc) {
1522                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524                         dev_err(&ioa_cfg->pdev->dev,
1525                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528         } else {
1529                 ipr_handle_config_change(ioa_cfg, hostrcb);
1530         }
1531 }
1532
1533 /**
1534  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535  * @i:          index into buffer
1536  * @buf:                string to modify
1537  *
1538  * This function will strip all trailing whitespace, pad the end
1539  * of the string with a single space, and NULL terminate the string.
1540  *
1541  * Return value:
1542  *      new length of string
1543  **/
1544 static int strip_and_pad_whitespace(int i, char *buf)
1545 {
1546         while (i && buf[i] == ' ')
1547                 i--;
1548         buf[i+1] = ' ';
1549         buf[i+2] = '\0';
1550         return i + 2;
1551 }
1552
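/*
 * Worked example (hypothetical input): called with i = 7 and
 * buf = "IBM     " (a vendor ID padded to 8 bytes), the loop backs up
 * over the trailing blanks to the 'M', then writes one space and a
 * terminator: buf becomes "IBM " and the returned length is 4, i.e. the
 * index at which the next field should be copied in.
 */
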
1553 /**
1554  * ipr_log_vpd_compact - Log the passed VPD compactly.
1555  * @prefix:             string to print at start of printk
1556  * @hostrcb:    hostrcb pointer
1557  * @vpd:                vendor/product id/sn struct
1558  *
1559  * Return value:
1560  *      none
1561  **/
1562 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563                                 struct ipr_vpd *vpd)
1564 {
1565         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1566         int i = 0;
1567
1568         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1570
1571         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1573
1574         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1575         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1576
1577         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1578 }
1579
1580 /**
1581  * ipr_log_vpd - Log the passed VPD to the error log.
1582  * @vpd:                vendor/product id/sn struct
1583  *
1584  * Return value:
1585  *      none
1586  **/
1587 static void ipr_log_vpd(struct ipr_vpd *vpd)
1588 {
1589         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1590                     + IPR_SERIAL_NUM_LEN];
1591
1592         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1593         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1594                IPR_PROD_ID_LEN);
1595         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1596         ipr_err("Vendor/Product ID: %s\n", buffer);
1597
1598         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1599         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1600         ipr_err("    Serial Number: %s\n", buffer);
1601 }
1602
1603 /**
1604  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605  * @prefix:             string to print at start of printk
1606  * @hostrcb:    hostrcb pointer
1607  * @vpd:                vendor/product id/sn/wwn struct
1608  *
1609  * Return value:
1610  *      none
1611  **/
1612 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1613                                     struct ipr_ext_vpd *vpd)
1614 {
1615         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1616         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1617                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1618 }
1619
1620 /**
1621  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622  * @vpd:                vendor/product id/sn/wwn struct
1623  *
1624  * Return value:
1625  *      none
1626  **/
1627 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1628 {
1629         ipr_log_vpd(&vpd->vpd);
1630         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1631                 be32_to_cpu(vpd->wwid[1]));
1632 }
1633
1634 /**
1635  * ipr_log_enhanced_cache_error - Log a cache error.
1636  * @ioa_cfg:    ioa config struct
1637  * @hostrcb:    hostrcb struct
1638  *
1639  * Return value:
1640  *      none
1641  **/
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1643                                          struct ipr_hostrcb *hostrcb)
1644 {
1645         struct ipr_hostrcb_type_12_error *error;
1646
1647         if (ioa_cfg->sis64)
1648                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1649         else
1650                 error = &hostrcb->hcam.u.error.u.type_12_error;
1651
1652         ipr_err("-----Current Configuration-----\n");
1653         ipr_err("Cache Directory Card Information:\n");
1654         ipr_log_ext_vpd(&error->ioa_vpd);
1655         ipr_err("Adapter Card Information:\n");
1656         ipr_log_ext_vpd(&error->cfc_vpd);
1657
1658         ipr_err("-----Expected Configuration-----\n");
1659         ipr_err("Cache Directory Card Information:\n");
1660         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1661         ipr_err("Adapter Card Information:\n");
1662         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1663
1664         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665                      be32_to_cpu(error->ioa_data[0]),
1666                      be32_to_cpu(error->ioa_data[1]),
1667                      be32_to_cpu(error->ioa_data[2]));
1668 }
1669
1670 /**
1671  * ipr_log_cache_error - Log a cache error.
1672  * @ioa_cfg:    ioa config struct
1673  * @hostrcb:    hostrcb struct
1674  *
1675  * Return value:
1676  *      none
1677  **/
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1679                                 struct ipr_hostrcb *hostrcb)
1680 {
1681         struct ipr_hostrcb_type_02_error *error =
1682                 &hostrcb->hcam.u.error.u.type_02_error;
1683
1684         ipr_err("-----Current Configuration-----\n");
1685         ipr_err("Cache Directory Card Information:\n");
1686         ipr_log_vpd(&error->ioa_vpd);
1687         ipr_err("Adapter Card Information:\n");
1688         ipr_log_vpd(&error->cfc_vpd);
1689
1690         ipr_err("-----Expected Configuration-----\n");
1691         ipr_err("Cache Directory Card Information:\n");
1692         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1693         ipr_err("Adapter Card Information:\n");
1694         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1695
1696         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697                      be32_to_cpu(error->ioa_data[0]),
1698                      be32_to_cpu(error->ioa_data[1]),
1699                      be32_to_cpu(error->ioa_data[2]));
1700 }
1701
1702 /**
1703  * ipr_log_enhanced_config_error - Log a configuration error.
1704  * @ioa_cfg:    ioa config struct
1705  * @hostrcb:    hostrcb struct
1706  *
1707  * Return value:
1708  *      none
1709  **/
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711                                           struct ipr_hostrcb *hostrcb)
1712 {
1713         int errors_logged, i;
1714         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1715         struct ipr_hostrcb_type_13_error *error;
1716
1717         error = &hostrcb->hcam.u.error.u.type_13_error;
1718         errors_logged = be32_to_cpu(error->errors_logged);
1719
1720         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721                 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723         dev_entry = error->dev;
1724
1725         for (i = 0; i < errors_logged; i++, dev_entry++) {
1726                 ipr_err_separator;
1727
1728                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1729                 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731                 ipr_err("-----New Device Information-----\n");
1732                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734                 ipr_err("Cache Directory Card Information:\n");
1735                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737                 ipr_err("Adapter Card Information:\n");
1738                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739         }
1740 }
1741
1742 /**
1743  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1744  * @ioa_cfg:    ioa config struct
1745  * @hostrcb:    hostrcb struct
1746  *
1747  * Return value:
1748  *      none
1749  **/
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751                                        struct ipr_hostrcb *hostrcb)
1752 {
1753         int errors_logged, i;
1754         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1755         struct ipr_hostrcb_type_23_error *error;
1756         char buffer[IPR_MAX_RES_PATH_LENGTH];
1757
1758         error = &hostrcb->hcam.u.error64.u.type_23_error;
1759         errors_logged = be32_to_cpu(error->errors_logged);
1760
1761         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762                 be32_to_cpu(error->errors_detected), errors_logged);
1763
1764         dev_entry = error->dev;
1765
1766         for (i = 0; i < errors_logged; i++, dev_entry++) {
1767                 ipr_err_separator;
1768
1769                 ipr_err("Device %d : %s", i + 1,
1770                         __ipr_format_res_path(dev_entry->res_path,
1771                                               buffer, sizeof(buffer)));
1772                 ipr_log_ext_vpd(&dev_entry->vpd);
1773
1774                 ipr_err("-----New Device Information-----\n");
1775                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1776
1777                 ipr_err("Cache Directory Card Information:\n");
1778                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780                 ipr_err("Adapter Card Information:\n");
1781                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782         }
1783 }
1784
1785 /**
1786  * ipr_log_config_error - Log a configuration error.
1787  * @ioa_cfg:    ioa config struct
1788  * @hostrcb:    hostrcb struct
1789  *
1790  * Return value:
1791  *      none
1792  **/
1793 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1794                                  struct ipr_hostrcb *hostrcb)
1795 {
1796         int errors_logged, i;
1797         struct ipr_hostrcb_device_data_entry *dev_entry;
1798         struct ipr_hostrcb_type_03_error *error;
1799
1800         error = &hostrcb->hcam.u.error.u.type_03_error;
1801         errors_logged = be32_to_cpu(error->errors_logged);
1802
1803         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1804                 be32_to_cpu(error->errors_detected), errors_logged);
1805
1806         dev_entry = error->dev;
1807
1808         for (i = 0; i < errors_logged; i++, dev_entry++) {
1809                 ipr_err_separator;
1810
1811                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1812                 ipr_log_vpd(&dev_entry->vpd);
1813
1814                 ipr_err("-----New Device Information-----\n");
1815                 ipr_log_vpd(&dev_entry->new_vpd);
1816
1817                 ipr_err("Cache Directory Card Information:\n");
1818                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1819
1820                 ipr_err("Adapter Card Information:\n");
1821                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1822
1823                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1824                         be32_to_cpu(dev_entry->ioa_data[0]),
1825                         be32_to_cpu(dev_entry->ioa_data[1]),
1826                         be32_to_cpu(dev_entry->ioa_data[2]),
1827                         be32_to_cpu(dev_entry->ioa_data[3]),
1828                         be32_to_cpu(dev_entry->ioa_data[4]));
1829         }
1830 }
1831
1832 /**
1833  * ipr_log_enhanced_array_error - Log an array configuration error.
1834  * @ioa_cfg:    ioa config struct
1835  * @hostrcb:    hostrcb struct
1836  *
1837  * Return value:
1838  *      none
1839  **/
1840 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1841                                          struct ipr_hostrcb *hostrcb)
1842 {
1843         int i, num_entries;
1844         struct ipr_hostrcb_type_14_error *error;
1845         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1846         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1847
1848         error = &hostrcb->hcam.u.error.u.type_14_error;
1849
1850         ipr_err_separator;
1851
1852         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1853                 error->protection_level,
1854                 ioa_cfg->host->host_no,
1855                 error->last_func_vset_res_addr.bus,
1856                 error->last_func_vset_res_addr.target,
1857                 error->last_func_vset_res_addr.lun);
1858
1859         ipr_err_separator;
1860
1861         array_entry = error->array_member;
1862         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1863                             ARRAY_SIZE(error->array_member));
1864
1865         for (i = 0; i < num_entries; i++, array_entry++) {
1866                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867                         continue;
1868
1869                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870                         ipr_err("Exposed Array Member %d:\n", i);
1871                 else
1872                         ipr_err("Array Member %d:\n", i);
1873
1874                 ipr_log_ext_vpd(&array_entry->vpd);
1875                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877                                  "Expected Location");
1878
1879                 ipr_err_separator;
1880         }
1881 }
1882
1883 /**
1884  * ipr_log_array_error - Log an array configuration error.
1885  * @ioa_cfg:    ioa config struct
1886  * @hostrcb:    hostrcb struct
1887  *
1888  * Return value:
1889  *      none
1890  **/
1891 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1892                                 struct ipr_hostrcb *hostrcb)
1893 {
1894         int i;
1895         struct ipr_hostrcb_type_04_error *error;
1896         struct ipr_hostrcb_array_data_entry *array_entry;
1897         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1898
1899         error = &hostrcb->hcam.u.error.u.type_04_error;
1900
1901         ipr_err_separator;
1902
1903         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1904                 error->protection_level,
1905                 ioa_cfg->host->host_no,
1906                 error->last_func_vset_res_addr.bus,
1907                 error->last_func_vset_res_addr.target,
1908                 error->last_func_vset_res_addr.lun);
1909
1910         ipr_err_separator;
1911
1912         array_entry = error->array_member;
1913
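        /*
         * The 18 possible members are split across two fixed arrays in the
         * hostrcb: entries 0-9 come from array_member[] and entries 10-17
         * from array_member2[], hence the pointer switch at i == 9 below.
         */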
1914         for (i = 0; i < 18; i++) {
1915                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1916                         continue;
1917
1918                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1919                         ipr_err("Exposed Array Member %d:\n", i);
1920                 else
1921                         ipr_err("Array Member %d:\n", i);
1922
1923                 ipr_log_vpd(&array_entry->vpd);
1924
1925                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1926                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1927                                  "Expected Location");
1928
1929                 ipr_err_separator;
1930
1931                 if (i == 9)
1932                         array_entry = error->array_member2;
1933                 else
1934                         array_entry++;
1935         }
1936 }
1937
1938 /**
1939  * ipr_log_hex_data - Log additional hex IOA error data.
1940  * @ioa_cfg:    ioa config struct
1941  * @data:               IOA error data
1942  * @len:                data length
1943  *
1944  * Return value:
1945  *      none
1946  **/
1947 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1948 {
1949         int i;
1950
1951         if (len == 0)
1952                 return;
1953
1954         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1955                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1956
1957         for (i = 0; i < len / 4; i += 4) {
1958                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1959                         be32_to_cpu(data[i]),
1960                         be32_to_cpu(data[i+1]),
1961                         be32_to_cpu(data[i+2]),
1962                         be32_to_cpu(data[i+3]));
1963         }
1964 }
1965
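/*
 * Output sketch (illustrative): @len is in bytes, so len / 4 is the word
 * count and each iteration consumes four big-endian words.  For the
 * hypothetical words 0x11111111 0x22222222 0x33333333 0x44444444 the
 * first line would read:
 *
 *	00000000: 11111111 22222222 33333333 44444444
 *
 * i.e. one line per 16 bytes, prefixed with the byte offset (i * 4).
 */
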
1966 /**
1967  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1968  * @ioa_cfg:    ioa config struct
1969  * @hostrcb:    hostrcb struct
1970  *
1971  * Return value:
1972  *      none
1973  **/
1974 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1975                                             struct ipr_hostrcb *hostrcb)
1976 {
1977         struct ipr_hostrcb_type_17_error *error;
1978
1979         if (ioa_cfg->sis64)
1980                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1981         else
1982                 error = &hostrcb->hcam.u.error.u.type_17_error;
1983
1984         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1985         strim(error->failure_reason);
1986
1987         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1988                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1989         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1990         ipr_log_hex_data(ioa_cfg, error->data,
1991                          be32_to_cpu(hostrcb->hcam.length) -
1992                          (offsetof(struct ipr_hostrcb_error, u) +
1993                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1994 }
1995
1996 /**
1997  * ipr_log_dual_ioa_error - Log a dual adapter error.
1998  * @ioa_cfg:    ioa config struct
1999  * @hostrcb:    hostrcb struct
2000  *
2001  * Return value:
2002  *      none
2003  **/
2004 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2005                                    struct ipr_hostrcb *hostrcb)
2006 {
2007         struct ipr_hostrcb_type_07_error *error;
2008
2009         error = &hostrcb->hcam.u.error.u.type_07_error;
2010         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2011         strim(error->failure_reason);
2012
2013         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2014                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2015         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2016         ipr_log_hex_data(ioa_cfg, error->data,
2017                          be32_to_cpu(hostrcb->hcam.length) -
2018                          (offsetof(struct ipr_hostrcb_error, u) +
2019                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2020 }
2021
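/*
 * Note on the length math above (types 07 and 17): hcam.length covers the
 * whole error buffer, so the size of the trailing variable-length hex data
 * is recovered by subtracting the fixed headers, conceptually:
 *
 *	data_len = be32_to_cpu(hostrcb->hcam.length)
 *		 - offsetof(struct ipr_hostrcb_error, u)
 *		 - offsetof(struct ipr_hostrcb_type_07_error, data);
 *
 * The fabric error loggers below use the same pattern for their
 * variable-length descriptor lists.
 */
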
2022 static const struct {
2023         u8 active;
2024         char *desc;
2025 } path_active_desc[] = {
2026         { IPR_PATH_NO_INFO, "Path" },
2027         { IPR_PATH_ACTIVE, "Active path" },
2028         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2029 };
2030
2031 static const struct {
2032         u8 state;
2033         char *desc;
2034 } path_state_desc[] = {
2035         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2036         { IPR_PATH_HEALTHY, "is healthy" },
2037         { IPR_PATH_DEGRADED, "is degraded" },
2038         { IPR_PATH_FAILED, "is failed" }
2039 };
2040
2041 /**
2042  * ipr_log_fabric_path - Log a fabric path error
2043  * @hostrcb:    hostrcb struct
2044  * @fabric:             fabric descriptor
2045  *
2046  * Return value:
2047  *      none
2048  **/
2049 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2050                                 struct ipr_hostrcb_fabric_desc *fabric)
2051 {
2052         int i, j;
2053         u8 path_state = fabric->path_state;
2054         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055         u8 state = path_state & IPR_PATH_STATE_MASK;
2056
2057         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058                 if (path_active_desc[i].active != active)
2059                         continue;
2060
2061                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062                         if (path_state_desc[j].state != state)
2063                                 continue;
2064
2065                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2066                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2067                                              path_active_desc[i].desc, path_state_desc[j].desc,
2068                                              fabric->ioa_port);
2069                         } else if (fabric->cascaded_expander == 0xff) {
2070                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2071                                              path_active_desc[i].desc, path_state_desc[j].desc,
2072                                              fabric->ioa_port, fabric->phy);
2073                         } else if (fabric->phy == 0xff) {
2074                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2075                                              path_active_desc[i].desc, path_state_desc[j].desc,
2076                                              fabric->ioa_port, fabric->cascaded_expander);
2077                         } else {
2078                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079                                              path_active_desc[i].desc, path_state_desc[j].desc,
2080                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2081                         }
2082                         return;
2083                 }
2084         }
2085
2086         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2087                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2088 }
2089
2090 /**
2091  * ipr_log64_fabric_path - Log a fabric path error
2092  * @hostrcb:    hostrcb struct
2093  * @fabric:             fabric descriptor
2094  *
2095  * Return value:
2096  *      none
2097  **/
2098 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2099                                   struct ipr_hostrcb64_fabric_desc *fabric)
2100 {
2101         int i, j;
2102         u8 path_state = fabric->path_state;
2103         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2104         u8 state = path_state & IPR_PATH_STATE_MASK;
2105         char buffer[IPR_MAX_RES_PATH_LENGTH];
2106
2107         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2108                 if (path_active_desc[i].active != active)
2109                         continue;
2110
2111                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2112                         if (path_state_desc[j].state != state)
2113                                 continue;
2114
2115                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2116                                      path_active_desc[i].desc, path_state_desc[j].desc,
2117                                      ipr_format_res_path(hostrcb->ioa_cfg,
2118                                                 fabric->res_path,
2119                                                 buffer, sizeof(buffer)));
2120                         return;
2121                 }
2122         }
2123
2124         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2125                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2126                                     buffer, sizeof(buffer)));
2127 }
2128
2129 static const struct {
2130         u8 type;
2131         char *desc;
2132 } path_type_desc[] = {
2133         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2134         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2135         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2136         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2137 };
2138
2139 static const struct {
2140         u8 status;
2141         char *desc;
2142 } path_status_desc[] = {
2143         { IPR_PATH_CFG_NO_PROB, "Functional" },
2144         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2145         { IPR_PATH_CFG_FAILED, "Failed" },
2146         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2147         { IPR_PATH_NOT_DETECTED, "Missing" },
2148         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2149 };
2150
2151 static const char *link_rate[] = {
2152         "unknown",
2153         "disabled",
2154         "phy reset problem",
2155         "spinup hold",
2156         "port selector",
2157         "unknown",
2158         "unknown",
2159         "unknown",
2160         "1.5Gbps",
2161         "3.0Gbps",
2162         "unknown",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown"
2168 };
2169
2170 /**
2171  * ipr_log_path_elem - Log a fabric path element.
2172  * @hostrcb:    hostrcb struct
2173  * @cfg:                fabric path element struct
2174  *
2175  * Return value:
2176  *      none
2177  **/
2178 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2179                               struct ipr_hostrcb_config_element *cfg)
2180 {
2181         int i, j;
2182         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2183         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2184
2185         if (type == IPR_PATH_CFG_NOT_EXIST)
2186                 return;
2187
2188         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2189                 if (path_type_desc[i].type != type)
2190                         continue;
2191
2192                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2193                         if (path_status_desc[j].status != status)
2194                                 continue;
2195
2196                         if (type == IPR_PATH_CFG_IOA_PORT) {
2197                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198                                              path_status_desc[j].desc, path_type_desc[i].desc,
2199                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2200                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2201                         } else {
2202                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2203                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2205                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2206                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2207                                 } else if (cfg->cascaded_expander == 0xff) {
2208                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2209                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2210                                                      path_type_desc[i].desc, cfg->phy,
2211                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2213                                 } else if (cfg->phy == 0xff) {
2214                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2215                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2216                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2217                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2218                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2219                                 } else {
2220                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2222                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2223                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2224                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2225                                 }
2226                         }
2227                         return;
2228                 }
2229         }
2230
2231         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2233                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2234                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2235 }
2236
2237 /**
2238  * ipr_log64_path_elem - Log a fabric path element.
2239  * @hostrcb:    hostrcb struct
2240  * @cfg:                fabric path element struct
2241  *
2242  * Return value:
2243  *      none
2244  **/
2245 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2246                                 struct ipr_hostrcb64_config_element *cfg)
2247 {
2248         int i, j;
2249         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2250         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2251         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2252         char buffer[IPR_MAX_RES_PATH_LENGTH];
2253
2254         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2255                 return;
2256
2257         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2258                 if (path_type_desc[i].type != type)
2259                         continue;
2260
2261                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2262                         if (path_status_desc[j].status != status)
2263                                 continue;
2264
2265                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266                                      path_status_desc[j].desc, path_type_desc[i].desc,
2267                                      ipr_format_res_path(hostrcb->ioa_cfg,
2268                                         cfg->res_path, buffer, sizeof(buffer)),
2269                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2270                                         be32_to_cpu(cfg->wwid[0]),
2271                                         be32_to_cpu(cfg->wwid[1]));
2272                         return;
2273                 }
2274         }
2275         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276                      "WWN=%08X%08X\n", cfg->type_status,
2277                      ipr_format_res_path(hostrcb->ioa_cfg,
2278                         cfg->res_path, buffer, sizeof(buffer)),
2279                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2280                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2281 }
2282
2283 /**
2284  * ipr_log_fabric_error - Log a fabric error.
2285  * @ioa_cfg:    ioa config struct
2286  * @hostrcb:    hostrcb struct
2287  *
2288  * Return value:
2289  *      none
2290  **/
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292                                  struct ipr_hostrcb *hostrcb)
2293 {
2294         struct ipr_hostrcb_type_20_error *error;
2295         struct ipr_hostrcb_fabric_desc *fabric;
2296         struct ipr_hostrcb_config_element *cfg;
2297         int i, add_len;
2298
2299         error = &hostrcb->hcam.u.error.u.type_20_error;
2300         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303         add_len = be32_to_cpu(hostrcb->hcam.length) -
2304                 (offsetof(struct ipr_hostrcb_error, u) +
2305                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308                 ipr_log_fabric_path(hostrcb, fabric);
2309                 for_each_fabric_cfg(fabric, cfg)
2310                         ipr_log_path_elem(hostrcb, cfg);
2311
2312                 add_len -= be16_to_cpu(fabric->length);
2313                 fabric = (struct ipr_hostrcb_fabric_desc *)
2314                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315         }
2316
2317         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2318 }
2319
2320 /**
2321  * ipr_log_sis64_array_error - Log a sis64 array error.
2322  * @ioa_cfg:    ioa config struct
2323  * @hostrcb:    hostrcb struct
2324  *
2325  * Return value:
2326  *      none
2327  **/
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329                                       struct ipr_hostrcb *hostrcb)
2330 {
2331         int i, num_entries;
2332         struct ipr_hostrcb_type_24_error *error;
2333         struct ipr_hostrcb64_array_data_entry *array_entry;
2334         char buffer[IPR_MAX_RES_PATH_LENGTH];
2335         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337         error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339         ipr_err_separator;
2340
2341         ipr_err("RAID %s Array Configuration: %s\n",
2342                 error->protection_level,
2343                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344                         buffer, sizeof(buffer)));
2345
2346         ipr_err_separator;
2347
2348         array_entry = error->array_member;
2349         num_entries = min_t(u32, error->num_entries,
2350                             ARRAY_SIZE(error->array_member));
2351
2352         for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355                         continue;
2356
2357                 if (error->exposed_mode_adn == i)
2358                         ipr_err("Exposed Array Member %d:\n", i);
2359                 else
2360                         ipr_err("Array Member %d:\n", i);
2361
2363                 ipr_log_ext_vpd(&array_entry->vpd);
2364                 ipr_err("Current Location: %s\n",
2365                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366                                 buffer, sizeof(buffer)));
2367                 ipr_err("Expected Location: %s\n",
2368                          ipr_format_res_path(ioa_cfg,
2369                                 array_entry->expected_res_path,
2370                                 buffer, sizeof(buffer)));
2371
2372                 ipr_err_separator;
2373         }
2374 }
2375
2376 /**
2377  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378  * @ioa_cfg:    ioa config struct
2379  * @hostrcb:    hostrcb struct
2380  *
2381  * Return value:
2382  *      none
2383  **/
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385                                        struct ipr_hostrcb *hostrcb)
2386 {
2387         struct ipr_hostrcb_type_30_error *error;
2388         struct ipr_hostrcb64_fabric_desc *fabric;
2389         struct ipr_hostrcb64_config_element *cfg;
2390         int i, add_len;
2391
2392         error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397         add_len = be32_to_cpu(hostrcb->hcam.length) -
2398                 (offsetof(struct ipr_hostrcb64_error, u) +
2399                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402                 ipr_log64_fabric_path(hostrcb, fabric);
2403                 for_each_fabric_cfg(fabric, cfg)
2404                         ipr_log64_path_elem(hostrcb, cfg);
2405
2406                 add_len -= be16_to_cpu(fabric->length);
2407                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409         }
2410
2411         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2412 }
2413
2414 /**
2415  * ipr_log_generic_error - Log an adapter error.
2416  * @ioa_cfg:    ioa config struct
2417  * @hostrcb:    hostrcb struct
2418  *
2419  * Return value:
2420  *      none
2421  **/
2422 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2423                                   struct ipr_hostrcb *hostrcb)
2424 {
2425         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2426                          be32_to_cpu(hostrcb->hcam.length));
2427 }
2428
2429 /**
2430  * ipr_log_sis64_device_error - Log a sis64 device error.
2431  * @ioa_cfg:    ioa config struct
2432  * @hostrcb:    hostrcb struct
2433  *
2434  * Return value:
2435  *      none
2436  **/
2437 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2438                                          struct ipr_hostrcb *hostrcb)
2439 {
2440         struct ipr_hostrcb_type_21_error *error;
2441         char buffer[IPR_MAX_RES_PATH_LENGTH];
2442
2443         error = &hostrcb->hcam.u.error64.u.type_21_error;
2444
2445         ipr_err("-----Failing Device Information-----\n");
2446         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2447                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2448                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2449         ipr_err("Device Resource Path: %s\n",
2450                 __ipr_format_res_path(error->res_path,
2451                                       buffer, sizeof(buffer)));
2452         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2453         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2454         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2455         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2456         ipr_err("SCSI Sense Data:\n");
2457         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2458         ipr_err("SCSI Command Descriptor Block:\n");
2459         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2460
2461         ipr_err("Additional IOA Data:\n");
2462         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2463 }
2464
2465 /**
2466  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2467  * @ioasc:      IOASC
2468  *
2469  * This function will return the index into the ipr_error_table
2470  * for the specified IOASC. If the IOASC is not in the table,
2471  * 0 will be returned, which points to the entry used for unknown errors.
2472  *
2473  * Return value:
2474  *      index into the ipr_error_table
2475  **/
2476 static u32 ipr_get_error(u32 ioasc)
2477 {
2478         int i;
2479
2480         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2481                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2482                         return i;
2483
2484         return 0;
2485 }
2486
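/*
 * Usage sketch (illustrative only): index 0 doubles as the "unknown
 * error" entry, so callers may use the result unconditionally:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		ipr_hcam_err(hostrcb, "%s\n",
 *			     ipr_error_table[error_index].error);
 *
 * The incoming IOASC is masked with IPR_IOASC_IOASC_MASK before the
 * comparison, so qualifier bits outside the mask are ignored.
 */
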
2487 /**
2488  * ipr_handle_log_data - Log an adapter error.
2489  * @ioa_cfg:    ioa config struct
2490  * @hostrcb:    hostrcb struct
2491  *
2492  * This function logs an adapter error to the system.
2493  *
2494  * Return value:
2495  *      none
2496  **/
2497 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2498                                 struct ipr_hostrcb *hostrcb)
2499 {
2500         u32 ioasc;
2501         int error_index;
2502         struct ipr_hostrcb_type_21_error *error;
2503
2504         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2505                 return;
2506
2507         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2508                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2509
2510         if (ioa_cfg->sis64)
2511                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2512         else
2513                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2514
2515         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2516             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2517                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2518                 scsi_report_bus_reset(ioa_cfg->host,
2519                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2520         }
2521
2522         error_index = ipr_get_error(ioasc);
2523
2524         if (!ipr_error_table[error_index].log_hcam)
2525                 return;
2526
2527         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2528             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2529                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2530
2531                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2532                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2533                                 return;
2534         }
2535
2536         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2537
2538         /* Set indication we have logged an error */
2539         ioa_cfg->errors_logged++;
2540
2541         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2542                 return;
2543         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2544                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2545
2546         switch (hostrcb->hcam.overlay_id) {
2547         case IPR_HOST_RCB_OVERLAY_ID_2:
2548                 ipr_log_cache_error(ioa_cfg, hostrcb);
2549                 break;
2550         case IPR_HOST_RCB_OVERLAY_ID_3:
2551                 ipr_log_config_error(ioa_cfg, hostrcb);
2552                 break;
2553         case IPR_HOST_RCB_OVERLAY_ID_4:
2554         case IPR_HOST_RCB_OVERLAY_ID_6:
2555                 ipr_log_array_error(ioa_cfg, hostrcb);
2556                 break;
2557         case IPR_HOST_RCB_OVERLAY_ID_7:
2558                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2559                 break;
2560         case IPR_HOST_RCB_OVERLAY_ID_12:
2561                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2562                 break;
2563         case IPR_HOST_RCB_OVERLAY_ID_13:
2564                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2565                 break;
2566         case IPR_HOST_RCB_OVERLAY_ID_14:
2567         case IPR_HOST_RCB_OVERLAY_ID_16:
2568                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2569                 break;
2570         case IPR_HOST_RCB_OVERLAY_ID_17:
2571                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2572                 break;
2573         case IPR_HOST_RCB_OVERLAY_ID_20:
2574                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2575                 break;
2576         case IPR_HOST_RCB_OVERLAY_ID_21:
2577                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2578                 break;
2579         case IPR_HOST_RCB_OVERLAY_ID_23:
2580                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2581                 break;
2582         case IPR_HOST_RCB_OVERLAY_ID_24:
2583         case IPR_HOST_RCB_OVERLAY_ID_26:
2584                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2585                 break;
2586         case IPR_HOST_RCB_OVERLAY_ID_30:
2587                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2588                 break;
2589         case IPR_HOST_RCB_OVERLAY_ID_1:
2590         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2591         default:
2592                 ipr_log_generic_error(ioa_cfg, hostrcb);
2593                 break;
2594         }
2595 }
2596
2597 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2598 {
2599         struct ipr_hostrcb *hostrcb;
2600
2601         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2602                                         struct ipr_hostrcb, queue);
2603
2604         if (unlikely(!hostrcb)) {
2605                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2606                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2607                                                 struct ipr_hostrcb, queue);
2608         }
2609
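        /*
         * At least one hostrcb is assumed to remain on one of the two
         * queues here; if both were ever empty, the list_del_init() below
         * would dereference a NULL hostrcb.
         */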
2610         list_del_init(&hostrcb->queue);
2611         return hostrcb;
2612 }
2613
2614 /**
2615  * ipr_process_error - Op done function for an adapter error log.
2616  * @ipr_cmd:    ipr command struct
2617  *
2618  * This function is the op done function for an error log HCAM (host
2619  * controlled async message) from the adapter. It will log the error and
2620  * send the HCAM back to the adapter.
2621  *
2622  * Return value:
2623  *      none
2624  **/
2625 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2626 {
2627         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2628         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2629         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2630         u32 fd_ioasc;
2631
2632         if (ioa_cfg->sis64)
2633                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2634         else
2635                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2636
2637         list_del_init(&hostrcb->queue);
2638         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2639
2640         if (!ioasc) {
2641                 ipr_handle_log_data(ioa_cfg, hostrcb);
2642                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2643                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2644         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2645                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2646                 dev_err(&ioa_cfg->pdev->dev,
2647                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2648         }
2649
2650         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2651         schedule_work(&ioa_cfg->work_q);
2652         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2653
2654         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2655 }
2656
2657 /**
2658  * ipr_timeout -  An internally generated op has timed out.
2659  * @ipr_cmd:    ipr command struct
2660  *
2661  * This function blocks host requests and initiates an
2662  * adapter reset.
2663  *
2664  * Return value:
2665  *      none
2666  **/
2667 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2668 {
2669         unsigned long lock_flags = 0;
2670         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2671
2672         ENTER;
2673         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2674
2675         ioa_cfg->errors_logged++;
2676         dev_err(&ioa_cfg->pdev->dev,
2677                 "Adapter being reset due to command timeout.\n");
2678
2679         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2680                 ioa_cfg->sdt_state = GET_DUMP;
2681
2682         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2683                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2684
2685         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2686         LEAVE;
2687 }
2688
2689 /**
2690  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2691  * @ipr_cmd:    ipr command struct
2692  *
2693  * This function blocks host requests and initiates an
2694  * adapter reset.
2695  *
2696  * Return value:
2697  *      none
2698  **/
2699 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2700 {
2701         unsigned long lock_flags = 0;
2702         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2703
2704         ENTER;
2705         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2706
2707         ioa_cfg->errors_logged++;
2708         dev_err(&ioa_cfg->pdev->dev,
2709                 "Adapter timed out transitioning to operational.\n");
2710
2711         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2712                 ioa_cfg->sdt_state = GET_DUMP;
2713
2714         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2715                 if (ipr_fastfail)
2716                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2717                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2718         }
2719
2720         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721         LEAVE;
2722 }
2723
2724 /**
2725  * ipr_find_ses_entry - Find matching SES in SES table
2726  * @res:        resource entry struct of SES
2727  *
2728  * Return value:
2729  *      pointer to SES table entry / NULL on failure
2730  **/
2731 static const struct ipr_ses_table_entry *
2732 ipr_find_ses_entry(struct ipr_resource_entry *res)
2733 {
2734         int i, j, matches;
2735         struct ipr_std_inq_vpids *vpids;
2736         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2737
2738         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2739                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2740                         if (ste->compare_product_id_byte[j] == 'X') {
2741                                 vpids = &res->std_inq_data.vpids;
2742                                 if (vpids->product_id[j] == ste->product_id[j])
2743                                         matches++;
2744                                 else
2745                                         break;
2746                         } else
2747                                 matches++;
2748                 }
2749
2750                 if (matches == IPR_PROD_ID_LEN)
2751                         return ste;
2752         }
2753
2754         return NULL;
2755 }
2756
2757 /**
2758  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2759  * @ioa_cfg:    ioa config struct
2760  * @bus:                SCSI bus
2761  * @bus_width:  bus width
2762  *
2763  * Return value:
2764  *      SCSI bus speed in units of 100KHz (1600 means 160 MHz).
2765  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2766  *      MB/sec is twice the clock rate in MHz (e.g. a wide bus at
2767  *      160 MHz moves up to 320 MB/sec).
2768  **/
2769 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2770 {
2771         struct ipr_resource_entry *res;
2772         const struct ipr_ses_table_entry *ste;
2773         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2774
2775         /* Loop through each config table entry in the config table buffer */
2776         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2777                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2778                         continue;
2779
2780                 if (bus != res->bus)
2781                         continue;
2782
2783                 if (!(ste = ipr_find_ses_entry(res)))
2784                         continue;
2785
2786                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2787         }
2788
2789         return max_xfer_rate;
2790 }
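/*
 * Worked example (illustrative numbers, assuming IPR_MAX_SCSI_RATE()
 * in ipr.h expands to (320 * 10) / (width / 8)): on a 16-bit wide
 * bus the default is 1600, i.e. a 160 MHz clock and up to
 * 320 MB/sec.  If an attached SES entry limits max_bus_speed_limit
 * to 160 (MB/sec), the loop above lowers the result to
 * (160 * 10) / 2 = 800, i.e. an 80 MHz clock.
 */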
2791
2792 /**
2793  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2794  * @ioa_cfg:            ioa config struct
2795  * @max_delay:          max delay in microseconds to wait
2796  *
2797  * Busy-waits for an IODEBUG ACK from the IOA.
2798  *
2799  * Return value:
2800  *      0 on success / other on failure
2801  **/
2802 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2803 {
2804         volatile u32 pcii_reg;
2805         int delay = 1;
2806
2807         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2808         while (delay < max_delay) {
2809                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2810
2811                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2812                         return 0;
2813
2814                 /* udelay cannot be used if delay is more than a few milliseconds */
2815                 if ((delay / 1000) > MAX_UDELAY_MS)
2816                         mdelay(delay / 1000);
2817                 else
2818                         udelay(delay);
2819
2820                 delay += delay;
2821         }
2822         return -EIO;
2823 }
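/*
 * The poll interval doubles on each pass (1, 2, 4, ... microseconds),
 * so in the worst case roughly twice max_delay microseconds are spent
 * busy-waiting before -EIO is returned.
 */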
2824
2825 /**
2826  * ipr_get_sis64_dump_data_section - Dump IOA memory
2827  * @ioa_cfg:                    ioa config struct
2828  * @start_addr:                 adapter address to dump
2829  * @dest:                       destination kernel buffer
2830  * @length_in_words:            length to dump in 4 byte words
2831  *
2832  * Return value:
2833  *      0 on success
2834  **/
2835 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2836                                            u32 start_addr,
2837                                            __be32 *dest, u32 length_in_words)
2838 {
2839         int i;
2840
2841         for (i = 0; i < length_in_words; i++) {
2842                 writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
2843                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2844                 dest++;
2845         }
2846
2847         return 0;
2848 }
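/*
 * SIS-64 adapters expose dump memory through an indirect register
 * pair: each 32-bit word is fetched by writing its adapter address
 * to dump_addr_reg and then reading the value back from
 * dump_data_reg, so no mailbox handshake is required.
 */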
2849
2850 /**
2851  * ipr_get_ldump_data_section - Dump IOA memory
2852  * @ioa_cfg:                    ioa config struct
2853  * @start_addr:                 adapter address to dump
2854  * @dest:                               destination kernel buffer
2855  * @length_in_words:    length to dump in 4 byte words
2856  *
2857  * Return value:
2858  *      0 on success / -EIO on failure
2859  **/
2860 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2861                                       u32 start_addr,
2862                                       __be32 *dest, u32 length_in_words)
2863 {
2864         volatile u32 temp_pcii_reg;
2865         int i, delay = 0;
2866
2867         if (ioa_cfg->sis64)
2868                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2869                                                        dest, length_in_words);
2870
2871         /* Write IOA interrupt reg starting LDUMP state  */
2872         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2873                ioa_cfg->regs.set_uproc_interrupt_reg32);
2874
2875         /* Wait for IO debug acknowledge */
2876         if (ipr_wait_iodbg_ack(ioa_cfg,
2877                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2878                 dev_err(&ioa_cfg->pdev->dev,
2879                         "IOA dump long data transfer timeout\n");
2880                 return -EIO;
2881         }
2882
2883         /* Signal LDUMP interlocked - clear IO debug ack */
2884         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2885                ioa_cfg->regs.clr_interrupt_reg);
2886
2887         /* Write Mailbox with starting address */
2888         writel(start_addr, ioa_cfg->ioa_mailbox);
2889
2890         /* Signal address valid - clear IOA Reset alert */
2891         writel(IPR_UPROCI_RESET_ALERT,
2892                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2893
2894         for (i = 0; i < length_in_words; i++) {
2895                 /* Wait for IO debug acknowledge */
2896                 if (ipr_wait_iodbg_ack(ioa_cfg,
2897                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2898                         dev_err(&ioa_cfg->pdev->dev,
2899                                 "IOA dump short data transfer timeout\n");
2900                         return -EIO;
2901                 }
2902
2903                 /* Read data from mailbox and increment destination pointer */
2904                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2905                 dest++;
2906
2907                 /* For all but the last word of data, signal data received */
2908                 if (i < (length_in_words - 1)) {
2909                         /* Signal dump data received - Clear IO debug Ack */
2910                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2911                                ioa_cfg->regs.clr_interrupt_reg);
2912                 }
2913         }
2914
2915         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2916         writel(IPR_UPROCI_RESET_ALERT,
2917                ioa_cfg->regs.set_uproc_interrupt_reg32);
2918
2919         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2920                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2921
2922         /* Signal dump data received - Clear IO debug Ack */
2923         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2924                ioa_cfg->regs.clr_interrupt_reg);
2925
2926         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2927         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2928                 temp_pcii_reg =
2929                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2930
2931                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2932                         return 0;
2933
2934                 udelay(10);
2935                 delay += 10;
2936         }
2937
2938         return 0;
2939 }
2940
2941 #ifdef CONFIG_SCSI_IPR_DUMP
2942 /**
2943  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2944  * @ioa_cfg:            ioa config struct
2945  * @pci_address:        adapter address
2946  * @length:                     length of data to copy
2947  *
2948  * Copy data from PCI adapter to kernel buffer.
2949  * Note: length MUST be a 4 byte multiple
2950  * Return value:
2951  *      0 on success / other on failure
2952  **/
2953 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2954                         unsigned long pci_address, u32 length)
2955 {
2956         int bytes_copied = 0;
2957         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2958         __be32 *page;
2959         unsigned long lock_flags = 0;
2960         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2961
2962         if (ioa_cfg->sis64)
2963                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2964         else
2965                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2966
2967         while (bytes_copied < length &&
2968                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2969                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2970                     ioa_dump->page_offset == 0) {
2971                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2972
2973                         if (!page) {
2974                                 ipr_trace;
2975                                 return bytes_copied;
2976                         }
2977
2978                         ioa_dump->page_offset = 0;
2979                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2980                         ioa_dump->next_page_index++;
2981                 } else
2982                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2983
2984                 rem_len = length - bytes_copied;
2985                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2986                 cur_len = min(rem_len, rem_page_len);
2987
2988                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2989                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2990                         rc = -EIO;
2991                 } else {
2992                         rc = ipr_get_ldump_data_section(ioa_cfg,
2993                                                         pci_address + bytes_copied,
2994                                                         &page[ioa_dump->page_offset / 4],
2995                                                         (cur_len / sizeof(u32)));
2996                 }
2997                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2998
2999                 if (!rc) {
3000                         ioa_dump->page_offset += cur_len;
3001                         bytes_copied += cur_len;
3002                 } else {
3003                         ipr_trace;
3004                         break;
3005                 }
3006                 schedule();
3007         }
3008
3009         return bytes_copied;
3010 }
3011
3012 /**
3013  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3014  * @hdr:        dump entry header struct
3015  *
3016  * Return value:
3017  *      nothing
3018  **/
3019 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3020 {
3021         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3022         hdr->num_elems = 1;
3023         hdr->offset = sizeof(*hdr);
3024         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3025 }
3026
3027 /**
3028  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3029  * @ioa_cfg:    ioa config struct
3030  * @driver_dump:        driver dump struct
3031  *
3032  * Return value:
3033  *      nothing
3034  **/
3035 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3036                                    struct ipr_driver_dump *driver_dump)
3037 {
3038         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3039
3040         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3041         driver_dump->ioa_type_entry.hdr.len =
3042                 sizeof(struct ipr_dump_ioa_type_entry) -
3043                 sizeof(struct ipr_dump_entry_header);
3044         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3045         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3046         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3047         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3048                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3049                 ucode_vpd->minor_release[1];
3050         driver_dump->hdr.num_entries++;
3051 }
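/*
 * Example with hypothetical VPD values: major_release 0x02,
 * card_type 0x58 and minor_release { 0x01, 0x04 } pack into
 * fw_version 0x02580104, the same byte order that
 * ipr_show_fw_version() prints.
 */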
3052
3053 /**
3054  * ipr_dump_version_data - Fill in the driver version in the dump.
3055  * @ioa_cfg:    ioa config struct
3056  * @driver_dump:        driver dump struct
3057  *
3058  * Return value:
3059  *      nothing
3060  **/
3061 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3062                                   struct ipr_driver_dump *driver_dump)
3063 {
3064         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3065         driver_dump->version_entry.hdr.len =
3066                 sizeof(struct ipr_dump_version_entry) -
3067                 sizeof(struct ipr_dump_entry_header);
3068         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3069         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3070         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3071         driver_dump->hdr.num_entries++;
3072 }
3073
3074 /**
3075  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3076  * @ioa_cfg:    ioa config struct
3077  * @driver_dump:        driver dump struct
3078  *
3079  * Return value:
3080  *      nothing
3081  **/
3082 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3083                                    struct ipr_driver_dump *driver_dump)
3084 {
3085         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3086         driver_dump->trace_entry.hdr.len =
3087                 sizeof(struct ipr_dump_trace_entry) -
3088                 sizeof(struct ipr_dump_entry_header);
3089         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3090         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3091         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3092         driver_dump->hdr.num_entries++;
3093 }
3094
3095 /**
3096  * ipr_dump_location_data - Fill in the IOA location in the dump.
3097  * @ioa_cfg:    ioa config struct
3098  * @driver_dump:        driver dump struct
3099  *
3100  * Return value:
3101  *      nothing
3102  **/
3103 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3104                                    struct ipr_driver_dump *driver_dump)
3105 {
3106         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3107         driver_dump->location_entry.hdr.len =
3108                 sizeof(struct ipr_dump_location_entry) -
3109                 sizeof(struct ipr_dump_entry_header);
3110         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3111         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3112         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3113         driver_dump->hdr.num_entries++;
3114 }
3115
3116 /**
3117  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3118  * @ioa_cfg:    ioa config struct
3119  * @dump:               dump struct
3120  *
3121  * Return value:
3122  *      nothing
3123  **/
3124 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3125 {
3126         unsigned long start_addr, sdt_word;
3127         unsigned long lock_flags = 0;
3128         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3129         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3130         u32 num_entries, max_num_entries, start_off, end_off;
3131         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3132         struct ipr_sdt *sdt;
3133         int valid = 1;
3134         int i;
3135
3136         ENTER;
3137
3138         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3139
3140         if (ioa_cfg->sdt_state != READ_DUMP) {
3141                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142                 return;
3143         }
3144
3145         if (ioa_cfg->sis64) {
3146                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3147                 ssleep(IPR_DUMP_DELAY_SECONDS);
3148                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3149         }
3150
3151         start_addr = readl(ioa_cfg->ioa_mailbox);
3152
3153         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3154                 dev_err(&ioa_cfg->pdev->dev,
3155                         "Invalid dump table format: %lx\n", start_addr);
3156                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3157                 return;
3158         }
3159
3160         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3161
3162         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3163
3164         /* Initialize the overall dump header */
3165         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3166         driver_dump->hdr.num_entries = 1;
3167         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3168         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3169         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3170         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3171
3172         ipr_dump_version_data(ioa_cfg, driver_dump);
3173         ipr_dump_location_data(ioa_cfg, driver_dump);
3174         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3175         ipr_dump_trace_data(ioa_cfg, driver_dump);
3176
3177         /* Update dump_header */
3178         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3179
3180         /* IOA Dump entry */
3181         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3182         ioa_dump->hdr.len = 0;
3183         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3184         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3185
3186         /* First entries in sdt are actually a list of dump addresses and
3187          * lengths to gather the real dump data.  sdt represents the pointer
3188          * to the ioa generated dump table.  Dump data will be extracted based
3189          * on entries in this table */
3190         sdt = &ioa_dump->sdt;
3191
3192         if (ioa_cfg->sis64) {
3193                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3194                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3195         } else {
3196                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3197                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3198         }
3199
3200         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3201                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3202         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3203                                         bytes_to_copy / sizeof(__be32));
3204
3205         /* Smart Dump table is ready to use and the first entry is valid */
3206         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3207             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3208                 dev_err(&ioa_cfg->pdev->dev,
3209                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3210                         rc, be32_to_cpu(sdt->hdr.state));
3211                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3212                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3213                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3214                 return;
3215         }
3216
3217         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3218
3219         if (num_entries > max_num_entries)
3220                 num_entries = max_num_entries;
3221
3222         /* Update dump length to the actual data to be copied */
3223         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3224         if (ioa_cfg->sis64)
3225                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3226         else
3227                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3228
3229         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230
3231         for (i = 0; i < num_entries; i++) {
3232                 if (ioa_dump->hdr.len > max_dump_size) {
3233                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3234                         break;
3235                 }
3236
3237                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3238                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3239                         if (ioa_cfg->sis64)
3240                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3241                         else {
3242                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3243                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3244
3245                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3246                                         bytes_to_copy = end_off - start_off;
3247                                 else
3248                                         valid = 0;
3249                         }
3250                         if (valid) {
3251                                 if (bytes_to_copy > max_dump_size) {
3252                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3253                                         continue;
3254                                 }
3255
3256                                 /* Copy data from adapter to driver buffers */
3257                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3258                                                             bytes_to_copy);
3259
3260                                 ioa_dump->hdr.len += bytes_copied;
3261
3262                                 if (bytes_copied != bytes_to_copy) {
3263                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3264                                         break;
3265                                 }
3266                         }
3267                 }
3268         }
3269
3270         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3271
3272         /* Update dump_header */
3273         driver_dump->hdr.len += ioa_dump->hdr.len;
3274         wmb();
3275         ioa_cfg->sdt_state = DUMP_OBTAINED;
3276         LEAVE;
3277 }
3278
3279 #else
3280 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3281 #endif
3282
3283 /**
3284  * ipr_release_dump - Free adapter dump memory
3285  * @kref:       kref struct
3286  *
3287  * Return value:
3288  *      nothing
3289  **/
3290 static void ipr_release_dump(struct kref *kref)
3291 {
3292         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3293         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3294         unsigned long lock_flags = 0;
3295         int i;
3296
3297         ENTER;
3298         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3299         ioa_cfg->dump = NULL;
3300         ioa_cfg->sdt_state = INACTIVE;
3301         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302
3303         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3304                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3305
3306         vfree(dump->ioa_dump.ioa_data);
3307         kfree(dump);
3308         LEAVE;
3309 }
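/*
 * This is only invoked as a kref release callback, e.g. via
 * kref_put(&dump->kref, ipr_release_dump), once the last reference
 * to the dump has been dropped.
 */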
3310
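/**
 * ipr_add_remove_thread - Device add/remove worker thread
 * @work:       work struct
 *
 * Called at task level from a work thread. Removes devices that have
 * been deleted from the adapter's configuration and adds newly
 * reported ones to the SCSI mid-layer.
 *
 * Return value:
 *      nothing
 **/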
3311 static void ipr_add_remove_thread(struct work_struct *work)
3312 {
3313         unsigned long lock_flags;
3314         struct ipr_resource_entry *res;
3315         struct scsi_device *sdev;
3316         struct ipr_ioa_cfg *ioa_cfg =
3317                 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3318         u8 bus, target, lun;
3319         int did_work;
3320
3321         ENTER;
3322         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3323
3324 restart:
3325         do {
3326                 did_work = 0;
3327                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3328                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329                         return;
3330                 }
3331
3332                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3333                         if (res->del_from_ml && res->sdev) {
3334                                 did_work = 1;
3335                                 sdev = res->sdev;
3336                                 if (!scsi_device_get(sdev)) {
3337                                         if (!res->add_to_ml)
3338                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3339                                         else
3340                                                 res->del_from_ml = 0;
3341                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3342                                         scsi_remove_device(sdev);
3343                                         scsi_device_put(sdev);
3344                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3345                                 }
3346                                 break;
3347                         }
3348                 }
3349         } while (did_work);
3350
3351         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3352                 if (res->add_to_ml) {
3353                         bus = res->bus;
3354                         target = res->target;
3355                         lun = res->lun;
3356                         res->add_to_ml = 0;
3357                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3358                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3359                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3360                         goto restart;
3361                 }
3362         }
3363
3364         ioa_cfg->scan_done = 1;
3365         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3366         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3367         LEAVE;
3368 }
3369
3370 /**
3371  * ipr_worker_thread - Worker thread
3372  * @work:               work struct
3373  *
3374  * Called at task level from a work thread. This function takes care
3375  * of adding and removing devices from the mid-layer as configuration
3376  * changes are detected by the adapter.
3377  *
3378  * Return value:
3379  *      nothing
3380  **/
3381 static void ipr_worker_thread(struct work_struct *work)
3382 {
3383         unsigned long lock_flags;
3384         struct ipr_dump *dump;
3385         struct ipr_ioa_cfg *ioa_cfg =
3386                 container_of(work, struct ipr_ioa_cfg, work_q);
3387
3388         ENTER;
3389         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3390
3391         if (ioa_cfg->sdt_state == READ_DUMP) {
3392                 dump = ioa_cfg->dump;
3393                 if (!dump) {
3394                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3395                         return;
3396                 }
3397                 kref_get(&dump->kref);
3398                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3399                 ipr_get_ioa_dump(ioa_cfg, dump);
3400                 kref_put(&dump->kref, ipr_release_dump);
3401
3402                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3403                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3404                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3405                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3406                 return;
3407         }
3408
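        /*
         * scsi_unblock_requests() is called with the host lock dropped;
         * if another reset blocked the host in the meantime (scsi_blocked
         * set again), re-assert the block before continuing.
         */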
3409         if (ioa_cfg->scsi_unblock) {
3410                 ioa_cfg->scsi_unblock = 0;
3411                 ioa_cfg->scsi_blocked = 0;
3412                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413                 scsi_unblock_requests(ioa_cfg->host);
3414                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3415                 if (ioa_cfg->scsi_blocked)
3416                         scsi_block_requests(ioa_cfg->host);
3417         }
3418
3419         if (!ioa_cfg->scan_enabled) {
3420                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3421                 return;
3422         }
3423
3424         schedule_work(&ioa_cfg->scsi_add_work_q);
3425
3426         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3427         LEAVE;
3428 }
3429
3430 #ifdef CONFIG_SCSI_IPR_TRACE
3431 /**
3432  * ipr_read_trace - Dump the adapter trace
3433  * @filp:               open sysfs file
3434  * @kobj:               kobject struct
3435  * @bin_attr:           bin_attribute struct
3436  * @buf:                buffer
3437  * @off:                offset
3438  * @count:              buffer size
3439  *
3440  * Return value:
3441  *      number of bytes printed to buffer
3442  **/
3443 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3444                               struct bin_attribute *bin_attr,
3445                               char *buf, loff_t off, size_t count)
3446 {
3447         struct device *dev = container_of(kobj, struct device, kobj);
3448         struct Scsi_Host *shost = class_to_shost(dev);
3449         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3450         unsigned long lock_flags = 0;
3451         ssize_t ret;
3452
3453         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3454         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3455                                 IPR_TRACE_SIZE);
3456         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3457
3458         return ret;
3459 }
3460
3461 static struct bin_attribute ipr_trace_attr = {
3462         .attr = {
3463                 .name = "trace",
3464                 .mode = S_IRUGO,
3465         },
3466         .size = 0,
3467         .read = ipr_read_trace,
3468 };
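/*
 * The adapter trace buffer can then be read from user space through
 * this binary sysfs attribute, e.g. (hostN being whichever SCSI host
 * the adapter registered as):
 *
 *      cat /sys/class/scsi_host/hostN/trace > ipr_trace.bin
 */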
3469 #endif
3470
3471 /**
3472  * ipr_show_fw_version - Show the firmware version
3473  * @dev:        class device struct
3474  * @buf:        buffer
3475  *
3476  * Return value:
3477  *      number of bytes printed to buffer
3478  **/
3479 static ssize_t ipr_show_fw_version(struct device *dev,
3480                                    struct device_attribute *attr, char *buf)
3481 {
3482         struct Scsi_Host *shost = class_to_shost(dev);
3483         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3484         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3485         unsigned long lock_flags = 0;
3486         int len;
3487
3488         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3489         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3490                        ucode_vpd->major_release, ucode_vpd->card_type,
3491                        ucode_vpd->minor_release[0],
3492                        ucode_vpd->minor_release[1]);
3493         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3494         return len;
3495 }
3496
3497 static struct device_attribute ipr_fw_version_attr = {
3498         .attr = {
3499                 .name =         "fw_version",
3500                 .mode =         S_IRUGO,
3501         },
3502         .show = ipr_show_fw_version,
3503 };
3504
3505 /**
3506  * ipr_show_log_level - Show the adapter's error logging level
3507  * @dev:        class device struct
3508  * @buf:        buffer
3509  *
3510  * Return value:
3511  *      number of bytes printed to buffer
3512  **/
3513 static ssize_t ipr_show_log_level(struct device *dev,
3514                                    struct device_attribute *attr, char *buf)
3515 {
3516         struct Scsi_Host *shost = class_to_shost(dev);
3517         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3518         unsigned long lock_flags = 0;
3519         int len;
3520
3521         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3522         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3523         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3524         return len;
3525 }
3526
3527 /**
3528  * ipr_store_log_level - Change the adapter's error logging level
3529  * @dev:        class device struct
3530  * @buf:        buffer
3531  *
3532  * Return value:
3533  *      number of bytes consumed from buffer
3534  **/
3535 static ssize_t ipr_store_log_level(struct device *dev,
3536                                    struct device_attribute *attr,
3537                                    const char *buf, size_t count)
3538 {
3539         struct Scsi_Host *shost = class_to_shost(dev);
3540         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3541         unsigned long lock_flags = 0;
3542
3543         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3544         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3545         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3546         return strlen(buf);
3547 }
3548
3549 static struct device_attribute ipr_log_level_attr = {
3550         .attr = {
3551                 .name =         "log_level",
3552                 .mode =         S_IRUGO | S_IWUSR,
3553         },
3554         .show = ipr_show_log_level,
3555         .store = ipr_store_log_level
3556 };
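/*
 * Example: raising the error logging level from user space (hostN
 * and the level value are illustrative):
 *
 *      echo 4 > /sys/class/scsi_host/hostN/log_level
 */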
3557
3558 /**
3559  * ipr_store_diagnostics - IOA Diagnostics interface
3560  * @dev:        device struct
3561  * @buf:        buffer
3562  * @count:      buffer size
3563  *
3564  * This function will reset the adapter and wait a reasonable
3565  * amount of time for any errors that the adapter might log.
3566  *
3567  * Return value:
3568  *      count on success / other on failure
3569  **/
3570 static ssize_t ipr_store_diagnostics(struct device *dev,
3571                                      struct device_attribute *attr,
3572                                      const char *buf, size_t count)
3573 {
3574         struct Scsi_Host *shost = class_to_shost(dev);
3575         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3576         unsigned long lock_flags = 0;
3577         int rc = count;
3578
3579         if (!capable(CAP_SYS_ADMIN))
3580                 return -EACCES;
3581
3582         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3583         while (ioa_cfg->in_reset_reload) {
3584                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3585                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3586                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3587         }
3588
3589         ioa_cfg->errors_logged = 0;
3590         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3591
3592         if (ioa_cfg->in_reset_reload) {
3593                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3594                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3595
3596                 /* Wait for a second for any errors to be logged */
3597                 msleep(1000);
3598         } else {
3599                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3600                 return -EIO;
3601         }
3602
3603         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3604         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3605                 rc = -EIO;
3606         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3607
3608         return rc;
3609 }
3610
3611 static struct device_attribute ipr_diagnostics_attr = {
3612         .attr = {
3613                 .name =         "run_diagnostics",
3614                 .mode =         S_IWUSR,
3615         },
3616         .store = ipr_store_diagnostics
3617 };
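/*
 * Any write triggers the diagnostic reset; the value written is
 * ignored.  E.g. (hostN illustrative):
 *
 *      echo 1 > /sys/class/scsi_host/hostN/run_diagnostics
 *
 * The write fails with EIO if the adapter logged any errors while
 * resetting.
 */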
3618
3619 /**
3620  * ipr_show_adapter_state - Show the adapter's state
3621  * @dev:        device struct
3622  * @buf:        buffer
3623  *
3624  * Return value:
3625  *      number of bytes printed to buffer
3626  **/
3627 static ssize_t ipr_show_adapter_state(struct device *dev,
3628                                       struct device_attribute *attr, char *buf)
3629 {
3630         struct Scsi_Host *shost = class_to_shost(dev);
3631         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3632         unsigned long lock_flags = 0;
3633         int len;
3634
3635         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3636         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3637                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3638         else
3639                 len = snprintf(buf, PAGE_SIZE, "online\n");
3640         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3641         return len;
3642 }
3643
3644 /**
3645  * ipr_store_adapter_state - Change adapter state
3646  * @dev:        device struct
3647  * @buf:        buffer
3648  * @count:      buffer size
3649  *
3650  * This function will change the adapter's state.
3651  *
3652  * Return value:
3653  *      count on success / other on failure
3654  **/
3655 static ssize_t ipr_store_adapter_state(struct device *dev,
3656                                        struct device_attribute *attr,
3657                                        const char *buf, size_t count)
3658 {
3659         struct Scsi_Host *shost = class_to_shost(dev);
3660         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3661         unsigned long lock_flags;
3662         int result = count, i;
3663
3664         if (!capable(CAP_SYS_ADMIN))
3665                 return -EACCES;
3666
3667         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3668         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3669             !strncmp(buf, "online", 6)) {
3670                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3671                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3672                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3673                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3674                 }
3675                 wmb();
3676                 ioa_cfg->reset_retries = 0;
3677                 ioa_cfg->in_ioa_bringdown = 0;
3678                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3679         }
3680         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3681         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3682
3683         return result;
3684 }
3685
3686 static struct device_attribute ipr_ioa_state_attr = {
3687         .attr = {
3688                 .name =         "online_state",
3689                 .mode =         S_IRUGO | S_IWUSR,
3690         },
3691         .show = ipr_show_adapter_state,
3692         .store = ipr_store_adapter_state
3693 };
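/*
 * Example: bringing a dead adapter back online (hostN illustrative);
 * only the string "online" is acted upon by the store method above:
 *
 *      echo online > /sys/class/scsi_host/hostN/online_state
 */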
3694
3695 /**
3696  * ipr_store_reset_adapter - Reset the adapter
3697  * @dev:        device struct
3698  * @buf:        buffer
3699  * @count:      buffer size
3700  *
3701  * This function will reset the adapter.
3702  *
3703  * Return value:
3704  *      count on success / other on failure
3705  **/
3706 static ssize_t ipr_store_reset_adapter(struct device *dev,
3707                                        struct device_attribute *attr,
3708                                        const char *buf, size_t count)
3709 {
3710         struct Scsi_Host *shost = class_to_shost(dev);
3711         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3712         unsigned long lock_flags;
3713         int result = count;
3714
3715         if (!capable(CAP_SYS_ADMIN))
3716                 return -EACCES;
3717
3718         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3719         if (!ioa_cfg->in_reset_reload)
3720                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3721         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3722         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3723
3724         return result;
3725 }
3726
3727 static struct device_attribute ipr_ioa_reset_attr = {
3728         .attr = {
3729                 .name =         "reset_host",
3730                 .mode =         S_IWUSR,
3731         },
3732         .store = ipr_store_reset_adapter
3733 };
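/*
 * Any write initiates a normal-shutdown adapter reset, e.g. (hostN
 * illustrative):
 *
 *      echo 1 > /sys/class/scsi_host/hostN/reset_host
 */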
3734
3735 static int ipr_iopoll(struct irq_poll *iop, int budget);
3736 /**
3737  * ipr_show_iopoll_weight - Show ipr polling mode
3738  * @dev:        class device struct
3739  * @buf:        buffer
3740  *
3741  * Return value:
3742  *      number of bytes printed to buffer
3743  **/
3744 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3745                                    struct device_attribute *attr, char *buf)
3746 {
3747         struct Scsi_Host *shost = class_to_shost(dev);
3748         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3749         unsigned long lock_flags = 0;
3750         int len;
3751
3752         spin_lock_irqsave(shost->host_lock, lock_flags);
3753         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3754         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3755
3756         return len;
3757 }
3758
3759 /**
3760  * ipr_store_iopoll_weight - Change the adapter's polling mode
3761  * @dev:        class device struct
3762  * @buf:        buffer
3763  *
3764  * Return value:
3765  *      number of bytes consumed from buffer
3766  **/
3767 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3768                                         struct device_attribute *attr,
3769                                         const char *buf, size_t count)
3770 {
3771         struct Scsi_Host *shost = class_to_shost(dev);
3772         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3773         unsigned long user_iopoll_weight;
3774         unsigned long lock_flags = 0;
3775         int i;
3776
3777         if (!ioa_cfg->sis64) {
3778                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3779                 return -EINVAL;
3780         }
3781         if (kstrtoul(buf, 10, &user_iopoll_weight))
3782                 return -EINVAL;
3783
3784         if (user_iopoll_weight > 256) {
3785                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3786                 return -EINVAL;
3787         }
3788
3789         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3790                 dev_info(&ioa_cfg->pdev->dev, "New irq_poll weight matches the current weight\n");
3791                 return strlen(buf);
3792         }
3793
3794         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3795                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3796                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3797         }
3798
3799         spin_lock_irqsave(shost->host_lock, lock_flags);
3800         ioa_cfg->iopoll_weight = user_iopoll_weight;
3801         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3802                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3803                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3804                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3805                 }
3806         }
3807         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3808
3809         return strlen(buf);
3810 }
3811
3812 static struct device_attribute ipr_iopoll_weight_attr = {
3813         .attr = {
3814                 .name =         "iopoll_weight",
3815                 .mode =         S_IRUGO | S_IWUSR,
3816         },
3817         .show = ipr_show_iopoll_weight,
3818         .store = ipr_store_iopoll_weight
3819 };
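/*
 * Example: enabling irq_poll with a weight of 64 (hostN illustrative;
 * only meaningful on SIS-64 adapters with more than one vector):
 *
 *      echo 64 > /sys/class/scsi_host/hostN/iopoll_weight
 */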
3820
3821 /**
3822  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3823  * @buf_len:            buffer length
3824  *
3825  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3826  * list to use for microcode download
3827  *
3828  * Return value:
3829  *      pointer to sglist / NULL on failure
3830  **/
3831 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3832 {
3833         int sg_size, order, bsize_elem, num_elem, i, j;
3834         struct ipr_sglist *sglist;
3835         struct scatterlist *scatterlist;
3836         struct page *page;
3837
3838         /* Get the minimum size per scatter/gather element */
3839         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3840
3841         /* Get the actual size per element */
3842         order = get_order(sg_size);
3843
3844         /* Determine the actual number of bytes per element */
3845         bsize_elem = PAGE_SIZE * (1 << order);
3846
3847         /* Determine the actual number of sg entries needed */
3848         if (buf_len % bsize_elem)
3849                 num_elem = (buf_len / bsize_elem) + 1;
3850         else
3851                 num_elem = buf_len / bsize_elem;
3852
3853         /* Allocate a scatter/gather list for the DMA */
3854         sglist = kzalloc(sizeof(struct ipr_sglist) +
3855                          (sizeof(struct scatterlist) * (num_elem - 1)),
3856                          GFP_KERNEL);
3857
3858         if (sglist == NULL) {
3859                 ipr_trace;
3860                 return NULL;
3861         }
3862
3863         scatterlist = sglist->scatterlist;
3864         sg_init_table(scatterlist, num_elem);
3865
3866         sglist->order = order;
3867         sglist->num_sg = num_elem;
3868
3869         /* Allocate a bunch of sg elements */
3870         for (i = 0; i < num_elem; i++) {
3871                 page = alloc_pages(GFP_KERNEL, order);
3872                 if (!page) {
3873                         ipr_trace;
3874
3875                         /* Free up what we already allocated */
3876                         for (j = i - 1; j >= 0; j--)
3877                                 __free_pages(sg_page(&scatterlist[j]), order);
3878                         kfree(sglist);
3879                         return NULL;
3880                 }
3881
3882                 sg_set_page(&scatterlist[i], page, 0, 0);
3883         }
3884
3885         return sglist;
3886 }
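/*
 * Worked example (illustrative numbers, 4K pages, assuming
 * IPR_MAX_SGLIST is 64): for a 1 MB image, sg_size = 1048576 / 63
 * = 16644 bytes, which get_order() rounds up to order 3 (32 KB per
 * element), so 32 elements cover the whole image.
 */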
3887
3888 /**
3889  * ipr_free_ucode_buffer - Frees a microcode download buffer
3890  * @sglist:             scatter/gather list pointer
3891  *
3892  * Free a DMA'able ucode download buffer previously allocated with
3893  * ipr_alloc_ucode_buffer
3894  *
3895  * Return value:
3896  *      nothing
3897  **/
3898 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3899 {
3900         int i;
3901
3902         for (i = 0; i < sglist->num_sg; i++)
3903                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3904
3905         kfree(sglist);
3906 }
3907
3908 /**
3909  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3910  * @sglist:             scatter/gather list pointer
3911  * @buffer:             buffer pointer
3912  * @len:                buffer length
3913  *
3914  * Copy a microcode image from a user buffer into a buffer allocated by
3915  * ipr_alloc_ucode_buffer
3916  *
3917  * Return value:
3918  *      0 on success / other on failure
3919  **/
3920 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3921                                  u8 *buffer, u32 len)
3922 {
3923         int bsize_elem, i, result = 0;
3924         struct scatterlist *scatterlist;
3925         void *kaddr;
3926
3927         /* Determine the actual number of bytes per element */
3928         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3929
3930         scatterlist = sglist->scatterlist;
3931
3932         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3933                 struct page *page = sg_page(&scatterlist[i]);
3934
3935                 kaddr = kmap(page);
3936                 memcpy(kaddr, buffer, bsize_elem);
3937                 kunmap(page);
3938
3939                 scatterlist[i].length = bsize_elem;
3940
3941                 if (result != 0) {
3942                         ipr_trace;
3943                         return result;
3944                 }
3945         }
3946
3947         if (len % bsize_elem) {
3948                 struct page *page = sg_page(&scatterlist[i]);
3949
3950                 kaddr = kmap(page);
3951                 memcpy(kaddr, buffer, len % bsize_elem);
3952                 kunmap(page);
3953
3954                 scatterlist[i].length = len % bsize_elem;
3955         }
3956
3957         sglist->buffer_len = len;
3958         return result;
3959 }
3960
3961 /**
3962  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3963  * @ipr_cmd:            ipr command struct
3964  * @sglist:             scatter/gather list
3965  *
3966  * Builds a microcode download IOA data list (IOADL).
3967  *
3968  **/
3969 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3970                                     struct ipr_sglist *sglist)
3971 {
3972         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3973         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3974         struct scatterlist *scatterlist = sglist->scatterlist;
3975         int i;
3976
3977         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3978         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3979         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3980
3981         ioarcb->ioadl_len =
3982                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3983         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3984                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3985                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3986                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3987         }
3988
3989         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3990 }
3991
3992 /**
3993  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3994  * @ipr_cmd:    ipr command struct
3995  * @sglist:             scatter/gather list
3996  *
3997  * Builds a microcode download IOA data list (IOADL).
3998  *
3999  **/
4000 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
4001                                   struct ipr_sglist *sglist)
4002 {
4003         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4004         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4005         struct scatterlist *scatterlist = sglist->scatterlist;
4006         int i;
4007
4008         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
4009         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4010         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4011
4012         ioarcb->ioadl_len =
4013                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4014
4015         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4016                 ioadl[i].flags_and_data_len =
4017                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4018                 ioadl[i].address =
4019                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
4020         }
4021
4022         ioadl[i-1].flags_and_data_len |=
4023                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4024 }
4025
4026 /**
4027  * ipr_update_ioa_ucode - Update IOA's microcode
4028  * @ioa_cfg:    ioa config struct
4029  * @sglist:             scatter/gather list
4030  *
4031  * Initiate an adapter reset to update the IOA's microcode
4032  *
4033  * Return value:
4034  *      0 on success / -EIO on failure
4035  **/
4036 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4037                                 struct ipr_sglist *sglist)
4038 {
4039         unsigned long lock_flags;
4040
4041         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4042         while (ioa_cfg->in_reset_reload) {
4043                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4045                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4046         }
4047
4048         if (ioa_cfg->ucode_sglist) {
4049                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4050                 dev_err(&ioa_cfg->pdev->dev,
4051                         "Microcode download already in progress\n");
4052                 return -EIO;
4053         }
4054
4055         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4056                                         sglist->scatterlist, sglist->num_sg,
4057                                         DMA_TO_DEVICE);
4058
4059         if (!sglist->num_dma_sg) {
4060                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4061                 dev_err(&ioa_cfg->pdev->dev,
4062                         "Failed to map microcode download buffer!\n");
4063                 return -EIO;
4064         }
4065
4066         ioa_cfg->ucode_sglist = sglist;
4067         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4068         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4069         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4070
4071         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4072         ioa_cfg->ucode_sglist = NULL;
4073         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4074         return 0;
4075 }
4076
4077 /**
4078  * ipr_store_update_fw - Update the firmware on the adapter
4079  * @dev:        device struct
 * @attr:       device attribute (unused)
4080  * @buf:        buffer
4081  * @count:      buffer size
4082  *
4083  * This function will update the firmware on the adapter.
4084  *
4085  * Return value:
4086  *      count on success / other on failure
4087  **/
4088 static ssize_t ipr_store_update_fw(struct device *dev,
4089                                    struct device_attribute *attr,
4090                                    const char *buf, size_t count)
4091 {
4092         struct Scsi_Host *shost = class_to_shost(dev);
4093         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4094         struct ipr_ucode_image_header *image_hdr;
4095         const struct firmware *fw_entry;
4096         struct ipr_sglist *sglist;
4097         char fname[100];
4098         u8 *src;
4099         char *endline;
4100         int result, dnld_size;
4101
4102         if (!capable(CAP_SYS_ADMIN))
4103                 return -EACCES;
4104
4105         snprintf(fname, sizeof(fname), "%s", buf);
4106
4107         endline = strchr(fname, '\n');
4108         if (endline)
4109                 *endline = '\0';
4110
4111         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4112                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4113                 return -EIO;
4114         }
4115
4116         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4117
4118         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4119         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4120         sglist = ipr_alloc_ucode_buffer(dnld_size);
4121
4122         if (!sglist) {
4123                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4124                 release_firmware(fw_entry);
4125                 return -ENOMEM;
4126         }
4127
4128         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4129
4130         if (result) {
4131                 dev_err(&ioa_cfg->pdev->dev,
4132                         "Microcode buffer copy to DMA buffer failed\n");
4133                 goto out;
4134         }
4135
4136         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4137
4138         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4139
4140         if (!result)
4141                 result = count;
4142 out:
4143         ipr_free_ucode_buffer(sglist);
4144         release_firmware(fw_entry);
4145         return result;
4146 }
4147
4148 static struct device_attribute ipr_update_fw_attr = {
4149         .attr = {
4150                 .name =         "update_fw",
4151                 .mode =         S_IWUSR,
4152         },
4153         .store = ipr_store_update_fw
4154 };
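
/*
 * Illustrative usage (the host number and image name below are
 * hypothetical): place the microcode image where the firmware loader
 * can find it, typically /lib/firmware, then write its name to the
 * update_fw attribute:
 *
 *	# echo ibm-ipr-microcode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * Any trailing newline from echo is stripped before the lookup.
 */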
4155
4156 /**
4157  * ipr_show_fw_type - Show the adapter's firmware type.
4158  * @dev:        class device struct
 * @attr:       device attribute (unused)
4159  * @buf:        buffer
4160  *
4161  * Return value:
4162  *      number of bytes printed to buffer
4163  **/
4164 static ssize_t ipr_show_fw_type(struct device *dev,
4165                                 struct device_attribute *attr, char *buf)
4166 {
4167         struct Scsi_Host *shost = class_to_shost(dev);
4168         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4169         unsigned long lock_flags = 0;
4170         int len;
4171
4172         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4173         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4174         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4175         return len;
4176 }
4177
4178 static struct device_attribute ipr_ioa_fw_type_attr = {
4179         .attr = {
4180                 .name =         "fw_type",
4181                 .mode =         S_IRUGO,
4182         },
4183         .show = ipr_show_fw_type
4184 };
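
/*
 * Illustrative read of the attribute (hypothetical host number); 1
 * indicates a SIS-64 adapter, 0 a SIS-32 adapter:
 *
 *	# cat /sys/class/scsi_host/host0/fw_type
 *	1
 */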
4185
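/**
 * ipr_read_async_err_log - Read the oldest error HCAM on the report queue
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer
 * @off:        offset
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes read / 0 if no error log entry is queued
 **/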
4186 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4187                                 struct bin_attribute *bin_attr, char *buf,
4188                                 loff_t off, size_t count)
4189 {
4190         struct device *cdev = container_of(kobj, struct device, kobj);
4191         struct Scsi_Host *shost = class_to_shost(cdev);
4192         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4193         struct ipr_hostrcb *hostrcb;
4194         unsigned long lock_flags = 0;
4195         int ret;
4196
4197         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4198         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4199                                         struct ipr_hostrcb, queue);
4200         if (!hostrcb) {
4201                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202                 return 0;
4203         }
4204         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4205                                 sizeof(hostrcb->hcam));
4206         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4207         return ret;
4208 }
4209
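/**
 * ipr_next_async_err_log - Discard the oldest error HCAM on the report queue
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer (ignored)
 * @off:        offset (ignored)
 * @count:      buffer size
 *
 * Any write to the attribute moves the oldest queued HCAM to the free
 * queue, exposing the next entry to ipr_read_async_err_log().
 *
 * Return value:
 *      count
 **/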
4210 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4211                                 struct bin_attribute *bin_attr, char *buf,
4212                                 loff_t off, size_t count)
4213 {
4214         struct device *cdev = container_of(kobj, struct device, kobj);
4215         struct Scsi_Host *shost = class_to_shost(cdev);
4216         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4217         struct ipr_hostrcb *hostrcb;
4218         unsigned long lock_flags = 0;
4219
4220         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4221         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4222                                         struct ipr_hostrcb, queue);
4223         if (!hostrcb) {
4224                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4225                 return count;
4226         }
4227
4228         /* Reclaim hostrcb before exit */
4229         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4230         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4231         return count;
4232 }
4233
4234 static struct bin_attribute ipr_ioa_async_err_log = {
4235         .attr = {
4236                 .name =         "async_err_log",
4237                 .mode =         S_IRUGO | S_IWUSR,
4238         },
4239         .size = 0,
4240         .read = ipr_read_async_err_log,
4241         .write = ipr_next_async_err_log
4242 };
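
/*
 * Illustrative usage (hypothetical host number): read the oldest queued
 * HCAM, then write anything to the attribute to discard it and expose
 * the next one:
 *
 *	# dd if=/sys/class/scsi_host/host0/async_err_log of=hcam.bin bs=4k count=1
 *	# echo 1 > /sys/class/scsi_host/host0/async_err_log
 */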
4243
4244 static struct device_attribute *ipr_ioa_attrs[] = {
4245         &ipr_fw_version_attr,
4246         &ipr_log_level_attr,
4247         &ipr_diagnostics_attr,
4248         &ipr_ioa_state_attr,
4249         &ipr_ioa_reset_attr,
4250         &ipr_update_fw_attr,
4251         &ipr_ioa_fw_type_attr,
4252         &ipr_iopoll_weight_attr,
4253         NULL,
4254 };
4255
4256 #ifdef CONFIG_SCSI_IPR_DUMP
4257 /**
4258  * ipr_read_dump - Dump the adapter
4259  * @filp:               open sysfs file
4260  * @kobj:               kobject struct
4261  * @bin_attr:           bin_attribute struct
4262  * @buf:                buffer
4263  * @off:                offset
4264  * @count:              buffer size
4265  *
4266  * Return value:
4267  *      number of bytes read
4268  **/
4269 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4270                              struct bin_attribute *bin_attr,
4271                              char *buf, loff_t off, size_t count)
4272 {
4273         struct device *cdev = container_of(kobj, struct device, kobj);
4274         struct Scsi_Host *shost = class_to_shost(cdev);
4275         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4276         struct ipr_dump *dump;
4277         unsigned long lock_flags = 0;
4278         char *src;
4279         int len, sdt_end;
4280         size_t rc = count;
4281
4282         if (!capable(CAP_SYS_ADMIN))
4283                 return -EACCES;
4284
4285         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4286         dump = ioa_cfg->dump;
4287
4288         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4289                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4290                 return 0;
4291         }
4292         kref_get(&dump->kref);
4293         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4294
4295         if (off > dump->driver_dump.hdr.len) {
4296                 kref_put(&dump->kref, ipr_release_dump);
4297                 return 0;
4298         }
4299
4300         if (off + count > dump->driver_dump.hdr.len) {
4301                 count = dump->driver_dump.hdr.len - off;
4302                 rc = count;
4303         }
4304
4305         if (count && off < sizeof(dump->driver_dump)) {
4306                 if (off + count > sizeof(dump->driver_dump))
4307                         len = sizeof(dump->driver_dump) - off;
4308                 else
4309                         len = count;
4310                 src = (u8 *)&dump->driver_dump + off;
4311                 memcpy(buf, src, len);
4312                 buf += len;
4313                 off += len;
4314                 count -= len;
4315         }
4316
4317         off -= sizeof(dump->driver_dump);
4318
4319         if (ioa_cfg->sis64)
4320                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4321                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4322                            sizeof(struct ipr_sdt_entry));
4323         else
4324                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4325                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4326
4327         if (count && off < sdt_end) {
4328                 if (off + count > sdt_end)
4329                         len = sdt_end - off;
4330                 else
4331                         len = count;
4332                 src = (u8 *)&dump->ioa_dump + off;
4333                 memcpy(buf, src, len);
4334                 buf += len;
4335                 off += len;
4336                 count -= len;
4337         }
4338
4339         off -= sdt_end;
4340
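        /* Any remaining offset indexes the page-sized ioa_data buffers;
         * copy at most up to the next page boundary per iteration.
         */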
4341         while (count) {
4342                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4343                         len = PAGE_ALIGN(off) - off;
4344                 else
4345                         len = count;
4346                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4347                 src += off & ~PAGE_MASK;
4348                 memcpy(buf, src, len);
4349                 buf += len;
4350                 off += len;
4351                 count -= len;
4352         }
4353
4354         kref_put(&dump->kref, ipr_release_dump);
4355         return rc;
4356 }
4357
4358 /**
4359  * ipr_alloc_dump - Prepare for adapter dump
4360  * @ioa_cfg:    ioa config struct
4361  *
4362  * Return value:
4363  *      0 on success / other on failure
4364  **/
4365 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4366 {
4367         struct ipr_dump *dump;
4368         __be32 **ioa_data;
4369         unsigned long lock_flags = 0;
4370
4371         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4372
4373         if (!dump) {
4374                 ipr_err("Dump memory allocation failed\n");
4375                 return -ENOMEM;
4376         }
4377
4378         if (ioa_cfg->sis64)
4379                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4380         else
4381                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4382
4383         if (!ioa_data) {
4384                 ipr_err("Dump memory allocation failed\n");
4385                 kfree(dump);
4386                 return -ENOMEM;
4387         }
4388
4389         dump->ioa_dump.ioa_data = ioa_data;
4390
4391         kref_init(&dump->kref);
4392         dump->ioa_cfg = ioa_cfg;
4393
4394         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4395
4396         if (INACTIVE != ioa_cfg->sdt_state) {
4397                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4398                 vfree(dump->ioa_dump.ioa_data);
4399                 kfree(dump);
4400                 return 0;
4401         }
4402
4403         ioa_cfg->dump = dump;
4404         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4405         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4406                 ioa_cfg->dump_taken = 1;
4407                 schedule_work(&ioa_cfg->work_q);
4408         }
4409         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4410
4411         return 0;
4412 }
4413
4414 /**
4415  * ipr_free_dump - Free adapter dump memory
4416  * @ioa_cfg:    ioa config struct
4417  *
4418  * Return value:
4419  *      0 on success / other on failure
4420  **/
4421 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4422 {
4423         struct ipr_dump *dump;
4424         unsigned long lock_flags = 0;
4425
4426         ENTER;
4427
4428         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4429         dump = ioa_cfg->dump;
4430         if (!dump) {
4431                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4432                 return 0;
4433         }
4434
4435         ioa_cfg->dump = NULL;
4436         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4437
4438         kref_put(&dump->kref, ipr_release_dump);
4439
4440         LEAVE;
4441         return 0;
4442 }
4443
4444 /**
4445  * ipr_write_dump - Setup dump state of adapter
4446  * @filp:               open sysfs file
4447  * @kobj:               kobject struct
4448  * @bin_attr:           bin_attribute struct
4449  * @buf:                buffer
4450  * @off:                offset
4451  * @count:              buffer size
4452  *
4453  * Return value:
4454  *      count on success / negative error code on failure
4455  **/
4456 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4457                               struct bin_attribute *bin_attr,
4458                               char *buf, loff_t off, size_t count)
4459 {
4460         struct device *cdev = container_of(kobj, struct device, kobj);
4461         struct Scsi_Host *shost = class_to_shost(cdev);
4462         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4463         int rc;
4464
4465         if (!capable(CAP_SYS_ADMIN))
4466                 return -EACCES;
4467
4468         if (buf[0] == '1')
4469                 rc = ipr_alloc_dump(ioa_cfg);
4470         else if (buf[0] == '0')
4471                 rc = ipr_free_dump(ioa_cfg);
4472         else
4473                 return -EINVAL;
4474
4475         if (rc)
4476                 return rc;
4477         else
4478                 return count;
4479 }
4480
4481 static struct bin_attribute ipr_dump_attr = {
4482         .attr = {
4483                 .name = "dump",
4484                 .mode = S_IRUSR | S_IWUSR,
4485         },
4486         .size = 0,
4487         .read = ipr_read_dump,
4488         .write = ipr_write_dump
4489 };
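
/*
 * Illustrative usage (hypothetical host number): writing '1' prepares
 * dump memory, the dump itself is read back from the same attribute,
 * and writing '0' frees the dump memory afterwards:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr.dump bs=4k
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */
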
4490 #else
4491 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4492 #endif
4493
4494 /**
4495  * ipr_change_queue_depth - Change the device's queue depth
4496  * @sdev:       scsi device struct
4497  * @qdepth:     depth to set
4499  *
4500  * Return value:
4501  *      actual depth set
4502  **/
4503 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4504 {
4505         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4506         struct ipr_resource_entry *res;
4507         unsigned long lock_flags = 0;
4508
4509         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4510         res = (struct ipr_resource_entry *)sdev->hostdata;
4511
4512         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4513                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4514         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4515
4516         scsi_change_queue_depth(sdev, qdepth);
4517         return sdev->queue_depth;
4518 }
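
/*
 * The midlayer invokes this through the generic SCSI queue_depth sysfs
 * attribute (the device name below is hypothetical); requests above
 * IPR_MAX_CMD_PER_ATA_LUN are clamped for SATA devices:
 *
 *	# echo 16 > /sys/block/sda/device/queue_depth
 */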
4519
4520 /**
4521  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4522  * @dev:        device struct
4523  * @attr:       device attribute structure
4524  * @buf:        buffer
4525  *
4526  * Return value:
4527  *      number of bytes printed to buffer
4528  **/
4529 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4530 {
4531         struct scsi_device *sdev = to_scsi_device(dev);
4532         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4533         struct ipr_resource_entry *res;
4534         unsigned long lock_flags = 0;
4535         ssize_t len = -ENXIO;
4536
4537         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4538         res = (struct ipr_resource_entry *)sdev->hostdata;
4539         if (res)
4540                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4541         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4542         return len;
4543 }
4544
4545 static struct device_attribute ipr_adapter_handle_attr = {
4546         .attr = {
4547                 .name =         "adapter_handle",
4548                 .mode =         S_IRUSR,
4549         },
4550         .show = ipr_show_adapter_handle
4551 };
4552
4553 /**
4554  * ipr_show_resource_path - Show the resource path or the resource address for
4555  *                          this device.
4556  * @dev:        device struct
4557  * @attr:       device attribute structure
4558  * @buf:        buffer
4559  *
4560  * Return value:
4561  *      number of bytes printed to buffer
4562  **/
4563 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4564 {
4565         struct scsi_device *sdev = to_scsi_device(dev);
4566         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4567         struct ipr_resource_entry *res;
4568         unsigned long lock_flags = 0;
4569         ssize_t len = -ENXIO;
4570         char buffer[IPR_MAX_RES_PATH_LENGTH];
4571
4572         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4573         res = (struct ipr_resource_entry *)sdev->hostdata;
4574         if (res && ioa_cfg->sis64)
4575                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4576                                __ipr_format_res_path(res->res_path, buffer,
4577                                                      sizeof(buffer)));
4578         else if (res)
4579                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4580                                res->bus, res->target, res->lun);
4581
4582         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4583         return len;
4584 }
4585
4586 static struct device_attribute ipr_resource_path_attr = {
4587         .attr = {
4588                 .name =         "resource_path",
4589                 .mode =         S_IRUGO,
4590         },
4591         .show = ipr_show_resource_path
4592 };
4593
4594 /**
4595  * ipr_show_device_id - Show the device_id for this device.
4596  * @dev:        device struct
4597  * @attr:       device attribute structure
4598  * @buf:        buffer
4599  *
4600  * Return value:
4601  *      number of bytes printed to buffer
4602  **/
4603 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4604 {
4605         struct scsi_device *sdev = to_scsi_device(dev);
4606         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4607         struct ipr_resource_entry *res;
4608         unsigned long lock_flags = 0;
4609         ssize_t len = -ENXIO;
4610
4611         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4612         res = (struct ipr_resource_entry *)sdev->hostdata;
4613         if (res && ioa_cfg->sis64)
4614                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4615         else if (res)
4616                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4617
4618         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4619         return len;
4620 }
4621
4622 static struct device_attribute ipr_device_id_attr = {
4623         .attr = {
4624                 .name =         "device_id",
4625                 .mode =         S_IRUGO,
4626         },
4627         .show = ipr_show_device_id
4628 };
4629
4630 /**
4631  * ipr_show_resource_type - Show the resource type for this device.
4632  * @dev:        device struct
4633  * @attr:       device attribute structure
4634  * @buf:        buffer
4635  *
4636  * Return value:
4637  *      number of bytes printed to buffer
4638  **/
4639 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4640 {
4641         struct scsi_device *sdev = to_scsi_device(dev);
4642         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4643         struct ipr_resource_entry *res;
4644         unsigned long lock_flags = 0;
4645         ssize_t len = -ENXIO;
4646
4647         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4648         res = (struct ipr_resource_entry *)sdev->hostdata;
4649
4650         if (res)
4651                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4652
4653         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4654         return len;
4655 }
4656
4657 static struct device_attribute ipr_resource_type_attr = {
4658         .attr = {
4659                 .name =         "resource_type",
4660                 .mode =         S_IRUGO,
4661         },
4662         .show = ipr_show_resource_type
4663 };
4664
4665 /**
4666  * ipr_show_raw_mode - Show the device's raw mode
4667  * @dev:        class device struct
 * @attr:       device attribute (unused)
4668  * @buf:        buffer
4669  *
4670  * Return value:
4671  *      number of bytes printed to buffer
4672  **/
4673 static ssize_t ipr_show_raw_mode(struct device *dev,
4674                                  struct device_attribute *attr, char *buf)
4675 {
4676         struct scsi_device *sdev = to_scsi_device(dev);
4677         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4678         struct ipr_resource_entry *res;
4679         unsigned long lock_flags = 0;
4680         ssize_t len;
4681
4682         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4683         res = (struct ipr_resource_entry *)sdev->hostdata;
4684         if (res)
4685                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4686         else
4687                 len = -ENXIO;
4688         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4689         return len;
4690 }
4691
4692 /**
4693  * ipr_store_raw_mode - Change the device's raw mode
4694  * @dev:        class device struct
 * @attr:       device attribute (unused)
4695  * @buf:        buffer
 * @count:      buffer size
4696  *
4697  * Return value:
4698  *      number of bytes consumed from buffer on success / error code on failure
4699  **/
4700 static ssize_t ipr_store_raw_mode(struct device *dev,
4701                                   struct device_attribute *attr,
4702                                   const char *buf, size_t count)
4703 {
4704         struct scsi_device *sdev = to_scsi_device(dev);
4705         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4706         struct ipr_resource_entry *res;
4707         unsigned long lock_flags = 0;
4708         ssize_t len;
4709
4710         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4711         res = (struct ipr_resource_entry *)sdev->hostdata;
4712         if (res) {
4713                 if (ipr_is_af_dasd_device(res)) {
4714                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4715                         len = strlen(buf);
4716                         if (res->sdev)
4717                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4718                                         res->raw_mode ? "enabled" : "disabled");
4719                 } else
4720                         len = -EINVAL;
4721         } else
4722                 len = -ENXIO;
4723         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4724         return len;
4725 }
4726
4727 static struct device_attribute ipr_raw_mode_attr = {
4728         .attr = {
4729                 .name =         "raw_mode",
4730                 .mode =         S_IRUGO | S_IWUSR,
4731         },
4732         .show = ipr_show_raw_mode,
4733         .store = ipr_store_raw_mode
4734 };
4735
4736 static struct device_attribute *ipr_dev_attrs[] = {
4737         &ipr_adapter_handle_attr,
4738         &ipr_resource_path_attr,
4739         &ipr_device_id_attr,
4740         &ipr_resource_type_attr,
4741         &ipr_raw_mode_attr,
4742         NULL,
4743 };
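
/*
 * These attributes appear in each SCSI device's sysfs directory, e.g.
 * (hypothetical H:C:T:L):
 *
 *	# cat /sys/class/scsi_device/0:0:1:0/device/resource_path
 *	# cat /sys/class/scsi_device/0:0:1:0/device/device_id
 */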
4744
4745 /**
4746  * ipr_biosparam - Return the HSC mapping
4747  * @sdev:                       scsi device struct
4748  * @block_device:       block device pointer
4749  * @capacity:           capacity of the device
4750  * @parm:                       Array containing returned HSC values.
4751  *
4752  * This function generates the HSC parms that fdisk uses.
4753  * We want to make sure we return something that places partitions
4754  * on 4k boundaries for best performance with the IOA.
4755  *
4756  * Return value:
4757  *      0 on success
4758  **/
4759 static int ipr_biosparam(struct scsi_device *sdev,
4760                          struct block_device *block_device,
4761                          sector_t capacity, int *parm)
4762 {
4763         int heads, sectors;
4764         sector_t cylinders;
4765
4766         heads = 128;
4767         sectors = 32;
4768
4769         cylinders = capacity;
4770         sector_div(cylinders, (128 * 32));
4771
4772         /* return result */
4773         parm[0] = heads;
4774         parm[1] = sectors;
4775         parm[2] = cylinders;
4776
4777         return 0;
4778 }
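
/*
 * With the fixed 128-head, 32-sector geometry, each cylinder spans
 * 128 * 32 = 4096 sectors, so cylinder-aligned partitions also land on
 * 4k boundaries. For example, a hypothetical disk of 16777216 sectors
 * would report 16777216 / 4096 = 4096 cylinders.
 */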
4779
4780 /**
4781  * ipr_find_starget - Find target based on bus/target.
4782  * @starget:    scsi target struct
4783  *
4784  * Return value:
4785  *      resource entry pointer if found / NULL if not found
4786  **/
4787 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4788 {
4789         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4790         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4791         struct ipr_resource_entry *res;
4792
4793         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4794                 if ((res->bus == starget->channel) &&
4795                     (res->target == starget->id)) {
4796                         return res;
4797                 }
4798         }
4799
4800         return NULL;
4801 }
4802
4803 static struct ata_port_info sata_port_info;
4804
4805 /**
4806  * ipr_target_alloc - Prepare for commands to a SCSI target
4807  * @starget:    scsi target struct
4808  *
4809  * If the device is a SATA device, this function allocates an
4810  * ATA port with libata, else it does nothing.
4811  *
4812  * Return value:
4813  *      0 on success / non-0 on failure
4814  **/
4815 static int ipr_target_alloc(struct scsi_target *starget)
4816 {
4817         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4818         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4819         struct ipr_sata_port *sata_port;
4820         struct ata_port *ap;
4821         struct ipr_resource_entry *res;
4822         unsigned long lock_flags;
4823
4824         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4825         res = ipr_find_starget(starget);
4826         starget->hostdata = NULL;
4827
4828         if (res && ipr_is_gata(res)) {
4829                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4830                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4831                 if (!sata_port)
4832                         return -ENOMEM;
4833
4834                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4835                 if (ap) {
4836                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4837                         sata_port->ioa_cfg = ioa_cfg;
4838                         sata_port->ap = ap;
4839                         sata_port->res = res;
4840
4841                         res->sata_port = sata_port;
4842                         ap->private_data = sata_port;
4843                         starget->hostdata = sata_port;
4844                 } else {
4845                         kfree(sata_port);
4846                         return -ENOMEM;
4847                 }
4848         }
4849         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4850
4851         return 0;
4852 }
4853
4854 /**
4855  * ipr_target_destroy - Destroy a SCSI target
4856  * @starget:    scsi target struct
4857  *
4858  * If the device was a SATA device, this function frees the libata
4859  * ATA port, else it does nothing.
4860  *
4861  **/
4862 static void ipr_target_destroy(struct scsi_target *starget)
4863 {
4864         struct ipr_sata_port *sata_port = starget->hostdata;
4865         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4866         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4867
4868         if (ioa_cfg->sis64) {
4869                 if (!ipr_find_starget(starget)) {
4870                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4871                                 clear_bit(starget->id, ioa_cfg->array_ids);
4872                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4873                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4874                         else if (starget->channel == 0)
4875                                 clear_bit(starget->id, ioa_cfg->target_ids);
4876                 }
4877         }
4878
4879         if (sata_port) {
4880                 starget->hostdata = NULL;
4881                 ata_sas_port_destroy(sata_port->ap);
4882                 kfree(sata_port);
4883         }
4884 }
4885
4886 /**
4887  * ipr_find_sdev - Find device based on bus/target/lun.
4888  * @sdev:       scsi device struct
4889  *
4890  * Return value:
4891  *      resource entry pointer if found / NULL if not found
4892  **/
4893 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4894 {
4895         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4896         struct ipr_resource_entry *res;
4897
4898         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4899                 if ((res->bus == sdev->channel) &&
4900                     (res->target == sdev->id) &&
4901                     (res->lun == sdev->lun))
4902                         return res;
4903         }
4904
4905         return NULL;
4906 }
4907
4908 /**
4909  * ipr_slave_destroy - Unconfigure a SCSI device
4910  * @sdev:       scsi device struct
4911  *
4912  * Return value:
4913  *      nothing
4914  **/
4915 static void ipr_slave_destroy(struct scsi_device *sdev)
4916 {
4917         struct ipr_resource_entry *res;
4918         struct ipr_ioa_cfg *ioa_cfg;
4919         unsigned long lock_flags = 0;
4920
4921         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4922
4923         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4924         res = (struct ipr_resource_entry *) sdev->hostdata;
4925         if (res) {
4926                 if (res->sata_port)
4927                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4928                 sdev->hostdata = NULL;
4929                 res->sdev = NULL;
4930                 res->sata_port = NULL;
4931         }
4932         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4933 }
4934
4935 /**
4936  * ipr_slave_configure - Configure a SCSI device
4937  * @sdev:       scsi device struct
4938  *
4939  * This function configures the specified scsi device.
4940  *
4941  * Return value:
4942  *      0 on success
4943  **/
4944 static int ipr_slave_configure(struct scsi_device *sdev)
4945 {
4946         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4947         struct ipr_resource_entry *res;
4948         struct ata_port *ap = NULL;
4949         unsigned long lock_flags = 0;
4950         char buffer[IPR_MAX_RES_PATH_LENGTH];
4951
4952         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4953         res = sdev->hostdata;
4954         if (res) {
4955                 if (ipr_is_af_dasd_device(res))
4956                         sdev->type = TYPE_RAID;
4957                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4958                         sdev->scsi_level = 4;
4959                         sdev->no_uld_attach = 1;
4960                 }
4961                 if (ipr_is_vset_device(res)) {
4962                         sdev->scsi_level = SCSI_SPC_3;
4963                         sdev->no_report_opcodes = 1;
4964                         blk_queue_rq_timeout(sdev->request_queue,
4965                                              IPR_VSET_RW_TIMEOUT);
4966                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4967                 }
4968                 if (ipr_is_gata(res) && res->sata_port)
4969                         ap = res->sata_port->ap;
4970                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4971
4972                 if (ap) {
4973                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4974                         ata_sas_slave_configure(sdev, ap);
4975                 }
4976
4977                 if (ioa_cfg->sis64)
4978                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4979                                     ipr_format_res_path(ioa_cfg,
4980                                 res->res_path, buffer, sizeof(buffer)));
4981                 return 0;
4982         }
4983         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4984         return 0;
4985 }
4986
4987 /**
4988  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4989  * @sdev:       scsi device struct
4990  *
4991  * This function initializes an ATA port so that future commands
4992  * sent through queuecommand will work.
4993  *
4994  * Return value:
4995  *      0 on success
4996  **/
4997 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4998 {
4999         struct ipr_sata_port *sata_port = NULL;
5000         int rc = -ENXIO;
5001
5002         ENTER;
5003         if (sdev->sdev_target)
5004                 sata_port = sdev->sdev_target->hostdata;
5005         if (sata_port) {
5006                 rc = ata_sas_port_init(sata_port->ap);
5007                 if (rc == 0)
5008                         rc = ata_sas_sync_probe(sata_port->ap);
5009         }
5010
5011         if (rc)
5012                 ipr_slave_destroy(sdev);
5013
5014         LEAVE;
5015         return rc;
5016 }
5017
5018 /**
5019  * ipr_slave_alloc - Prepare for commands to a device.
5020  * @sdev:       scsi device struct
5021  *
5022  * This function saves a pointer to the resource entry
5023  * in the scsi device struct if the device exists. We
5024  * can then use this pointer in ipr_queuecommand when
5025  * handling new commands.
5026  *
5027  * Return value:
5028  *      0 on success / -ENXIO if device does not exist
5029  **/
5030 static int ipr_slave_alloc(struct scsi_device *sdev)
5031 {
5032         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5033         struct ipr_resource_entry *res;
5034         unsigned long lock_flags;
5035         int rc = -ENXIO;
5036
5037         sdev->hostdata = NULL;
5038
5039         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5040
5041         res = ipr_find_sdev(sdev);
5042         if (res) {
5043                 res->sdev = sdev;
5044                 res->add_to_ml = 0;
5045                 res->in_erp = 0;
5046                 sdev->hostdata = res;
5047                 if (!ipr_is_naca_model(res))
5048                         res->needs_sync_complete = 1;
5049                 rc = 0;
5050                 if (ipr_is_gata(res)) {
5051                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5052                         return ipr_ata_slave_alloc(sdev);
5053                 }
5054         }
5055
5056         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5057
5058         return rc;
5059 }
5060
5061 /**
5062  * ipr_match_lun - Match function for specified LUN
5063  * @ipr_cmd:    ipr command struct
5064  * @device:             device to match (sdev)
5065  *
5066  * Returns:
5067  *      1 if command matches sdev / 0 if command does not match sdev
5068  **/
5069 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5070 {
5071         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5072                 return 1;
5073         return 0;
5074 }
5075
5076 /**
5077  * ipr_cmnd_is_free - Check if a command is free or not
5078  * @ipr_cmd:    ipr command struct
5079  *
5080  * Returns:
5081  *      true / false
5082  **/
5083 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5084 {
5085         struct ipr_cmnd *loop_cmd;
5086
5087         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5088                 if (loop_cmd == ipr_cmd)
5089                         return true;
5090         }
5091
5092         return false;
5093 }
5094
5095 /**
5096  * ipr_match_res - Match function for specified resource entry
5097  * @ipr_cmd:    ipr command struct
5098  * @resource:   resource entry to match
5099  *
5100  * Returns:
5101  *      1 if command matches the resource entry / 0 if it does not
5102  **/
5103 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5104 {
5105         struct ipr_resource_entry *res = resource;
5106
5107         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5108                 return 1;
5109         return 0;
5110 }
5111
5112 /**
5113  * ipr_wait_for_ops - Wait for matching commands to complete
5114  * @ioa_cfg:    ioa config struct
5115  * @device:             device to match (sdev)
5116  * @match:              match function to use
5117  *
5118  * Returns:
5119  *      SUCCESS / FAILED
5120  **/
5121 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5122                             int (*match)(struct ipr_cmnd *, void *))
5123 {
5124         struct ipr_cmnd *ipr_cmd;
5125         int wait, i;
5126         unsigned long flags;
5127         struct ipr_hrr_queue *hrrq;
5128         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5129         DECLARE_COMPLETION_ONSTACK(comp);
5130
5131         ENTER;
5132         do {
5133                 wait = 0;
5134
5135                 for_each_hrrq(hrrq, ioa_cfg) {
5136                         spin_lock_irqsave(hrrq->lock, flags);
5137                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5138                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5139                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5140                                         if (match(ipr_cmd, device)) {
5141                                                 ipr_cmd->eh_comp = &comp;
5142                                                 wait++;
5143                                         }
5144                                 }
5145                         }
5146                         spin_unlock_irqrestore(hrrq->lock, flags);
5147                 }
5148
5149                 if (wait) {
5150                         timeout = wait_for_completion_timeout(&comp, timeout);
5151
5152                         if (!timeout) {
5153                                 wait = 0;
5154
5155                                 for_each_hrrq(hrrq, ioa_cfg) {
5156                                         spin_lock_irqsave(hrrq->lock, flags);
5157                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5158                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5159                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5160                                                         if (match(ipr_cmd, device)) {
5161                                                                 ipr_cmd->eh_comp = NULL;
5162                                                                 wait++;
5163                                                         }
5164                                                 }
5165                                         }
5166                                         spin_unlock_irqrestore(hrrq->lock, flags);
5167                                 }
5168
5169                                 if (wait)
5170                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5171                                 LEAVE;
5172                                 return wait ? FAILED : SUCCESS;
5173                         }
5174                 }
5175         } while (wait);
5176
5177         LEAVE;
5178         return SUCCESS;
5179 }
5180
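/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/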
5181 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5182 {
5183         struct ipr_ioa_cfg *ioa_cfg;
5184         unsigned long lock_flags = 0;
5185         int rc = SUCCESS;
5186
5187         ENTER;
5188         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5189         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5190
5191         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5192                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5193                 dev_err(&ioa_cfg->pdev->dev,
5194                         "Adapter being reset as a result of error recovery.\n");
5195
5196                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5197                         ioa_cfg->sdt_state = GET_DUMP;
5198         }
5199
5200         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5201         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5202         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5203
5204         /* If we got hit with a host reset while we were already resetting
5205          * the adapter for some reason and that reset failed, fail this request as well. */
5206         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5207                 ipr_trace;
5208                 rc = FAILED;
5209         }
5210
5211         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5212         LEAVE;
5213         return rc;
5214 }
5215
5216 /**
5217  * ipr_device_reset - Reset the device
5218  * @ioa_cfg:    ioa config struct
5219  * @res:                resource entry struct
5220  *
5221  * This function issues a device reset to the affected device.
5222  * If the device is a SCSI device, a LUN reset will be sent
5223  * to the device first. If that does not work, a target reset
5224  * will be sent. If the device is a SATA device, a PHY reset will
5225  * be sent.
5226  *
5227  * Return value:
5228  *      0 on success / non-zero on failure
5229  **/
5230 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5231                             struct ipr_resource_entry *res)
5232 {
5233         struct ipr_cmnd *ipr_cmd;
5234         struct ipr_ioarcb *ioarcb;
5235         struct ipr_cmd_pkt *cmd_pkt;
5236         struct ipr_ioarcb_ata_regs *regs;
5237         u32 ioasc;
5238
5239         ENTER;
5240         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5241         ioarcb = &ipr_cmd->ioarcb;
5242         cmd_pkt = &ioarcb->cmd_pkt;
5243
5244         if (ipr_cmd->ioa_cfg->sis64) {
5245                 regs = &ipr_cmd->i.ata_ioadl.regs;
5246                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5247         } else
5248                 regs = &ioarcb->u.add_data.u.regs;
5249
5250         ioarcb->res_handle = res->res_handle;
5251         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5252         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5253         if (ipr_is_gata(res)) {
5254                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5255                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5256                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5257         }
5258
5259         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5260         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5261         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5262         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5263                 if (ipr_cmd->ioa_cfg->sis64)
5264                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5265                                sizeof(struct ipr_ioasa_gata));
5266                 else
5267                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5268                                sizeof(struct ipr_ioasa_gata));
5269         }
5270
5271         LEAVE;
5272         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5273 }
5274
5275 /**
5276  * ipr_sata_reset - Reset the SATA port
5277  * @link:       SATA link to reset
5278  * @classes:    class of the attached device
 * @deadline:   unused
5279  *
5280  * This function issues a SATA phy reset to the affected ATA link.
5281  *
5282  * Return value:
5283  *      0 on success / non-zero on failure
5284  **/
5285 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5286                                 unsigned long deadline)
5287 {
5288         struct ipr_sata_port *sata_port = link->ap->private_data;
5289         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5290         struct ipr_resource_entry *res;
5291         unsigned long lock_flags = 0;
5292         int rc = -ENXIO, ret;
5293
5294         ENTER;
5295         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5296         while (ioa_cfg->in_reset_reload) {
5297                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5298                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5299                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5300         }
5301
5302         res = sata_port->res;
5303         if (res) {
5304                 rc = ipr_device_reset(ioa_cfg, res);
5305                 *classes = res->ata_class;
5306                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5307
5308                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5309                 if (ret != SUCCESS) {
5310                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5311                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5312                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5313
5314                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5315                 }
5316         } else
5317                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5318
5319         LEAVE;
5320         return rc;
5321 }
5322
5323 /**
5324  * __ipr_eh_dev_reset - Reset the device
5325  * @scsi_cmd:   scsi command struct
5326  *
5327  * This function issues a device reset to the affected device.
5328  * A LUN reset will be sent to the device first. If that does
5329  * not work, a target reset will be sent.
5330  *
5331  * Return value:
5332  *      SUCCESS / FAILED
5333  **/
5334 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5335 {
5336         struct ipr_cmnd *ipr_cmd;
5337         struct ipr_ioa_cfg *ioa_cfg;
5338         struct ipr_resource_entry *res;
5339         struct ata_port *ap;
5340         int rc = 0, i;
5341         struct ipr_hrr_queue *hrrq;
5342
5343         ENTER;
5344         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5345         res = scsi_cmd->device->hostdata;
5346
5347         /*
5348          * If we are currently going through reset/reload, return failed. This will force the
5349          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5350          * reset to complete
5351          */
5352         if (ioa_cfg->in_reset_reload)
5353                 return FAILED;
5354         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5355                 return FAILED;
5356
5357         for_each_hrrq(hrrq, ioa_cfg) {
5358                 spin_lock(&hrrq->_lock);
5359                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5360                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5361
5362                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5363                                 if (!ipr_cmd->qc)
5364                                         continue;
5365                                 if (ipr_cmnd_is_free(ipr_cmd))
5366                                         continue;
5367
5368                                 ipr_cmd->done = ipr_sata_eh_done;
5369                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5370                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5371                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5372                                 }
5373                         }
5374                 }
5375                 spin_unlock(&hrrq->_lock);
5376         }
5377         res->resetting_device = 1;
5378         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5379
5380         if (ipr_is_gata(res) && res->sata_port) {
5381                 ap = res->sata_port->ap;
5382                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5383                 ata_std_error_handler(ap);
5384                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5385         } else
5386                 rc = ipr_device_reset(ioa_cfg, res);
5387         res->resetting_device = 0;
5388         res->reset_occurred = 1;
5389
5390         LEAVE;
5391         return rc ? FAILED : SUCCESS;
5392 }
5393
5394 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5395 {
5396         int rc;
5397         struct ipr_ioa_cfg *ioa_cfg;
5398         struct ipr_resource_entry *res;
5399
5400         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5401         res = cmd->device->hostdata;
5402
5403         if (!res)
5404                 return FAILED;
5405
5406         spin_lock_irq(cmd->device->host->host_lock);
5407         rc = __ipr_eh_dev_reset(cmd);
5408         spin_unlock_irq(cmd->device->host->host_lock);
5409
5410         if (rc == SUCCESS) {
5411                 if (ipr_is_gata(res) && res->sata_port)
5412                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5413                 else
5414                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5415         }
5416
5417         return rc;
5418 }
5419
5420 /**
5421  * ipr_bus_reset_done - Op done function for bus reset.
5422  * @ipr_cmd:    ipr command struct
5423  *
5424  * This function is the op done function for a bus reset
5425  *
5426  * Return value:
5427  *      none
5428  **/
5429 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5430 {
5431         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5432         struct ipr_resource_entry *res;
5433
5434         ENTER;
5435         if (!ioa_cfg->sis64)
5436                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5437                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5438                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5439                                 break;
5440                         }
5441                 }
5442
5443         /*
5444          * If abort has not completed, indicate the reset has, else call the
5445          * abort's done function to wake the sleeping eh thread
5446          */
5447         if (ipr_cmd->sibling->sibling)
5448                 ipr_cmd->sibling->sibling = NULL;
5449         else
5450                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5451
5452         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5453         LEAVE;
5454 }
5455
5456 /**
5457  * ipr_abort_timeout - An abort task has timed out
5458  * @ipr_cmd:    ipr command struct
5459  *
5460  * This function handles the case in which an abort task times out. If this
5461  * happens, we issue a bus reset since we have resources tied
5462  * up that must be freed before returning to the midlayer.
5463  *
5464  * Return value:
5465  *      none
5466  **/
5467 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5468 {
5469         struct ipr_cmnd *reset_cmd;
5470         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5471         struct ipr_cmd_pkt *cmd_pkt;
5472         unsigned long lock_flags = 0;
5473
5474         ENTER;
5475         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5476         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5477                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5478                 return;
5479         }
5480
5481         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5482         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5483         ipr_cmd->sibling = reset_cmd;
5484         reset_cmd->sibling = ipr_cmd;
5485         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5486         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5487         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5488         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5489         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5490
5491         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5492         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5493         LEAVE;
5494 }
5495
5496 /**
5497  * ipr_cancel_op - Cancel specified op
5498  * @scsi_cmd:   scsi command struct
5499  *
5500  * This function cancels specified op.
5501  *
5502  * Return value:
5503  *      SUCCESS / FAILED
5504  **/
5505 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5506 {
5507         struct ipr_cmnd *ipr_cmd;
5508         struct ipr_ioa_cfg *ioa_cfg;
5509         struct ipr_resource_entry *res;
5510         struct ipr_cmd_pkt *cmd_pkt;
5511         u32 ioasc, int_reg;
5512         int i, op_found = 0;
5513         struct ipr_hrr_queue *hrrq;
5514
5515         ENTER;
5516         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5517         res = scsi_cmd->device->hostdata;
5518
5519         /* If we are currently going through reset/reload, return failed.
5520          * This will force the mid-layer to call ipr_eh_host_reset,
5521          * which will then go to sleep and wait for the reset to complete
5522          */
5523         if (ioa_cfg->in_reset_reload ||
5524             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5525                 return FAILED;
5526         if (!res)
5527                 return FAILED;
5528
5529         /*
5530          * If we are aborting a timed out op, chances are that the timeout was caused
5531          * by an as-yet undetected EEH error. In such cases, reading a register will
5532          * trigger the EEH recovery infrastructure.
5533          */
5534         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5535
5536         if (!ipr_is_gscsi(res))
5537                 return FAILED;
5538
5539         for_each_hrrq(hrrq, ioa_cfg) {
5540                 spin_lock(&hrrq->_lock);
5541                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5542                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5543                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5544                                         op_found = 1;
5545                                         break;
5546                                 }
5547                         }
5548                 }
5549                 spin_unlock(&hrrq->_lock);
5550         }
5551
5552         if (!op_found)
5553                 return SUCCESS;
5554
5555         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5556         ipr_cmd->ioarcb.res_handle = res->res_handle;
5557         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5558         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5559         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5560         ipr_cmd->u.sdev = scsi_cmd->device;
5561
5562         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5563                     scsi_cmd->cmnd[0]);
5564         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5565         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5566
5567         /*
5568          * If the abort task timed out and we sent a bus reset, we will get
5569          * one of the following responses to the abort.
5570          */
5571         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5572                 ioasc = 0;
5573                 ipr_trace;
5574         }
5575
5576         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5577         if (!ipr_is_naca_model(res))
5578                 res->needs_sync_complete = 1;
5579
5580         LEAVE;
5581         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5582 }
5583
5584 /**
5585  * ipr_scan_finished - Report whether the device scan has completed
5586  * @shost:          scsi host struct
5587  * @elapsed_time:   elapsed scan time, in jiffies
5588  *
5588  * Return value:
5589  *      0 if scan in progress / 1 if scan is complete
5590  **/
5591 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5592 {
5593         unsigned long lock_flags;
5594         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5595         int rc = 0;
5596
5597         spin_lock_irqsave(shost->host_lock, lock_flags);
5598         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5599                 rc = 1;
5600         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5601                 rc = 1;
5602         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5603         return rc;
5604 }
5605
5606 /**
5607  * ipr_eh_abort - Abort a single op
5608  * @scsi_cmd:   scsi command struct
5609  *
5610  * Return value:
5611  *      SUCCESS / FAILED
5612  **/
5613 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5614 {
5615         unsigned long flags;
5616         int rc;
5617         struct ipr_ioa_cfg *ioa_cfg;
5618
5619         ENTER;
5620
5621         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5622
5623         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5624         rc = ipr_cancel_op(scsi_cmd);
5625         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5626
5627         if (rc == SUCCESS)
5628                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5629         LEAVE;
5630         return rc;
5631 }
5632
5633 /**
5634  * ipr_handle_other_interrupt - Handle "other" interrupts
5635  * @ioa_cfg:    ioa config struct
5636  * @int_reg:    interrupt register
5637  *
5638  * Return value:
5639  *      IRQ_NONE / IRQ_HANDLED
5640  **/
5641 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5642                                               u32 int_reg)
5643 {
5644         irqreturn_t rc = IRQ_HANDLED;
5645         u32 int_mask_reg;
5646
5647         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5648         int_reg &= ~int_mask_reg;
5649
5650         /* If an interrupt on the adapter did not occur, ignore it;
5651          * on SIS-64 adapters, first check for a stage change interrupt.
5652          */
5653         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5654                 if (ioa_cfg->sis64) {
5655                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5656                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5657                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5658
5659                                 /* clear stage change */
5660                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5661                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5662                                 list_del(&ioa_cfg->reset_cmd->queue);
5663                                 del_timer(&ioa_cfg->reset_cmd->timer);
5664                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5665                                 return IRQ_HANDLED;
5666                         }
5667                 }
5668
5669                 return IRQ_NONE;
5670         }
5671
5672         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5673                 /* Mask the interrupt */
5674                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5675                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5676
5677                 list_del(&ioa_cfg->reset_cmd->queue);
5678                 del_timer(&ioa_cfg->reset_cmd->timer);
5679                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5680         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5681                 if (ioa_cfg->clear_isr) {
5682                         if (ipr_debug && printk_ratelimit())
5683                                 dev_err(&ioa_cfg->pdev->dev,
5684                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5685                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5686                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5687                         return IRQ_NONE;
5688                 }
5689         } else {
5690                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5691                         ioa_cfg->ioa_unit_checked = 1;
5692                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5693                         dev_err(&ioa_cfg->pdev->dev,
5694                                 "No Host RRQ. 0x%08X\n", int_reg);
5695                 else
5696                         dev_err(&ioa_cfg->pdev->dev,
5697                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5698
5699                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5700                         ioa_cfg->sdt_state = GET_DUMP;
5701
5702                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5703                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5704         }
5705
5706         return rc;
5707 }
5708
5709 /**
5710  * ipr_isr_eh - Interrupt service routine error handler
5711  * @ioa_cfg:    ioa config struct
5712  * @msg:        message to log
5713  * @number:     number to log with the message
5714  *
5714  * Return value:
5715  *      none
5716  **/
5717 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5718 {
5719         ioa_cfg->errors_logged++;
5720         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5721
5722         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5723                 ioa_cfg->sdt_state = GET_DUMP;
5724
5725         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5726 }
5727
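/**
 * ipr_process_hrrq - Process completed responses from an HRR queue
 * @hrr_queue:  hrr queue to process
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list head to collect completed commands on
 *
 * Return value:
 *      number of responses processed
 **/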
5728 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5729                                                 struct list_head *doneq)
5730 {
5731         u32 ioasc;
5732         u16 cmd_index;
5733         struct ipr_cmnd *ipr_cmd;
5734         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5735         int num_hrrq = 0;
5736
5737         /* If interrupts are disabled, ignore the interrupt */
5738         if (!hrr_queue->allow_interrupts)
5739                 return 0;
5740
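	/* The adapter alternates the toggle bit it writes on each pass
	 * through the circular queue; we track the expected value and flip
	 * it when we wrap, so an entry is valid only while its toggle bit
	 * matches ours.
	 */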
5741         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5742                hrr_queue->toggle_bit) {
5743
5744                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5745                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5746                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5747
5748                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5749                              cmd_index < hrr_queue->min_cmd_id)) {
5750                         ipr_isr_eh(ioa_cfg,
5751                                 "Invalid response handle from IOA: ",
5752                                 cmd_index);
5753                         break;
5754                 }
5755
5756                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5757                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5758
5759                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5760
5761                 list_move_tail(&ipr_cmd->queue, doneq);
5762
5763                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5764                         hrr_queue->hrrq_curr++;
5765                 } else {
5766                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5767                         hrr_queue->toggle_bit ^= 1u;
5768                 }
5769                 num_hrrq++;
5770                 if (budget > 0 && num_hrrq >= budget)
5771                         break;
5772         }
5773
5774         return num_hrrq;
5775 }
5776
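/**
 * ipr_iopoll - irq_poll callback to process completions outside hard
 *              interrupt context
 * @iop:        irq_poll struct for this hrr queue
 * @budget:     maximum number of completions to process
 *
 * Return value:
 *      number of completed ops processed
 **/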
5777 static int ipr_iopoll(struct irq_poll *iop, int budget)
5778 {
5779         struct ipr_ioa_cfg *ioa_cfg;
5780         struct ipr_hrr_queue *hrrq;
5781         struct ipr_cmnd *ipr_cmd, *temp;
5782         unsigned long hrrq_flags;
5783         int completed_ops;
5784         LIST_HEAD(doneq);
5785
5786         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5787         ioa_cfg = hrrq->ioa_cfg;
5788
5789         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5790         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5791
5792         if (completed_ops < budget)
5793                 irq_poll_complete(iop);
5794         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5795
5796         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5797                 list_del(&ipr_cmd->queue);
5798                 del_timer(&ipr_cmd->timer);
5799                 ipr_cmd->fast_done(ipr_cmd);
5800         }
5801
5802         return completed_ops;
5803 }
5804
5805 /**
5806  * ipr_isr - Interrupt service routine
5807  * @irq:        irq number
5808  * @devp:       pointer to hrr queue struct
5809  *
5810  * Return value:
5811  *      IRQ_NONE / IRQ_HANDLED
5812  **/
5813 static irqreturn_t ipr_isr(int irq, void *devp)
5814 {
5815         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5816         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5817         unsigned long hrrq_flags = 0;
5818         u32 int_reg = 0;
5819         int num_hrrq = 0;
5820         int irq_none = 0;
5821         struct ipr_cmnd *ipr_cmd, *temp;
5822         irqreturn_t rc = IRQ_NONE;
5823         LIST_HEAD(doneq);
5824
5825         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5826         /* If interrupts are disabled, ignore the interrupt */
5827         if (!hrrq->allow_interrupts) {
5828                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5829                 return IRQ_NONE;
5830         }
5831
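	/* Drain the HRRQ until it is empty. If the chip requires explicit
	 * interrupt clearing, write back the HRRQ updated bit after each
	 * batch. On an apparently spurious interrupt, re-read the sense
	 * register once before falling through to IRQ_NONE handling.
	 */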
5832         while (1) {
5833                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5834                         rc =  IRQ_HANDLED;
5835
5836                         if (!ioa_cfg->clear_isr)
5837                                 break;
5838
5839                         /* Clear the PCI interrupt */
5840                         num_hrrq = 0;
5841                         do {
5842                                 writel(IPR_PCII_HRRQ_UPDATED,
5843                                      ioa_cfg->regs.clr_interrupt_reg32);
5844                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5845                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5846                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5847
5848                 } else if (rc == IRQ_NONE && irq_none == 0) {
5849                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5850                         irq_none++;
5851                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5852                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5853                         ipr_isr_eh(ioa_cfg,
5854                                 "Error clearing HRRQ: ", num_hrrq);
5855                         rc = IRQ_HANDLED;
5856                         break;
5857                 } else
5858                         break;
5859         }
5860
5861         if (unlikely(rc == IRQ_NONE))
5862                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5863
5864         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5865         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5866                 list_del(&ipr_cmd->queue);
5867                 del_timer(&ipr_cmd->timer);
5868                 ipr_cmd->fast_done(ipr_cmd);
5869         }
5870         return rc;
5871 }
5872
5873 /**
5874  * ipr_isr_mhrrq - Interrupt service routine for multiple HRR queues
5875  * @irq:        irq number
5876  * @devp:       pointer to hrr queue struct
5877  *
5878  * Return value:
5879  *      IRQ_NONE / IRQ_HANDLED
5880  **/
5881 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5882 {
5883         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5884         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5885         unsigned long hrrq_flags = 0;
5886         struct ipr_cmnd *ipr_cmd, *temp;
5887         irqreturn_t rc = IRQ_NONE;
5888         LIST_HEAD(doneq);
5889
5890         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5891
5892         /* If interrupts are disabled, ignore the interrupt */
5893         if (!hrrq->allow_interrupts) {
5894                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5895                 return IRQ_NONE;
5896         }
5897
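	/* With iopoll enabled (SIS-64, multiple MSI-X vectors), just
	 * schedule the irq_poll handler when new responses are pending and
	 * let ipr_iopoll() drain the queue; otherwise process completions
	 * here in interrupt context.
	 */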
5898         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5899                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5900                        hrrq->toggle_bit) {
5901                         irq_poll_sched(&hrrq->iopoll);
5902                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5903                         return IRQ_HANDLED;
5904                 }
5905         } else {
5906                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5907                         hrrq->toggle_bit)
5908
5909                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5910                                 rc =  IRQ_HANDLED;
5911         }
5912
5913         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5914
5915         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5916                 list_del(&ipr_cmd->queue);
5917                 del_timer(&ipr_cmd->timer);
5918                 ipr_cmd->fast_done(ipr_cmd);
5919         }
5920         return rc;
5921 }
5922
5923 /**
5924  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5925  * @ioa_cfg:    ioa config struct
5926  * @ipr_cmd:    ipr command struct
5927  *
5928  * Return value:
5929  *      0 on success / -1 on failure
5930  **/
5931 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5932                              struct ipr_cmnd *ipr_cmd)
5933 {
5934         int i, nseg;
5935         struct scatterlist *sg;
5936         u32 length;
5937         u32 ioadl_flags = 0;
5938         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5939         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5940         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5941
5942         length = scsi_bufflen(scsi_cmd);
5943         if (!length)
5944                 return 0;
5945
5946         nseg = scsi_dma_map(scsi_cmd);
5947         if (nseg < 0) {
5948                 if (printk_ratelimit())
5949                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5950                 return -1;
5951         }
5952
5953         ipr_cmd->dma_use_sg = nseg;
5954
5955         ioarcb->data_transfer_length = cpu_to_be32(length);
5956         ioarcb->ioadl_len =
5957                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5958
5959         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5960                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5961                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5962         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5963                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5964
5965         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5966                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5967                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5968                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5969         }
5970
5971         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5972         return 0;
5973 }
5974
5975 /**
5976  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5977  * @ioa_cfg:    ioa config struct
5978  * @ipr_cmd:    ipr command struct
5979  *
5980  * Return value:
5981  *      0 on success / -1 on failure
5982  **/
5983 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5984                            struct ipr_cmnd *ipr_cmd)
5985 {
5986         int i, nseg;
5987         struct scatterlist *sg;
5988         u32 length;
5989         u32 ioadl_flags = 0;
5990         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5991         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5992         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5993
5994         length = scsi_bufflen(scsi_cmd);
5995         if (!length)
5996                 return 0;
5997
5998         nseg = scsi_dma_map(scsi_cmd);
5999         if (nseg < 0) {
6000                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6001                 return -1;
6002         }
6003
6004         ipr_cmd->dma_use_sg = nseg;
6005
6006         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6007                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6008                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6009                 ioarcb->data_transfer_length = cpu_to_be32(length);
6010                 ioarcb->ioadl_len =
6011                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6012         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6013                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6014                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6015                 ioarcb->read_ioadl_len =
6016                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6017         }
6018
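	/* If the scatter/gather list fits in the spare add_data area of the
	 * IOARCB itself, embed it there instead of pointing the adapter at
	 * the external IOADL in the command block.
	 */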
6019         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6020                 ioadl = ioarcb->u.add_data.u.ioadl;
6021                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6022                                     offsetof(struct ipr_ioarcb, u.add_data));
6023                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6024         }
6025
6026         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6027                 ioadl[i].flags_and_data_len =
6028                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6029                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6030         }
6031
6032         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6033         return 0;
6034 }
6035
6036 /**
6037  * __ipr_erp_done - Process completion of ERP for a device
6038  * @ipr_cmd:            ipr command struct
6039  *
6040  * This function copies the sense buffer into the scsi_cmd
6041  * struct and calls the scsi_done function.
6042  *
6043  * Return value:
6044  *      nothing
6045  **/
6046 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6047 {
6048         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6049         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6050         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6051
6052         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6053                 scsi_cmd->result |= (DID_ERROR << 16);
6054                 scmd_printk(KERN_ERR, scsi_cmd,
6055                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6056         } else {
6057                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6058                        SCSI_SENSE_BUFFERSIZE);
6059         }
6060
6061         if (res) {
6062                 if (!ipr_is_naca_model(res))
6063                         res->needs_sync_complete = 1;
6064                 res->in_erp = 0;
6065         }
6066         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6067         scsi_cmd->scsi_done(scsi_cmd);
6068         if (ipr_cmd->eh_comp)
6069                 complete(ipr_cmd->eh_comp);
6070         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6071 }
6072
6073 /**
6074  * ipr_erp_done - Process completion of ERP for a device
6075  * @ipr_cmd:            ipr command struct
6076  *
6077  * This function copies the sense buffer into the scsi_cmd
6078  * struct and calls the scsi_done function.
6079  *
6080  * Return value:
6081  *      nothing
6082  **/
6083 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6084 {
6085         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6086         unsigned long hrrq_flags;
6087
6088         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6089         __ipr_erp_done(ipr_cmd);
6090         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6091 }
6092
6093 /**
6094  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6095  * @ipr_cmd:    ipr command struct
6096  *
6097  * Return value:
6098  *      none
6099  **/
6100 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6101 {
6102         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6103         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6104         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6105
6106         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6107         ioarcb->data_transfer_length = 0;
6108         ioarcb->read_data_transfer_length = 0;
6109         ioarcb->ioadl_len = 0;
6110         ioarcb->read_ioadl_len = 0;
6111         ioasa->hdr.ioasc = 0;
6112         ioasa->hdr.residual_data_len = 0;
6113
6114         if (ipr_cmd->ioa_cfg->sis64)
6115                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6116                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6117         else {
6118                 ioarcb->write_ioadl_addr =
6119                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6120                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6121         }
6122 }
6123
6124 /**
6125  * __ipr_erp_request_sense - Send request sense to a device
6126  * @ipr_cmd:    ipr command struct
6127  *
6128  * This function sends a request sense to a device as a result
6129  * of a check condition.
6130  *
6131  * Return value:
6132  *      nothing
6133  **/
6134 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6135 {
6136         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6137         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6138
6139         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6140                 __ipr_erp_done(ipr_cmd);
6141                 return;
6142         }
6143
6144         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6145
6146         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6147         cmd_pkt->cdb[0] = REQUEST_SENSE;
6148         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6149         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6150         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6151         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6152
6153         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6154                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6155
6156         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6157                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6158 }
6159
6160 /**
6161  * ipr_erp_request_sense - Send request sense to a device
6162  * @ipr_cmd:    ipr command struct
6163  *
6164  * This function sends a request sense to a device as a result
6165  * of a check condition.
6166  *
6167  * Return value:
6168  *      nothing
6169  **/
6170 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6171 {
6172         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6173         unsigned long hrrq_flags;
6174
6175         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6176         __ipr_erp_request_sense(ipr_cmd);
6177         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6178 }
6179
6180 /**
6181  * ipr_erp_cancel_all - Send cancel all to a device
6182  * @ipr_cmd:    ipr command struct
6183  *
6184  * This function sends a cancel all to a device to clear the
6185  * queue. If we are running TCQ on the device, QERR is set to 1,
6186  * which means all outstanding ops have been dropped on the floor.
6187  * Cancel all will return them to us.
6188  *
6189  * Return value:
6190  *      nothing
6191  **/
6192 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6193 {
6194         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6195         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6196         struct ipr_cmd_pkt *cmd_pkt;
6197
6198         res->in_erp = 1;
6199
6200         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6201
6202         if (!scsi_cmd->device->simple_tags) {
6203                 __ipr_erp_request_sense(ipr_cmd);
6204                 return;
6205         }
6206
6207         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6208         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6209         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6210
6211         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6212                    IPR_CANCEL_ALL_TIMEOUT);
6213 }
6214
6215 /**
6216  * ipr_dump_ioasa - Dump contents of IOASA
6217  * @ioa_cfg:    ioa config struct
6218  * @ipr_cmd:    ipr command struct
6219  * @res:                resource entry struct
6220  *
6221  * This function is invoked by the interrupt handler when ops
6222  * fail. It will log the IOASA if appropriate. Only called
6223  * for GPDD ops.
6224  *
6225  * Return value:
6226  *      none
6227  **/
6228 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6229                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6230 {
6231         int i;
6232         u16 data_len;
6233         u32 ioasc, fd_ioasc;
6234         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6235         __be32 *ioasa_data = (__be32 *)ioasa;
6236         int error_index;
6237
6238         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6239         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6240
6241         if (0 == ioasc)
6242                 return;
6243
6244         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6245                 return;
6246
6247         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6248                 error_index = ipr_get_error(fd_ioasc);
6249         else
6250                 error_index = ipr_get_error(ioasc);
6251
6252         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6253                 /* Don't log an error if the IOA already logged one */
6254                 if (ioasa->hdr.ilid != 0)
6255                         return;
6256
6257                 if (!ipr_is_gscsi(res))
6258                         return;
6259
6260                 if (ipr_error_table[error_index].log_ioasa == 0)
6261                         return;
6262         }
6263
6264         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6265
6266         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6267         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6268                 data_len = sizeof(struct ipr_ioasa64);
6269         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6270                 data_len = sizeof(struct ipr_ioasa);
6271
6272         ipr_err("IOASA Dump:\n");
6273
6274         for (i = 0; i < data_len / 4; i += 4) {
6275                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6276                         be32_to_cpu(ioasa_data[i]),
6277                         be32_to_cpu(ioasa_data[i+1]),
6278                         be32_to_cpu(ioasa_data[i+2]),
6279                         be32_to_cpu(ioasa_data[i+3]));
6280         }
6281 }
6282
6283 /**
6284  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6285  * @ipr_cmd:    ipr command struct
6287  *
6288  * Return value:
6289  *      none
6290  **/
6291 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6292 {
6293         u32 failing_lba;
6294         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6295         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6296         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6297         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6298
6299         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6300
6301         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6302                 return;
6303
6304         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6305
6306         if (ipr_is_vset_device(res) &&
6307             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6308             ioasa->u.vset.failing_lba_hi != 0) {
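		/* The failing LBA has bits set above 32, which will not fit
		 * in the information field of fixed format sense data, so
		 * build descriptor format sense data (response code 0x72)
		 * instead.
		 */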
6309                 sense_buf[0] = 0x72;
6310                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6311                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6312                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6313
6314                 sense_buf[7] = 12;
6315                 sense_buf[8] = 0;
6316                 sense_buf[9] = 0x0A;
6317                 sense_buf[10] = 0x80;
6318
6319                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6320
6321                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6322                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6323                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6324                 sense_buf[15] = failing_lba & 0x000000ff;
6325
6326                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6327
6328                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6329                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6330                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6331                 sense_buf[19] = failing_lba & 0x000000ff;
6332         } else {
6333                 sense_buf[0] = 0x70;
6334                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6335                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6336                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6337
6338                 /* Illegal request */
6339                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6340                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6341                         sense_buf[7] = 10;      /* additional length */
6342
6343                         /* IOARCB was in error */
6344                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6345                                 sense_buf[15] = 0xC0;
6346                         else    /* Parameter data was invalid */
6347                                 sense_buf[15] = 0x80;
6348
6349                         sense_buf[16] =
6350                             ((IPR_FIELD_POINTER_MASK &
6351                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6352                         sense_buf[17] =
6353                             (IPR_FIELD_POINTER_MASK &
6354                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6355                 } else {
6356                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6357                                 if (ipr_is_vset_device(res))
6358                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6359                                 else
6360                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6361
6362                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6363                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6364                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6365                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6366                                 sense_buf[6] = failing_lba & 0x000000ff;
6367                         }
6368
6369                         sense_buf[7] = 6;       /* additional length */
6370                 }
6371         }
6372 }
6373
6374 /**
6375  * ipr_get_autosense - Copy autosense data to sense buffer
6376  * @ipr_cmd:    ipr command struct
6377  *
6378  * This function copies the autosense buffer to the buffer
6379  * in the scsi_cmd, if there is autosense available.
6380  *
6381  * Return value:
6382  *      1 if autosense was available / 0 if not
6383  **/
6384 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6385 {
6386         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6387         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6388
6389         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6390                 return 0;
6391
6392         if (ipr_cmd->ioa_cfg->sis64)
6393                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6394                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6395                            SCSI_SENSE_BUFFERSIZE));
6396         else
6397                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6398                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6399                            SCSI_SENSE_BUFFERSIZE));
6400         return 1;
6401 }
6402
6403 /**
6404  * ipr_erp_start - Process an error response for a SCSI op
6405  * @ioa_cfg:    ioa config struct
6406  * @ipr_cmd:    ipr command struct
6407  *
6408  * This function determines whether or not to initiate ERP
6409  * on the affected device.
6410  *
6411  * Return value:
6412  *      nothing
6413  **/
6414 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6415                               struct ipr_cmnd *ipr_cmd)
6416 {
6417         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6418         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6419         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6420         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6421
6422         if (!res) {
6423                 __ipr_scsi_eh_done(ipr_cmd);
6424                 return;
6425         }
6426
6427         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6428                 ipr_gen_sense(ipr_cmd);
6429
6430         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6431
6432         switch (masked_ioasc) {
6433         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6434                 if (ipr_is_naca_model(res))
6435                         scsi_cmd->result |= (DID_ABORT << 16);
6436                 else
6437                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6438                 break;
6439         case IPR_IOASC_IR_RESOURCE_HANDLE:
6440         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6441                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6442                 break;
6443         case IPR_IOASC_HW_SEL_TIMEOUT:
6444                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6445                 if (!ipr_is_naca_model(res))
6446                         res->needs_sync_complete = 1;
6447                 break;
6448         case IPR_IOASC_SYNC_REQUIRED:
6449                 if (!res->in_erp)
6450                         res->needs_sync_complete = 1;
6451                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6452                 break;
6453         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6454         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6455                 /*
6456                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6457                  * so SCSI mid-layer and upper layers handle it accordingly.
6458                  */
6459                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6460                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6461                 break;
6462         case IPR_IOASC_BUS_WAS_RESET:
6463         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6464                 /*
6465                  * Report the bus reset and ask for a retry. The device
6466                  * will return CHECK CONDITION/UNIT ATTENTION on the next command.
6467                  */
6468                 if (!res->resetting_device)
6469                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6470                 scsi_cmd->result |= (DID_ERROR << 16);
6471                 if (!ipr_is_naca_model(res))
6472                         res->needs_sync_complete = 1;
6473                 break;
6474         case IPR_IOASC_HW_DEV_BUS_STATUS:
6475                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6476                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6477                         if (!ipr_get_autosense(ipr_cmd)) {
6478                                 if (!ipr_is_naca_model(res)) {
6479                                         ipr_erp_cancel_all(ipr_cmd);
6480                                         return;
6481                                 }
6482                         }
6483                 }
6484                 if (!ipr_is_naca_model(res))
6485                         res->needs_sync_complete = 1;
6486                 break;
6487         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6488                 break;
6489         case IPR_IOASC_IR_NON_OPTIMIZED:
6490                 if (res->raw_mode) {
6491                         res->raw_mode = 0;
6492                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6493                 } else
6494                         scsi_cmd->result |= (DID_ERROR << 16);
6495                 break;
6496         default:
6497                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6498                         scsi_cmd->result |= (DID_ERROR << 16);
6499                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6500                         res->needs_sync_complete = 1;
6501                 break;
6502         }
6503
6504         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6505         scsi_cmd->scsi_done(scsi_cmd);
6506         if (ipr_cmd->eh_comp)
6507                 complete(ipr_cmd->eh_comp);
6508         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6509 }
6510
6511 /**
6512  * ipr_scsi_done - mid-layer done function
6513  * @ipr_cmd:    ipr command struct
6514  *
6515  * This function is invoked by the interrupt handler for
6516  * ops generated by the SCSI mid-layer
6517  *
6518  * Return value:
6519  *      none
6520  **/
6521 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6522 {
6523         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6524         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6525         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6526         unsigned long lock_flags;
6527
6528         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6529
6530         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6531                 scsi_dma_unmap(scsi_cmd);
6532
6533                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6534                 scsi_cmd->scsi_done(scsi_cmd);
6535                 if (ipr_cmd->eh_comp)
6536                         complete(ipr_cmd->eh_comp);
6537                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6538                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6539         } else {
6540                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6541                 spin_lock(&ipr_cmd->hrrq->_lock);
6542                 ipr_erp_start(ioa_cfg, ipr_cmd);
6543                 spin_unlock(&ipr_cmd->hrrq->_lock);
6544                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6545         }
6546 }
6547
6548 /**
6549  * ipr_queuecommand - Queue a mid-layer request
6550  * @shost:              scsi host struct
6551  * @scsi_cmd:   scsi command struct
6552  *
6553  * This function queues a request generated by the mid-layer.
6554  *
6555  * Return value:
6556  *      0 on success
6557  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6558  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6559  **/
6560 static int ipr_queuecommand(struct Scsi_Host *shost,
6561                             struct scsi_cmnd *scsi_cmd)
6562 {
6563         struct ipr_ioa_cfg *ioa_cfg;
6564         struct ipr_resource_entry *res;
6565         struct ipr_ioarcb *ioarcb;
6566         struct ipr_cmnd *ipr_cmd;
6567         unsigned long hrrq_flags, lock_flags;
6568         int rc;
6569         struct ipr_hrr_queue *hrrq;
6570         int hrrq_id;
6571
6572         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6573
6574         scsi_cmd->result = (DID_OK << 16);
6575         res = scsi_cmd->device->hostdata;
6576
6577         if (ipr_is_gata(res) && res->sata_port) {
6578                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6579                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6580                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6581                 return rc;
6582         }
6583
6584         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6585         hrrq = &ioa_cfg->hrrq[hrrq_id];
6586
6587         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6588         /*
6589          * We are currently blocking all devices due to a host reset.
6590          * We have told the host to stop giving us new requests, but
6591          * ERP ops don't count. FIXME
6592          */
6593         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6594                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6595                 return SCSI_MLQUEUE_HOST_BUSY;
6596         }
6597
6598         /*
6599          * FIXME - Create a scsi_set_host_offline interface
6600          *  so the ioa_is_dead check can be removed
6601          */
6602         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6603                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6604                 goto err_nodev;
6605         }
6606
6607         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6608         if (ipr_cmd == NULL) {
6609                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6610                 return SCSI_MLQUEUE_HOST_BUSY;
6611         }
6612         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6613
6614         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6615         ioarcb = &ipr_cmd->ioarcb;
6616
6617         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6618         ipr_cmd->scsi_cmd = scsi_cmd;
6619         ipr_cmd->done = ipr_scsi_eh_done;
6620
6621         if (ipr_is_gscsi(res)) {
6622                 if (scsi_cmd->underflow == 0)
6623                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6624
6625                 if (res->reset_occurred) {
6626                         res->reset_occurred = 0;
6627                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6628                 }
6629         }
6630
6631         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6632                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6633
6634                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6635                 if (scsi_cmd->flags & SCMD_TAGGED)
6636                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6637                 else
6638                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6639         }
6640
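	/* Vendor-specific CDBs (opcodes 0xC0 and above) are sent to the
	 * adapter as IOA commands when the device is not a generic SCSI
	 * device, or when the op is a query resource state command.
	 */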
6641         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6642             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6643                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6644         }
6645         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6646                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6647
6648                 if (scsi_cmd->underflow == 0)
6649                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6650         }
6651
6652         if (ioa_cfg->sis64)
6653                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6654         else
6655                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6656
6657         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6658         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6659                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6660                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6661                 if (!rc)
6662                         scsi_dma_unmap(scsi_cmd);
6663                 return SCSI_MLQUEUE_HOST_BUSY;
6664         }
6665
6666         if (unlikely(hrrq->ioa_is_dead)) {
6667                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6668                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6669                 scsi_dma_unmap(scsi_cmd);
6670                 goto err_nodev;
6671         }
6672
6673         ioarcb->res_handle = res->res_handle;
6674         if (res->needs_sync_complete) {
6675                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6676                 res->needs_sync_complete = 0;
6677         }
6678         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6679         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6680         ipr_send_command(ipr_cmd);
6681         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6682         return 0;
6683
6684 err_nodev:
6685         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6686         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6687         scsi_cmd->result = (DID_NO_CONNECT << 16);
6688         scsi_cmd->scsi_done(scsi_cmd);
6689         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6690         return 0;
6691 }
6692
6693 /**
6694  * ipr_ioctl - IOCTL handler
6695  * @sdev:       scsi device struct
6696  * @cmd:        IOCTL cmd
6697  * @arg:        IOCTL arg
6698  *
6699  * Return value:
6700  *      0 on success / other on failure
6701  **/
6702 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6703 {
6704         struct ipr_resource_entry *res;
6705
6706         res = (struct ipr_resource_entry *)sdev->hostdata;
6707         if (res && ipr_is_gata(res)) {
6708                 if (cmd == HDIO_GET_IDENTITY)
6709                         return -ENOTTY;
6710                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6711         }
6712
6713         return -EINVAL;
6714 }
6715
6716 /**
6717  * ipr_ioa_info - Get information about the card/driver
6718  * @host:       scsi host struct
6719  *
6720  * Return value:
6721  *      pointer to buffer with description string
6722  **/
6723 static const char *ipr_ioa_info(struct Scsi_Host *host)
6724 {
6725         static char buffer[512];
6726         struct ipr_ioa_cfg *ioa_cfg;
6727         unsigned long lock_flags = 0;
6728
6729         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6730
6731         spin_lock_irqsave(host->host_lock, lock_flags);
6732         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6733         spin_unlock_irqrestore(host->host_lock, lock_flags);
6734
6735         return buffer;
6736 }
6737
6738 static struct scsi_host_template driver_template = {
6739         .module = THIS_MODULE,
6740         .name = "IPR",
6741         .info = ipr_ioa_info,
6742         .ioctl = ipr_ioctl,
6743         .queuecommand = ipr_queuecommand,
6744         .eh_abort_handler = ipr_eh_abort,
6745         .eh_device_reset_handler = ipr_eh_dev_reset,
6746         .eh_host_reset_handler = ipr_eh_host_reset,
6747         .slave_alloc = ipr_slave_alloc,
6748         .slave_configure = ipr_slave_configure,
6749         .slave_destroy = ipr_slave_destroy,
6750         .scan_finished = ipr_scan_finished,
6751         .target_alloc = ipr_target_alloc,
6752         .target_destroy = ipr_target_destroy,
6753         .change_queue_depth = ipr_change_queue_depth,
6754         .bios_param = ipr_biosparam,
6755         .can_queue = IPR_MAX_COMMANDS,
6756         .this_id = -1,
6757         .sg_tablesize = IPR_MAX_SGLIST,
6758         .max_sectors = IPR_IOA_MAX_SECTORS,
6759         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6760         .use_clustering = ENABLE_CLUSTERING,
6761         .shost_attrs = ipr_ioa_attrs,
6762         .sdev_attrs = ipr_dev_attrs,
6763         .proc_name = IPR_NAME,
6764 };
6765
6766 /**
6767  * ipr_ata_phy_reset - libata phy_reset handler
6768  * @ap:         ata port to reset
6769  *
6770  **/
6771 static void ipr_ata_phy_reset(struct ata_port *ap)
6772 {
6773         unsigned long flags;
6774         struct ipr_sata_port *sata_port = ap->private_data;
6775         struct ipr_resource_entry *res = sata_port->res;
6776         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6777         int rc;
6778
6779         ENTER;
6780         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6781         while (ioa_cfg->in_reset_reload) {
6782                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6783                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6784                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6785         }
6786
6787         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6788                 goto out_unlock;
6789
6790         rc = ipr_device_reset(ioa_cfg, res);
6791
6792         if (rc) {
6793                 ap->link.device[0].class = ATA_DEV_NONE;
6794                 goto out_unlock;
6795         }
6796
6797         ap->link.device[0].class = res->ata_class;
6798         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6799                 ap->link.device[0].class = ATA_DEV_NONE;
6800
6801 out_unlock:
6802         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6803         LEAVE;
6804 }
6805
6806 /**
6807  * ipr_ata_post_internal - Cleanup after an internal command
6808  * @qc: ATA queued command
6809  *
6810  * Return value:
6811  *      none
6812  **/
6813 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6814 {
6815         struct ipr_sata_port *sata_port = qc->ap->private_data;
6816         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6817         struct ipr_cmnd *ipr_cmd;
6818         struct ipr_hrr_queue *hrrq;
6819         unsigned long flags;
6820
6821         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6822         while (ioa_cfg->in_reset_reload) {
6823                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6824                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6825                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6826         }
6827
6828         for_each_hrrq(hrrq, ioa_cfg) {
6829                 spin_lock(&hrrq->_lock);
6830                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6831                         if (ipr_cmd->qc == qc) {
6832                                 ipr_device_reset(ioa_cfg, sata_port->res);
6833                                 break;
6834                         }
6835                 }
6836                 spin_unlock(&hrrq->_lock);
6837         }
6838         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6839 }
6840
6841 /**
6842  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6843  * @regs:       destination
6844  * @tf: source ATA taskfile
6845  *
6846  * Return value:
6847  *      none
6848  **/
6849 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6850                              struct ata_taskfile *tf)
6851 {
6852         regs->feature = tf->feature;
6853         regs->nsect = tf->nsect;
6854         regs->lbal = tf->lbal;
6855         regs->lbam = tf->lbam;
6856         regs->lbah = tf->lbah;
6857         regs->device = tf->device;
6858         regs->command = tf->command;
6859         regs->hob_feature = tf->hob_feature;
6860         regs->hob_nsect = tf->hob_nsect;
6861         regs->hob_lbal = tf->hob_lbal;
6862         regs->hob_lbam = tf->hob_lbam;
6863         regs->hob_lbah = tf->hob_lbah;
6864         regs->ctl = tf->ctl;
6865 }
6866
6867 /**
6868  * ipr_sata_done - done function for SATA commands
6869  * @ipr_cmd:    ipr command struct
6870  *
6871  * This function is invoked by the interrupt handler for
6872  * ops generated by the SCSI mid-layer to SATA devices
6873  *
6874  * Return value:
6875  *      none
6876  **/
6877 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6878 {
6879         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6880         struct ata_queued_cmd *qc = ipr_cmd->qc;
6881         struct ipr_sata_port *sata_port = qc->ap->private_data;
6882         struct ipr_resource_entry *res = sata_port->res;
6883         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6884
6885         spin_lock(&ipr_cmd->hrrq->_lock);
6886         if (ipr_cmd->ioa_cfg->sis64)
6887                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6888                        sizeof(struct ipr_ioasa_gata));
6889         else
6890                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6891                        sizeof(struct ipr_ioasa_gata));
6892         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6893
6894         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6895                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6896
6897         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6898                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6899         else
6900                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6901         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6902         spin_unlock(&ipr_cmd->hrrq->_lock);
6903         ata_qc_complete(qc);
6904 }
6905
6906 /**
6907  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6908  * @ipr_cmd:    ipr command struct
6909  * @qc:         ATA queued command
6910  *
6911  **/
6912 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6913                                   struct ata_queued_cmd *qc)
6914 {
6915         u32 ioadl_flags = 0;
6916         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6917         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6918         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6919         int len = qc->nbytes;
6920         struct scatterlist *sg;
6921         unsigned int si;
6922         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6923
6924         if (len == 0)
6925                 return;
6926
6927         if (qc->dma_dir == DMA_TO_DEVICE) {
6928                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6929                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6930         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6931                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6932
6933         ioarcb->data_transfer_length = cpu_to_be32(len);
6934         ioarcb->ioadl_len =
6935                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6936         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6937                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6938
6939         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6940                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6941                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6942                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6943
6944                 last_ioadl64 = ioadl64;
6945                 ioadl64++;
6946         }
6947
6948         if (likely(last_ioadl64))
6949                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6950 }
6951
6952 /**
6953  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6954  * @ipr_cmd:    ipr command struct
6955  * @qc:         ATA queued command
6956  *
6957  **/
6958 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6959                                 struct ata_queued_cmd *qc)
6960 {
6961         u32 ioadl_flags = 0;
6962         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6963         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6964         struct ipr_ioadl_desc *last_ioadl = NULL;
6965         int len = qc->nbytes;
6966         struct scatterlist *sg;
6967         unsigned int si;
6968
6969         if (len == 0)
6970                 return;
6971
6972         if (qc->dma_dir == DMA_TO_DEVICE) {
6973                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6974                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6975                 ioarcb->data_transfer_length = cpu_to_be32(len);
6976                 ioarcb->ioadl_len =
6977                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6978         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6979                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6980                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6981                 ioarcb->read_ioadl_len =
6982                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6983         }
6984
6985         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6986                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6987                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6988
6989                 last_ioadl = ioadl;
6990                 ioadl++;
6991         }
6992
6993         if (likely(last_ioadl))
6994                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6995 }
6996
6997 /**
6998  * ipr_qc_defer - Get a free ipr_cmd
6999  * @qc: queued command
7000  *
7001  * Return value:
7002  *      0 if success / ATA_DEFER_LINK if no command block is free
7003  **/
7004 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7005 {
7006         struct ata_port *ap = qc->ap;
7007         struct ipr_sata_port *sata_port = ap->private_data;
7008         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7009         struct ipr_cmnd *ipr_cmd;
7010         struct ipr_hrr_queue *hrrq;
7011         int hrrq_id;
7012
7013         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7014         hrrq = &ioa_cfg->hrrq[hrrq_id];
7015
7016         qc->lldd_task = NULL;
7017         spin_lock(&hrrq->_lock);
7018         if (unlikely(hrrq->ioa_is_dead)) {
7019                 spin_unlock(&hrrq->_lock);
7020                 return 0;
7021         }
7022
7023         if (unlikely(!hrrq->allow_cmds)) {
7024                 spin_unlock(&hrrq->_lock);
7025                 return ATA_DEFER_LINK;
7026         }
7027
7028         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7029         if (ipr_cmd == NULL) {
7030                 spin_unlock(&hrrq->_lock);
7031                 return ATA_DEFER_LINK;
7032         }
7033
7034         qc->lldd_task = ipr_cmd;
7035         spin_unlock(&hrrq->_lock);
7036         return 0;
7037 }
7038
7039 /**
7040  * ipr_qc_issue - Issue a SATA qc to a device
7041  * @qc: queued command
7042  *
7043  * Return value:
7044  *      0 if success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
7045  **/
7046 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7047 {
7048         struct ata_port *ap = qc->ap;
7049         struct ipr_sata_port *sata_port = ap->private_data;
7050         struct ipr_resource_entry *res = sata_port->res;
7051         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7052         struct ipr_cmnd *ipr_cmd;
7053         struct ipr_ioarcb *ioarcb;
7054         struct ipr_ioarcb_ata_regs *regs;
7055
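	/*
	 * ->qc_defer normally reserves the command block; commands that
	 * bypass it (e.g. libata internal commands) get one last chance
	 * to grab a free block here.
	 */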
7056         if (qc->lldd_task == NULL)
7057                 ipr_qc_defer(qc);
7058
7059         ipr_cmd = qc->lldd_task;
7060         if (ipr_cmd == NULL)
7061                 return AC_ERR_SYSTEM;
7062
7063         qc->lldd_task = NULL;
7064         spin_lock(&ipr_cmd->hrrq->_lock);
7065         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7066                         ipr_cmd->hrrq->ioa_is_dead)) {
7067                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7068                 spin_unlock(&ipr_cmd->hrrq->_lock);
7069                 return AC_ERR_SYSTEM;
7070         }
7071
7072         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7073         ioarcb = &ipr_cmd->ioarcb;
7074
7075         if (ioa_cfg->sis64) {
7076                 regs = &ipr_cmd->i.ata_ioadl.regs;
7077                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7078         } else
7079                 regs = &ioarcb->u.add_data.u.regs;
7080
7081         memset(regs, 0, sizeof(*regs));
7082         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7083
7084         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7085         ipr_cmd->qc = qc;
7086         ipr_cmd->done = ipr_sata_done;
7087         ipr_cmd->ioarcb.res_handle = res->res_handle;
7088         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7089         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7090         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7091         ipr_cmd->dma_use_sg = qc->n_elem;
7092
7093         if (ioa_cfg->sis64)
7094                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7095         else
7096                 ipr_build_ata_ioadl(ipr_cmd, qc);
7097
7098         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7099         ipr_copy_sata_tf(regs, &qc->tf);
7100         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7101         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7102
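	/*
	 * Translate the taskfile protocol into adapter flags: DMA protocols
	 * need the DMA transfer type, ATAPI protocols the packet-command bit.
	 */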
7103         switch (qc->tf.protocol) {
7104         case ATA_PROT_NODATA:
7105         case ATA_PROT_PIO:
7106                 break;
7107
7108         case ATA_PROT_DMA:
7109                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7110                 break;
7111
7112         case ATAPI_PROT_PIO:
7113         case ATAPI_PROT_NODATA:
7114                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7115                 break;
7116
7117         case ATAPI_PROT_DMA:
7118                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7119                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7120                 break;
7121
7122         default:
7123                 WARN_ON(1);
7124                 spin_unlock(&ipr_cmd->hrrq->_lock);
7125                 return AC_ERR_INVALID;
7126         }
7127
7128         ipr_send_command(ipr_cmd);
7129         spin_unlock(&ipr_cmd->hrrq->_lock);
7130
7131         return 0;
7132 }
7133
7134 /**
7135  * ipr_qc_fill_rtf - Read result TF
7136  * @qc: ATA queued command
7137  *
7138  * Return value:
7139  *      true
7140  **/
7141 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7142 {
7143         struct ipr_sata_port *sata_port = qc->ap->private_data;
7144         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7145         struct ata_taskfile *tf = &qc->result_tf;
7146
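	/* Copy the device's ATA register image, returned in the IOASA,
	 * into the libata result taskfile. */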
7147         tf->feature = g->error;
7148         tf->nsect = g->nsect;
7149         tf->lbal = g->lbal;
7150         tf->lbam = g->lbam;
7151         tf->lbah = g->lbah;
7152         tf->device = g->device;
7153         tf->command = g->status;
7154         tf->hob_nsect = g->hob_nsect;
7155         tf->hob_lbal = g->hob_lbal;
7156         tf->hob_lbam = g->hob_lbam;
7157         tf->hob_lbah = g->hob_lbah;
7158
7159         return true;
7160 }
7161
7162 static struct ata_port_operations ipr_sata_ops = {
7163         .phy_reset = ipr_ata_phy_reset,
7164         .hardreset = ipr_sata_reset,
7165         .post_internal_cmd = ipr_ata_post_internal,
7166         .qc_prep = ata_noop_qc_prep,
7167         .qc_defer = ipr_qc_defer,
7168         .qc_issue = ipr_qc_issue,
7169         .qc_fill_rtf = ipr_qc_fill_rtf,
7170         .port_start = ata_sas_port_start,
7171         .port_stop = ata_sas_port_stop
7172 };
7173
7174 static struct ata_port_info sata_port_info = {
7175         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7176                           ATA_FLAG_SAS_HOST,
7177         .pio_mask       = ATA_PIO4_ONLY,
7178         .mwdma_mask     = ATA_MWDMA2,
7179         .udma_mask      = ATA_UDMA6,
7180         .port_ops       = &ipr_sata_ops
7181 };
7182
7183 #ifdef CONFIG_PPC_PSERIES
7184 static const u16 ipr_blocked_processors[] = {
7185         PVR_NORTHSTAR,
7186         PVR_PULSAR,
7187         PVR_POWER4,
7188         PVR_ICESTAR,
7189         PVR_SSTAR,
7190         PVR_POWER4p,
7191         PVR_630,
7192         PVR_630p
7193 };
7194
7195 /**
7196  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7197  * @ioa_cfg:    ioa cfg struct
7198  *
7199  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7200  * certain pSeries hardware. This function determines if the given
7201  * adapter is in one of these configurations or not.
7202  *
7203  * Return value:
7204  *      1 if adapter is not supported / 0 if adapter is supported
7205  **/
7206 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7207 {
7208         int i;
7209
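	/*
	 * Assumption: PCI revision 4 is the first level at or above the
	 * fixed Gemstone 3.1 silicon described above.
	 */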
7210         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7211                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7212                         if (pvr_version_is(ipr_blocked_processors[i]))
7213                                 return 1;
7214                 }
7215         }
7216         return 0;
7217 }
7218 #else
7219 #define ipr_invalid_adapter(ioa_cfg) 0
7220 #endif
7221
7222 /**
7223  * ipr_ioa_bringdown_done - IOA bring down completion.
7224  * @ipr_cmd:    ipr command struct
7225  *
7226  * This function processes the completion of an adapter bring down.
7227  * It wakes any reset sleepers.
7228  *
7229  * Return value:
7230  *      IPR_RC_JOB_RETURN
7231  **/
7232 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7233 {
7234         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7235         int i;
7236
7237         ENTER;
7238         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7239                 ipr_trace;
7240                 ioa_cfg->scsi_unblock = 1;
7241                 schedule_work(&ioa_cfg->work_q);
7242         }
7243
7244         ioa_cfg->in_reset_reload = 0;
7245         ioa_cfg->reset_retries = 0;
7246         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7247                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7248                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7249                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7250         }
7251         wmb();
7252
7253         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7254         wake_up_all(&ioa_cfg->reset_wait_q);
7255         LEAVE;
7256
7257         return IPR_RC_JOB_RETURN;
7258 }
7259
7260 /**
7261  * ipr_ioa_reset_done - IOA reset completion.
7262  * @ipr_cmd:    ipr command struct
7263  *
7264  * This function processes the completion of an adapter reset.
7265  * It schedules any necessary mid-layer add/removes and
7266  * wakes any reset sleepers.
7267  *
7268  * Return value:
7269  *      IPR_RC_JOB_RETURN
7270  **/
7271 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7272 {
7273         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7274         struct ipr_resource_entry *res;
7275         int j;
7276
7277         ENTER;
7278         ioa_cfg->in_reset_reload = 0;
7279         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7280                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7281                 ioa_cfg->hrrq[j].allow_cmds = 1;
7282                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7283         }
7284         wmb();
7285         ioa_cfg->reset_cmd = NULL;
7286         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7287
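	/* Note in the trace whether any mid-layer adds/removes are pending,
	 * then let the worker thread process the updated resource table. */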
7288         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7289                 if (res->add_to_ml || res->del_from_ml) {
7290                         ipr_trace;
7291                         break;
7292                 }
7293         }
7294         schedule_work(&ioa_cfg->work_q);
7295
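	/*
	 * Re-arm the HCAMs: the first IPR_NUM_LOG_HCAMS buffers listen for
	 * error log data, the remainder for configuration change events.
	 */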
7296         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7297                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7298                 if (j < IPR_NUM_LOG_HCAMS)
7299                         ipr_send_hcam(ioa_cfg,
7300                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7301                                 ioa_cfg->hostrcb[j]);
7302                 else
7303                         ipr_send_hcam(ioa_cfg,
7304                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7305                                 ioa_cfg->hostrcb[j]);
7306         }
7307
7308         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7309         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7310
7311         ioa_cfg->reset_retries = 0;
7312         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7313         wake_up_all(&ioa_cfg->reset_wait_q);
7314
7315         ioa_cfg->scsi_unblock = 1;
7316         schedule_work(&ioa_cfg->work_q);
7317         LEAVE;
7318         return IPR_RC_JOB_RETURN;
7319 }
7320
7321 /**
7322  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7323  * @supported_dev:      supported device struct
7324  * @vpids:              vendor product id struct
7325  *
7326  * Return value:
7327  *      none
7328  **/
7329 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7330                                  struct ipr_std_inq_vpids *vpids)
7331 {
7332         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7333         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7334         supported_dev->num_records = 1;
7335         supported_dev->data_length =
7336                 cpu_to_be16(sizeof(struct ipr_supported_device));
7337         supported_dev->reserved = 0;
7338 }
7339
7340 /**
7341  * ipr_set_supported_devs - Send Set Supported Devices for a device
7342  * @ipr_cmd:    ipr command struct
7343  *
7344  * This function sends a Set Supported Devices to the adapter
7345  *
7346  * Return value:
7347  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7348  **/
7349 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7350 {
7351         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7352         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7353         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7354         struct ipr_resource_entry *res = ipr_cmd->u.res;
7355
7356         ipr_cmd->job_step = ipr_ioa_reset_done;
7357
7358         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7359                 if (!ipr_is_scsi_disk(res))
7360                         continue;
7361
7362                 ipr_cmd->u.res = res;
7363                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7364
7365                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7366                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7367                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7368
7369                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7370                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7371                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7372                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7373
7374                 ipr_init_ioadl(ipr_cmd,
7375                                ioa_cfg->vpd_cbs_dma +
7376                                  offsetof(struct ipr_misc_cbs, supp_dev),
7377                                sizeof(struct ipr_supported_device),
7378                                IPR_IOADL_FLAGS_WRITE_LAST);
7379
7380                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7381                            IPR_SET_SUP_DEVICE_TIMEOUT);
7382
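		/*
		 * SIS-64 adapters need only this single command; older
		 * adapters re-enter this function to cover each disk in turn.
		 */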
7383                 if (!ioa_cfg->sis64)
7384                         ipr_cmd->job_step = ipr_set_supported_devs;
7385                 LEAVE;
7386                 return IPR_RC_JOB_RETURN;
7387         }
7388
7389         LEAVE;
7390         return IPR_RC_JOB_CONTINUE;
7391 }
7392
7393 /**
7394  * ipr_get_mode_page - Locate specified mode page
7395  * @mode_pages: mode page buffer
7396  * @page_code:  page code to find
7397  * @len:                minimum required length for mode page
7398  *
7399  * Return value:
7400  *      pointer to mode page / NULL on failure
7401  **/
7402 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7403                                u32 page_code, u32 len)
7404 {
7405         struct ipr_mode_page_hdr *mode_hdr;
7406         u32 page_length;
7407         u32 length;
7408
7409         if (!mode_pages || (mode_pages->hdr.length == 0))
7410                 return NULL;
7411
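	/*
	 * hdr.length does not count itself, so the mode data totals
	 * length + 1 bytes; skip the 4-byte parameter header and any block
	 * descriptors to reach the first mode page.
	 */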
7412         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7413         mode_hdr = (struct ipr_mode_page_hdr *)
7414                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7415
7416         while (length) {
7417                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7418                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7419                                 return mode_hdr;
7420                         break;
7421                 } else {
7422                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7423                                        mode_hdr->page_length);
7424                         length -= page_length;
7425                         mode_hdr = (struct ipr_mode_page_hdr *)
7426                                 ((unsigned long)mode_hdr + page_length);
7427                 }
7428         }
7429         return NULL;
7430 }
7431
7432 /**
7433  * ipr_check_term_power - Check for term power errors
7434  * @ioa_cfg:    ioa config struct
7435  * @mode_pages: IOAFP mode pages buffer
7436  *
7437  * Check the IOAFP's mode page 28 for term power errors
7438  *
7439  * Return value:
7440  *      nothing
7441  **/
7442 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7443                                  struct ipr_mode_pages *mode_pages)
7444 {
7445         int i;
7446         int entry_length;
7447         struct ipr_dev_bus_entry *bus;
7448         struct ipr_mode_page28 *mode_page;
7449
7450         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7451                                       sizeof(struct ipr_mode_page28));
7452
7453         entry_length = mode_page->entry_length;
7454
7455         bus = mode_page->bus;
7456
7457         for (i = 0; i < mode_page->num_entries; i++) {
7458                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7459                         dev_err(&ioa_cfg->pdev->dev,
7460                                 "Term power is absent on scsi bus %d\n",
7461                                 bus->res_addr.bus);
7462                 }
7463
7464                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7465         }
7466 }
7467
7468 /**
7469  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7470  * @ioa_cfg:    ioa config struct
7471  *
7472  * Looks through the config table checking for SES devices. If
7473  * the SES device is in the SES table indicating a maximum SCSI
7474  * bus speed, the speed is limited for the bus.
7475  *
7476  * Return value:
7477  *      none
7478  **/
7479 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7480 {
7481         u32 max_xfer_rate;
7482         int i;
7483
7484         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7485                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7486                                                        ioa_cfg->bus_attr[i].bus_width);
7487
7488                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7489                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7490         }
7491 }
7492
7493 /**
7494  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7495  * @ioa_cfg:    ioa config struct
7496  * @mode_pages: mode page 28 buffer
7497  *
7498  * Updates mode page 28 based on driver configuration
7499  *
7500  * Return value:
7501  *      none
7502  **/
7503 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7504                                           struct ipr_mode_pages *mode_pages)
7505 {
7506         int i, entry_length;
7507         struct ipr_dev_bus_entry *bus;
7508         struct ipr_bus_attributes *bus_attr;
7509         struct ipr_mode_page28 *mode_page;
7510
7511         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7512                                       sizeof(struct ipr_mode_page28));
7513
7514         entry_length = mode_page->entry_length;
7515
7516         /* Loop for each device bus entry */
7517         for (i = 0, bus = mode_page->bus;
7518              i < mode_page->num_entries;
7519              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7520                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7521                         dev_err(&ioa_cfg->pdev->dev,
7522                                 "Invalid resource address reported: 0x%08X\n",
7523                                 IPR_GET_PHYS_LOC(bus->res_addr));
7524                         continue;
7525                 }
7526
7527                 bus_attr = &ioa_cfg->bus_attr[i];
7528                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7529                 bus->bus_width = bus_attr->bus_width;
7530                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7531                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7532                 if (bus_attr->qas_enabled)
7533                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7534                 else
7535                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7536         }
7537 }
7538
7539 /**
7540  * ipr_build_mode_select - Build a mode select command
7541  * @ipr_cmd:    ipr command struct
7542  * @res_handle: resource handle to send command to
7543  * @parm:               Byte 1 (PF/SP) of the Mode Select CDB
7544  * @dma_addr:   DMA buffer address
7545  * @xfer_len:   data transfer length
7546  *
7547  * Return value:
7548  *      none
7549  **/
7550 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7551                                   __be32 res_handle, u8 parm,
7552                                   dma_addr_t dma_addr, u8 xfer_len)
7553 {
7554         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7555
7556         ioarcb->res_handle = res_handle;
7557         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7558         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7559         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7560         ioarcb->cmd_pkt.cdb[1] = parm;
7561         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7562
7563         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7564 }
7565
7566 /**
7567  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7568  * @ipr_cmd:    ipr command struct
7569  *
7570  * This function sets up the SCSI bus attributes and sends
7571  * a Mode Select for Page 28 to activate them.
7572  *
7573  * Return value:
7574  *      IPR_RC_JOB_RETURN
7575  **/
7576 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7577 {
7578         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7579         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7580         int length;
7581
7582         ENTER;
7583         ipr_scsi_bus_speed_limit(ioa_cfg);
7584         ipr_check_term_power(ioa_cfg, mode_pages);
7585         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
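	/* Capture the full mode data length, then zero the length field:
	 * it is reserved in Mode Select parameter data. */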
7586         length = mode_pages->hdr.length + 1;
7587         mode_pages->hdr.length = 0;
7588
7589         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7590                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7591                               length);
7592
7593         ipr_cmd->job_step = ipr_set_supported_devs;
7594         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7595                                     struct ipr_resource_entry, queue);
7596         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7597
7598         LEAVE;
7599         return IPR_RC_JOB_RETURN;
7600 }
7601
7602 /**
7603  * ipr_build_mode_sense - Builds a mode sense command
7604  * @ipr_cmd:    ipr command struct
7605  * @res_handle:         resource handle to send command to
7606  * @parm:               Byte 2 of mode sense command
7607  * @dma_addr:   DMA address of mode sense buffer
7608  * @xfer_len:   Size of DMA buffer
7609  *
7610  * Return value:
7611  *      none
7612  **/
7613 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7614                                  __be32 res_handle,
7615                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7616 {
7617         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7618
7619         ioarcb->res_handle = res_handle;
7620         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7621         ioarcb->cmd_pkt.cdb[2] = parm;
7622         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7623         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7624
7625         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7626 }
7627
7628 /**
7629  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7630  * @ipr_cmd:    ipr command struct
7631  *
7632  * This function handles the failure of an IOA bringup command.
7633  *
7634  * Return value:
7635  *      IPR_RC_JOB_RETURN
7636  **/
7637 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7638 {
7639         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7640         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7641
7642         dev_err(&ioa_cfg->pdev->dev,
7643                 "0x%02X failed with IOASC: 0x%08X\n",
7644                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7645
7646         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7647         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7648         return IPR_RC_JOB_RETURN;
7649 }
7650
7651 /**
7652  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7653  * @ipr_cmd:    ipr command struct
7654  *
7655  * This function handles the failure of a Mode Sense to the IOAFP.
7656  * Some adapters do not handle all mode pages.
7657  *
7658  * Return value:
7659  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7660  **/
7661 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7662 {
7663         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7664         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7665
7666         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7667                 ipr_cmd->job_step = ipr_set_supported_devs;
7668                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7669                                             struct ipr_resource_entry, queue);
7670                 return IPR_RC_JOB_CONTINUE;
7671         }
7672
7673         return ipr_reset_cmd_failed(ipr_cmd);
7674 }
7675
7676 /**
7677  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7678  * @ipr_cmd:    ipr command struct
7679  *
7680  * This function sends a Page 28 mode sense to the IOA to
7681  * retrieve SCSI bus attributes.
7682  *
7683  * Return value:
7684  *      IPR_RC_JOB_RETURN
7685  **/
7686 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7687 {
7688         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7689
7690         ENTER;
7691         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7692                              0x28, ioa_cfg->vpd_cbs_dma +
7693                              offsetof(struct ipr_misc_cbs, mode_pages),
7694                              sizeof(struct ipr_mode_pages));
7695
7696         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7697         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7698
7699         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7700
7701         LEAVE;
7702         return IPR_RC_JOB_RETURN;
7703 }
7704
7705 /**
7706  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7707  * @ipr_cmd:    ipr command struct
7708  *
7709  * This function enables dual IOA RAID support if possible.
7710  *
7711  * Return value:
7712  *      IPR_RC_JOB_RETURN
7713  **/
7714 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7715 {
7716         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7717         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7718         struct ipr_mode_page24 *mode_page;
7719         int length;
7720
7721         ENTER;
7722         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7723                                       sizeof(struct ipr_mode_page24));
7724
7725         if (mode_page)
7726                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7727
7728         length = mode_pages->hdr.length + 1;
7729         mode_pages->hdr.length = 0;
7730
7731         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7732                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7733                               length);
7734
7735         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7736         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7737
7738         LEAVE;
7739         return IPR_RC_JOB_RETURN;
7740 }
7741
7742 /**
7743  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7744  * @ipr_cmd:    ipr command struct
7745  *
7746  * This function handles the failure of a Mode Sense to the IOAFP.
7747  * Some adapters do not handle all mode pages.
7748  *
7749  * Return value:
7750  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7751  **/
7752 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7753 {
7754         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7755
7756         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7757                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7758                 return IPR_RC_JOB_CONTINUE;
7759         }
7760
7761         return ipr_reset_cmd_failed(ipr_cmd);
7762 }
7763
7764 /**
7765  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7766  * @ipr_cmd:    ipr command struct
7767  *
7768  * This function sends a mode sense to the IOA to retrieve
7769  * the IOA Advanced Function Control mode page.
7770  *
7771  * Return value:
7772  *      IPR_RC_JOB_RETURN
7773  **/
7774 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7775 {
7776         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7777
7778         ENTER;
7779         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7780                              0x24, ioa_cfg->vpd_cbs_dma +
7781                              offsetof(struct ipr_misc_cbs, mode_pages),
7782                              sizeof(struct ipr_mode_pages));
7783
7784         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7785         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7786
7787         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7788
7789         LEAVE;
7790         return IPR_RC_JOB_RETURN;
7791 }
7792
7793 /**
7794  * ipr_init_res_table - Initialize the resource table
7795  * @ipr_cmd:    ipr command struct
7796  *
7797  * This function looks through the existing resource table, comparing
7798  * it with the config table. This function will take care of old/new
7799  * devices and schedule adding/removing them from the mid-layer
7800  * as appropriate.
7801  *
7802  * Return value:
7803  *      IPR_RC_JOB_CONTINUE
7804  **/
7805 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7806 {
7807         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7808         struct ipr_resource_entry *res, *temp;
7809         struct ipr_config_table_entry_wrapper cfgtew;
7810         int entries, found, flag, i;
7811         LIST_HEAD(old_res);
7812
7813         ENTER;
7814         if (ioa_cfg->sis64)
7815                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7816         else
7817                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7818
7819         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7820                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7821
7822         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7823                 list_move_tail(&res->queue, &old_res);
7824
7825         if (ioa_cfg->sis64)
7826                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7827         else
7828                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7829
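	/*
	 * Walk the fetched config table: known devices move back onto
	 * used_res_q, new ones take a free entry and are flagged for
	 * addition to the mid-layer.
	 */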
7830         for (i = 0; i < entries; i++) {
7831                 if (ioa_cfg->sis64)
7832                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7833                 else
7834                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7835                 found = 0;
7836
7837                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7838                         if (ipr_is_same_device(res, &cfgtew)) {
7839                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7840                                 found = 1;
7841                                 break;
7842                         }
7843                 }
7844
7845                 if (!found) {
7846                         if (list_empty(&ioa_cfg->free_res_q)) {
7847                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7848                                 break;
7849                         }
7850
7851                         found = 1;
7852                         res = list_entry(ioa_cfg->free_res_q.next,
7853                                          struct ipr_resource_entry, queue);
7854                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7855                         ipr_init_res_entry(res, &cfgtew);
7856                         res->add_to_ml = 1;
7857                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7858                         res->sdev->allow_restart = 1;
7859
7860                 if (found)
7861                         ipr_update_res_entry(res, &cfgtew);
7862         }
7863
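	/* Whatever is left on old_res has vanished from the config table:
	 * schedule sdev removal where one exists, otherwise free the entry. */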
7864         list_for_each_entry_safe(res, temp, &old_res, queue) {
7865                 if (res->sdev) {
7866                         res->del_from_ml = 1;
7867                         res->res_handle = IPR_INVALID_RES_HANDLE;
7868                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7869                 }
7870         }
7871
7872         list_for_each_entry_safe(res, temp, &old_res, queue) {
7873                 ipr_clear_res_target(res);
7874                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7875         }
7876
7877         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7878                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7879         else
7880                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7881
7882         LEAVE;
7883         return IPR_RC_JOB_CONTINUE;
7884 }
7885
7886 /**
7887  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7888  * @ipr_cmd:    ipr command struct
7889  *
7890  * This function sends a Query IOA Configuration command
7891  * to the adapter to retrieve the IOA configuration table.
7892  *
7893  * Return value:
7894  *      IPR_RC_JOB_RETURN
7895  **/
7896 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7897 {
7898         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7899         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7900         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7901         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7902
7903         ENTER;
7904         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7905                 ioa_cfg->dual_raid = 1;
7906         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7907                  ucode_vpd->major_release, ucode_vpd->card_type,
7908                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7909         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7910         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7911
7912         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7913         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7914         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7915         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7916
7917         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7918                        IPR_IOADL_FLAGS_READ_LAST);
7919
7920         ipr_cmd->job_step = ipr_init_res_table;
7921
7922         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7923
7924         LEAVE;
7925         return IPR_RC_JOB_RETURN;
7926 }
7927
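/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd:    ipr command struct
 *
 * Adapters that do not implement the service action reject the request
 * type; treat that as a no-op and continue the reset job.
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/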
7928 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7929 {
7930         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7931
7932         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7933                 return IPR_RC_JOB_CONTINUE;
7934
7935         return ipr_reset_cmd_failed(ipr_cmd);
7936 }
7937
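/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/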
7938 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7939                                          __be32 res_handle, u8 sa_code)
7940 {
7941         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7942
7943         ioarcb->res_handle = res_handle;
7944         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7945         ioarcb->cmd_pkt.cdb[1] = sa_code;
7946         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7947 }
7948
7949 /**
7950  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7951  * @ipr_cmd:    ipr command struct
7952  *
7953  * Return value:
7954  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7955  **/
7956 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7957 {
7958         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7959         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7960         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7961
7962         ENTER;
7963
7964         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7965
7966         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7967                 ipr_build_ioa_service_action(ipr_cmd,
7968                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7969                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7970
7971                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7972
7973                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7974                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7975                            IPR_SET_SUP_DEVICE_TIMEOUT);
7976
7977                 LEAVE;
7978                 return IPR_RC_JOB_RETURN;
7979         }
7980
7981         LEAVE;
7982         return IPR_RC_JOB_CONTINUE;
7983 }
7984
7985 /**
7986  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7987  * @ipr_cmd:    ipr command struct
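 * @flags:      inquiry flags byte (CDB byte 1, e.g. EVPD)
 * @page:       page code to inquire
 * @dma_addr:   DMA address of the inquiry buffer
 * @xfer_len:   transfer length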
7988  *
7989  * This utility function sends an inquiry to the adapter.
7990  *
7991  * Return value:
7992  *      none
7993  **/
7994 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7995                               dma_addr_t dma_addr, u8 xfer_len)
7996 {
7997         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7998
7999         ENTER;
8000         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8001         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8002
8003         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8004         ioarcb->cmd_pkt.cdb[1] = flags;
8005         ioarcb->cmd_pkt.cdb[2] = page;
8006         ioarcb->cmd_pkt.cdb[4] = xfer_len;
8007
8008         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8009
8010         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8011         LEAVE;
8012 }
8013
8014 /**
8015  * ipr_inquiry_page_supported - Is the given inquiry page supported
8016  * @page0:              inquiry page 0 buffer
8017  * @page:               page code.
8018  *
8019  * This function determines if the specified inquiry page is supported.
8020  *
8021  * Return value:
8022  *      1 if page is supported / 0 if not
8023  **/
8024 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8025 {
8026         int i;
8027
8028         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8029                 if (page0->page[i] == page)
8030                         return 1;
8031
8032         return 0;
8033 }
8034
8035 /**
8036  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8037  * @ipr_cmd:    ipr command struct
8038  *
8039  * This function sends a Page 0xC4 inquiry to the adapter
8040  * to retrieve the adapter's cache capabilities page.
8041  *
8042  * Return value:
8043  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8044  **/
8045 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8046 {
8047         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8048         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8049         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8050
8051         ENTER;
8052         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8053         memset(pageC4, 0, sizeof(*pageC4));
8054
8055         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8056                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8057                                   (ioa_cfg->vpd_cbs_dma
8058                                    + offsetof(struct ipr_misc_cbs,
8059                                               pageC4_data)),
8060                                   sizeof(struct ipr_inquiry_pageC4));
8061                 return IPR_RC_JOB_RETURN;
8062         }
8063
8064         LEAVE;
8065         return IPR_RC_JOB_CONTINUE;
8066 }
8067
8068 /**
8069  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8070  * @ipr_cmd:    ipr command struct
8071  *
8072  * This function sends a Page 0xD0 inquiry to the adapter
8073  * to retrieve adapter capabilities.
8074  *
8075  * Return value:
8076  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8077  **/
8078 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8079 {
8080         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8081         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8082         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8083
8084         ENTER;
8085         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8086         memset(cap, 0, sizeof(*cap));
8087
8088         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8089                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8090                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8091                                   sizeof(struct ipr_inquiry_cap));
8092                 return IPR_RC_JOB_RETURN;
8093         }
8094
8095         LEAVE;
8096         return IPR_RC_JOB_CONTINUE;
8097 }
8098
8099 /**
8100  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8101  * @ipr_cmd:    ipr command struct
8102  *
8103  * This function sends a Page 3 inquiry to the adapter
8104  * to retrieve software VPD information.
8105  *
8106  * Return value:
8107  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8108  **/
8109 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8110 {
8111         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8112
8113         ENTER;
8114
8115         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8116
8117         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8118                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8119                           sizeof(struct ipr_inquiry_page3));
8120
8121         LEAVE;
8122         return IPR_RC_JOB_RETURN;
8123 }
8124
8125 /**
8126  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8127  * @ipr_cmd:    ipr command struct
8128  *
8129  * This function sends a Page 0 inquiry to the adapter
8130  * to retrieve supported inquiry pages.
8131  *
8132  * Return value:
8133  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8134  **/
8135 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8136 {
8137         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8138         char type[5];
8139
8140         ENTER;
8141
8142         /* Grab the type out of the VPD and store it away */
8143         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8144         type[4] = '\0';
8145         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8146
8147         if (ipr_invalid_adapter(ioa_cfg)) {
8148                 dev_err(&ioa_cfg->pdev->dev,
8149                         "Adapter not supported in this hardware configuration.\n");
8150
8151                 if (!ipr_testmode) {
8152                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8153                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8154                         list_add_tail(&ipr_cmd->queue,
8155                                         &ioa_cfg->hrrq->hrrq_free_q);
8156                         return IPR_RC_JOB_RETURN;
8157                 }
8158         }
8159
8160         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8161
8162         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8163                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8164                           sizeof(struct ipr_inquiry_page0));
8165
8166         LEAVE;
8167         return IPR_RC_JOB_RETURN;
8168 }
8169
8170 /**
8171  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8172  * @ipr_cmd:    ipr command struct
8173  *
8174  * This function sends a standard inquiry to the adapter.
8175  *
8176  * Return value:
8177  *      IPR_RC_JOB_RETURN
8178  **/
8179 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8180 {
8181         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8182
8183         ENTER;
8184         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8185
8186         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8187                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8188                           sizeof(struct ipr_ioa_vpd));
8189
8190         LEAVE;
8191         return IPR_RC_JOB_RETURN;
8192 }
8193
8194 /**
8195  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8196  * @ipr_cmd:    ipr command struct
8197  *
8198  * This function sends an Identify Host Request Response Queue
8199  * command to establish the HRRQ with the adapter.
8200  *
8201  * Return value:
8202  *      IPR_RC_JOB_RETURN
8203  **/
8204 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8205 {
8206         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8207         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8208         struct ipr_hrr_queue *hrrq;
8209
8210         ENTER;
8211         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8212         if (ioa_cfg->identify_hrrq_index == 0)
8213                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8214
8215         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8216                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8217
8218                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8219                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8220
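		/*
		 * The CDB carries the HRRQ's bus address and byte length;
		 * on SIS-64 adapters bytes 10-13 hold the upper 32 bits of
		 * the 64-bit address.
		 */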
8221                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8222                 if (ioa_cfg->sis64)
8223                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8224
8225                 if (ioa_cfg->nvectors == 1)
8226                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8227                 else
8228                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8229
8230                 ioarcb->cmd_pkt.cdb[2] =
8231                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8232                 ioarcb->cmd_pkt.cdb[3] =
8233                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8234                 ioarcb->cmd_pkt.cdb[4] =
8235                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8236                 ioarcb->cmd_pkt.cdb[5] =
8237                         ((u64) hrrq->host_rrq_dma) & 0xff;
8238                 ioarcb->cmd_pkt.cdb[7] =
8239                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8240                 ioarcb->cmd_pkt.cdb[8] =
8241                         (sizeof(u32) * hrrq->size) & 0xff;
8242
8243                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8244                         ioarcb->cmd_pkt.cdb[9] =
8245                                         ioa_cfg->identify_hrrq_index;
8246
8247                 if (ioa_cfg->sis64) {
8248                         ioarcb->cmd_pkt.cdb[10] =
8249                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8250                         ioarcb->cmd_pkt.cdb[11] =
8251                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8252                         ioarcb->cmd_pkt.cdb[12] =
8253                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8254                         ioarcb->cmd_pkt.cdb[13] =
8255                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8256                 }
8257
8258                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8259                         ioarcb->cmd_pkt.cdb[14] =
8260                                         ioa_cfg->identify_hrrq_index;
8261
8262                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8263                            IPR_INTERNAL_TIMEOUT);
8264
8265                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8266                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8267
8268                 LEAVE;
8269                 return IPR_RC_JOB_RETURN;
8270         }
8271
8272         LEAVE;
8273         return IPR_RC_JOB_CONTINUE;
8274 }
8275
8276 /**
8277  * ipr_reset_timer_done - Adapter reset timer function
8278  * @ipr_cmd:    ipr command struct
8279  *
8280  * Description: This function is used in adapter reset processing
8281  * for timing events. If the reset_cmd pointer in the IOA
8282  * config struct is not this adapter's, we are doing nested
8283  * resets and fail_all_ops will take care of freeing the
8284  * command block.
8285  *
8286  * Return value:
8287  *      none
8288  **/
8289 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8290 {
8291         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8292         unsigned long lock_flags = 0;
8293
8294         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8295
8296         if (ioa_cfg->reset_cmd == ipr_cmd) {
8297                 list_del(&ipr_cmd->queue);
8298                 ipr_cmd->done(ipr_cmd);
8299         }
8300
8301         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8302 }
8303
8304 /**
8305  * ipr_reset_start_timer - Start a timer for adapter reset job
8306  * @ipr_cmd:    ipr command struct
8307  * @timeout:    timeout value
8308  *
8309  * Description: This function is used in adapter reset processing
8310  * for timing events. If the reset_cmd pointer in the IOA
8311  * config struct is not this adapter's, we are doing nested
8312  * resets and fail_all_ops will take care of freeing the
8313  * command block.
8314  *
8315  * Return value:
8316  *      none
8317  **/
8318 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8319                                   unsigned long timeout)
8320 {
8321
8322         ENTER;
8323         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8324         ipr_cmd->done = ipr_reset_ioa_job;
8325
8326         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8327         ipr_cmd->timer.expires = jiffies + timeout;
8328         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8329         add_timer(&ipr_cmd->timer);
8330 }
8331
8332 /**
8333  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8334  * @ioa_cfg:    ioa cfg struct
8335  *
8336  * Return value:
8337  *      nothing
8338  **/
8339 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8340 {
8341         struct ipr_hrr_queue *hrrq;
8342
8343         for_each_hrrq(hrrq, ioa_cfg) {
8344                 spin_lock(&hrrq->_lock);
8345                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8346
8347                 /* Initialize Host RRQ pointers */
8348                 hrrq->hrrq_start = hrrq->host_rrq;
8349                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8350                 hrrq->hrrq_curr = hrrq->hrrq_start;
8351                 hrrq->toggle_bit = 1;
8352                 spin_unlock(&hrrq->_lock);
8353         }
8354         wmb();
8355
8356         ioa_cfg->identify_hrrq_index = 0;
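	/*
	 * When multiple HRRQs exist, queue 0 is kept for internal commands,
	 * so round-robin assignment of new ops starts at queue 1.
	 */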
8357         if (ioa_cfg->hrrq_num == 1)
8358                 atomic_set(&ioa_cfg->hrrq_index, 0);
8359         else
8360                 atomic_set(&ioa_cfg->hrrq_index, 1);
8361
8362         /* Zero out config table */
8363         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8364 }
8365
8366 /**
8367  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8368  * @ipr_cmd:    ipr command struct
8369  *
8370  * Return value:
8371  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8372  **/
8373 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8374 {
8375         unsigned long stage, stage_time;
8376         u32 feedback;
8377         volatile u32 int_reg;
8378         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8379         u64 maskval = 0;
8380
8381         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8382         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8383         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8384
8385         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8386
8387         /* sanity check the stage_time value */
8388         if (stage_time == 0)
8389                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8390         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8391                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8392         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8393                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8394
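	/*
	 * If the IOA has signalled its transition to operational, mask the
	 * stage-change and transition interrupts and press on with the
	 * reset job; otherwise (re)arm the stage timer below.
	 */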
8395         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8396                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8397                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8398                 stage_time = ioa_cfg->transop_timeout;
8399                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8400         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8401                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8402                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8403                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8404                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8405                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8406                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8407                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8408                         return IPR_RC_JOB_CONTINUE;
8409                 }
8410         }
8411
8412         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8413         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8414         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8415         ipr_cmd->done = ipr_reset_ioa_job;
8416         add_timer(&ipr_cmd->timer);
8417
8418         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8419
8420         return IPR_RC_JOB_RETURN;
8421 }
8422
8423 /**
8424  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8425  * @ipr_cmd:    ipr command struct
8426  *
8427  * This function reinitializes some control blocks and
8428  * enables destructive diagnostics on the adapter.
8429  *
8430  * Return value:
8431  *      IPR_RC_JOB_RETURN
8432  **/
8433 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8434 {
8435         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8436         volatile u32 int_reg;
8437         volatile u64 maskval;
8438         int i;
8439
8440         ENTER;
8441         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8442         ipr_init_ioa_mem(ioa_cfg);
8443
8444         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8445                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8446                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8447                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8448         }
8449         wmb();
8450         if (ioa_cfg->sis64) {
8451                 /* Set the adapter to the correct endian mode. */
8452                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8453                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8454         }
8455
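             /*
              * If the IOA has already transitioned to operational, just
              * unmask the error and HRRQ interrupts and move on to
              * identifying the HRRQ.
              */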
8456         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8457
8458         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8459                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8460                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8461                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8462                 return IPR_RC_JOB_CONTINUE;
8463         }
8464
8465         /* Enable destructive diagnostics on IOA */
8466         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8467
8468         if (ioa_cfg->sis64) {
8469                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8470                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8471                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8472         } else
8473                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8474
8475         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8476
8477         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8478
8479         if (ioa_cfg->sis64) {
8480                 ipr_cmd->job_step = ipr_reset_next_stage;
8481                 return IPR_RC_JOB_CONTINUE;
8482         }
8483
8484         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8485         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8486         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8487         ipr_cmd->done = ipr_reset_ioa_job;
8488         add_timer(&ipr_cmd->timer);
8489         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8490
8491         LEAVE;
8492         return IPR_RC_JOB_RETURN;
8493 }
8494
8495 /**
8496  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8497  * @ipr_cmd:    ipr command struct
8498  *
8499  * This function is invoked when an adapter dump has run out
8500  * of processing time.
8501  *
8502  * Return value:
8503  *      IPR_RC_JOB_CONTINUE
8504  **/
8505 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8506 {
8507         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8508
8509         if (ioa_cfg->sdt_state == GET_DUMP)
8510                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8511         else if (ioa_cfg->sdt_state == READ_DUMP)
8512                 ioa_cfg->sdt_state = ABORT_DUMP;
8513
8514         ioa_cfg->dump_timeout = 1;
8515         ipr_cmd->job_step = ipr_reset_alert;
8516
8517         return IPR_RC_JOB_CONTINUE;
8518 }
8519
8520 /**
8521  * ipr_unit_check_no_data - Log a unit check/no data error log
8522  * @ioa_cfg:            ioa config struct
8523  *
8524  * Logs an error indicating the adapter unit checked, but for some
8525  * reason, we were unable to fetch the unit check buffer.
8526  *
8527  * Return value:
8528  *      nothing
8529  **/
8530 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8531 {
8532         ioa_cfg->errors_logged++;
8533         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8534 }
8535
8536 /**
8537  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8538  * @ioa_cfg:            ioa config struct
8539  *
8540  * Fetches the unit check buffer from the adapter by clocking the data
8541  * through the mailbox register.
8542  *
8543  * Return value:
8544  *      nothing
8545  **/
8546 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8547 {
8548         unsigned long mailbox;
8549         struct ipr_hostrcb *hostrcb;
8550         struct ipr_uc_sdt sdt;
8551         int rc, length;
8552         u32 ioasc;
8553
8554         mailbox = readl(ioa_cfg->ioa_mailbox);
8555
8556         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8557                 ipr_unit_check_no_data(ioa_cfg);
8558                 return;
8559         }
8560
8561         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8562         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8563                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8564
8565         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8566             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8567             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8568                 ipr_unit_check_no_data(ioa_cfg);
8569                 return;
8570         }
8571
8572         /* Find length of the first sdt entry (UC buffer) */
8573         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8574                 length = be32_to_cpu(sdt.entry[0].end_token);
8575         else
8576                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8577                           be32_to_cpu(sdt.entry[0].start_token)) &
8578                           IPR_FMT2_MBX_ADDR_MASK;
8579
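             /* Borrow a host RCB from the free queue to hold the unit check buffer */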
8580         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8581                              struct ipr_hostrcb, queue);
8582         list_del_init(&hostrcb->queue);
8583         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8584
8585         rc = ipr_get_ldump_data_section(ioa_cfg,
8586                                         be32_to_cpu(sdt.entry[0].start_token),
8587                                         (__be32 *)&hostrcb->hcam,
8588                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8589
8590         if (!rc) {
8591                 ipr_handle_log_data(ioa_cfg, hostrcb);
8592                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8593                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8594                     ioa_cfg->sdt_state == GET_DUMP)
8595                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8596         } else
8597                 ipr_unit_check_no_data(ioa_cfg);
8598
8599         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8600 }
8601
8602 /**
8603  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8604  * @ipr_cmd:    ipr command struct
8605  *
8606  * Description: This function fetches the unit check buffer.
8607  *
8608  * Return value:
8609  *      IPR_RC_JOB_RETURN
8610  **/
8611 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8612 {
8613         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8614
8615         ENTER;
8616         ioa_cfg->ioa_unit_checked = 0;
8617         ipr_get_unit_check_buffer(ioa_cfg);
8618         ipr_cmd->job_step = ipr_reset_alert;
8619         ipr_reset_start_timer(ipr_cmd, 0);
8620
8621         LEAVE;
8622         return IPR_RC_JOB_RETURN;
8623 }
8624
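     /**
      * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
      * @ipr_cmd:    ipr command struct
      *
      * Description: Polls, in IPR_CHECK_FOR_RESET_TIMEOUT increments, until
      * the SIS64 mailbox register reports stable or the wait time is
      * exhausted, then starts the dump timer and schedules the dump worker.
      * Legacy adapters skip the polling and start the dump immediately.
      *
      * Return value:
      *      IPR_RC_JOB_RETURN
      **/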
8625 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8626 {
8627         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8628
8629         ENTER;
8630
8631         if (ioa_cfg->sdt_state != GET_DUMP)
8632                 return IPR_RC_JOB_RETURN;
8633
8634         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8635             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8636              IPR_PCII_MAILBOX_STABLE)) {
8637
8638                 if (!ipr_cmd->u.time_left)
8639                         dev_err(&ioa_cfg->pdev->dev,
8640                                 "Timed out waiting for Mailbox register.\n");
8641
8642                 ioa_cfg->sdt_state = READ_DUMP;
8643                 ioa_cfg->dump_timeout = 0;
8644                 if (ioa_cfg->sis64)
8645                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8646                 else
8647                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8648                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8649                 schedule_work(&ioa_cfg->work_q);
8650
8651         } else {
8652                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8653                 ipr_reset_start_timer(ipr_cmd,
8654                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8655         }
8656
8657         LEAVE;
8658         return IPR_RC_JOB_RETURN;
8659 }
8660
8661 /**
8662  * ipr_reset_restore_cfg_space - Restore PCI config space.
8663  * @ipr_cmd:    ipr command struct
8664  *
8665  * Description: This function restores the saved PCI config space of
8666  * the adapter, fails all outstanding ops back to the callers, and
8667  * fetches the dump/unit check if applicable to this reset.
8668  *
8669  * Return value:
8670  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8671  **/
8672 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8673 {
8674         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8675         u32 int_reg;
8676
8677         ENTER;
8678         ioa_cfg->pdev->state_saved = true;
8679         pci_restore_state(ioa_cfg->pdev);
8680
8681         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8682                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8683                 return IPR_RC_JOB_CONTINUE;
8684         }
8685
8686         ipr_fail_all_ops(ioa_cfg);
8687
8688         if (ioa_cfg->sis64) {
8689                 /* Set the adapter to the correct endian mode. */
8690                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8691                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8692         }
8693
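             /*
              * If the adapter unit checked, fetch the unit check buffer. SIS64
              * adapters defer the fetch to a separate job step after a delay;
              * legacy adapters fetch it inline before alerting the IOA.
              */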
8694         if (ioa_cfg->ioa_unit_checked) {
8695                 if (ioa_cfg->sis64) {
8696                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8697                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8698                         return IPR_RC_JOB_RETURN;
8699                 } else {
8700                         ioa_cfg->ioa_unit_checked = 0;
8701                         ipr_get_unit_check_buffer(ioa_cfg);
8702                         ipr_cmd->job_step = ipr_reset_alert;
8703                         ipr_reset_start_timer(ipr_cmd, 0);
8704                         return IPR_RC_JOB_RETURN;
8705                 }
8706         }
8707
8708         if (ioa_cfg->in_ioa_bringdown) {
8709                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8710         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8711                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8712                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8713         } else {
8714                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8715         }
8716
8717         LEAVE;
8718         return IPR_RC_JOB_CONTINUE;
8719 }
8720
8721 /**
8722  * ipr_reset_bist_done - BIST has completed on the adapter.
8723  * @ipr_cmd:    ipr command struct
8724  *
8725  * Description: Unblock config space and resume the reset process.
8726  *
8727  * Return value:
8728  *      IPR_RC_JOB_CONTINUE
8729  **/
8730 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8731 {
8732         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8733
8734         ENTER;
8735         if (ioa_cfg->cfg_locked)
8736                 pci_cfg_access_unlock(ioa_cfg->pdev);
8737         ioa_cfg->cfg_locked = 0;
8738         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8739         LEAVE;
8740         return IPR_RC_JOB_CONTINUE;
8741 }
8742
8743 /**
8744  * ipr_reset_start_bist - Run BIST on the adapter.
8745  * @ipr_cmd:    ipr command struct
8746  *
8747  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8748  *
8749  * Return value:
8750  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8751  **/
8752 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8753 {
8754         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8755         int rc = PCIBIOS_SUCCESSFUL;
8756
8757         ENTER;
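             /* IPR_MMIO chips start BIST via the uproc interrupt register;
              * the rest use the PCI config space BIST register. */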
8758         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8759                 writel(IPR_UPROCI_SIS64_START_BIST,
8760                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8761         else
8762                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8763
8764         if (rc == PCIBIOS_SUCCESSFUL) {
8765                 ipr_cmd->job_step = ipr_reset_bist_done;
8766                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8767                 rc = IPR_RC_JOB_RETURN;
8768         } else {
8769                 if (ioa_cfg->cfg_locked)
8770                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8771                 ioa_cfg->cfg_locked = 0;
8772                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8773                 rc = IPR_RC_JOB_CONTINUE;
8774         }
8775
8776         LEAVE;
8777         return rc;
8778 }
8779
8780 /**
8781  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8782  * @ipr_cmd:    ipr command struct
8783  *
8784  * Description: This clears PCI reset to the adapter and delays two seconds.
8785  *
8786  * Return value:
8787  *      IPR_RC_JOB_RETURN
8788  **/
8789 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8790 {
8791         ENTER;
8792         ipr_cmd->job_step = ipr_reset_bist_done;
8793         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8794         LEAVE;
8795         return IPR_RC_JOB_RETURN;
8796 }
8797
8798 /**
8799  * ipr_reset_reset_work - Pulse a PCIe warm reset
8800  * @work:       work struct
8801  *
8802  * Description: This pulses warm reset to a slot.
8803  *
8804  **/
8805 static void ipr_reset_reset_work(struct work_struct *work)
8806 {
8807         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8808         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8809         struct pci_dev *pdev = ioa_cfg->pdev;
8810         unsigned long lock_flags = 0;
8811
8812         ENTER;
8813         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8814         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8815         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8816
8817         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8818         if (ioa_cfg->reset_cmd == ipr_cmd)
8819                 ipr_reset_ioa_job(ipr_cmd);
8820         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8821         LEAVE;
8822 }
8823
8824 /**
8825  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8826  * @ipr_cmd:    ipr command struct
8827  *
8828  * Description: This asserts PCI reset to the adapter.
8829  *
8830  * Return value:
8831  *      IPR_RC_JOB_RETURN
8832  **/
8833 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8834 {
8835         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8836
8837         ENTER;
8838         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8839         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8840         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8841         LEAVE;
8842         return IPR_RC_JOB_RETURN;
8843 }
8844
8845 /**
8846  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8847  * @ipr_cmd:    ipr command struct
8848  *
8849  * Description: This attempts to block config access to the IOA.
8850  *
8851  * Return value:
8852  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8853  **/
8854 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8855 {
8856         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8857         int rc = IPR_RC_JOB_CONTINUE;
8858
8859         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8860                 ioa_cfg->cfg_locked = 1;
8861                 ipr_cmd->job_step = ioa_cfg->reset;
8862         } else {
8863                 if (ipr_cmd->u.time_left) {
8864                         rc = IPR_RC_JOB_RETURN;
8865                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8866                         ipr_reset_start_timer(ipr_cmd,
8867                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8868                 } else {
8869                         ipr_cmd->job_step = ioa_cfg->reset;
8870                         dev_err(&ioa_cfg->pdev->dev,
8871                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8872                 }
8873         }
8874
8875         return rc;
8876 }
8877
8878 /**
8879  * ipr_reset_block_config_access - Block config access to the IOA
8880  * @ipr_cmd:    ipr command struct
8881  *
8882  * Description: This attempts to block config access to the IOA
8883  *
8884  * Return value:
8885  *      IPR_RC_JOB_CONTINUE
8886  **/
8887 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8888 {
8889         ipr_cmd->ioa_cfg->cfg_locked = 0;
8890         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8891         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8892         return IPR_RC_JOB_CONTINUE;
8893 }
8894
8895 /**
8896  * ipr_reset_allowed - Query whether or not IOA can be reset
8897  * @ioa_cfg:    ioa config struct
8898  *
8899  * Return value:
8900  *      0 if reset not allowed / non-zero if reset is allowed
8901  **/
8902 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8903 {
8904         volatile u32 temp_reg;
8905
8906         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8907         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8908 }
8909
8910 /**
8911  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8912  * @ipr_cmd:    ipr command struct
8913  *
8914  * Description: This function waits for adapter permission to run BIST,
8915  * then runs BIST. If the adapter does not grant permission within a
8916  * reasonable time, we reset it anyway. Resetting the adapter without
8917  * warning it risks losing the adapter's persistent error log: if the
8918  * adapter is reset while it is writing to its flash, the flash
8919  * segment will have bad ECC and be zeroed.
8921  *
8922  * Return value:
8923  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8924  **/
8925 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8926 {
8927         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8928         int rc = IPR_RC_JOB_RETURN;
8929
8930         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8931                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8932                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8933         } else {
8934                 ipr_cmd->job_step = ipr_reset_block_config_access;
8935                 rc = IPR_RC_JOB_CONTINUE;
8936         }
8937
8938         return rc;
8939 }
8940
8941 /**
8942  * ipr_reset_alert - Alert the adapter of a pending reset
8943  * @ipr_cmd:    ipr command struct
8944  *
8945  * Description: This function alerts the adapter that it will be reset.
8946  * If memory space is not currently enabled, proceed directly
8947  * to running BIST on the adapter. The timer must always be started
8948  * so we guarantee we do not run BIST from ipr_isr.
8949  *
8950  * Return value:
8951  *      IPR_RC_JOB_RETURN
8952  **/
8953 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8954 {
8955         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8956         u16 cmd_reg;
8957         int rc;
8958
8959         ENTER;
8960         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8961
8962         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8963                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8964                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8965                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8966         } else {
8967                 ipr_cmd->job_step = ipr_reset_block_config_access;
8968         }
8969
8970         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8971         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8972
8973         LEAVE;
8974         return IPR_RC_JOB_RETURN;
8975 }
8976
8977 /**
8978  * ipr_reset_quiesce_done - Complete IOA disconnect
8979  * @ipr_cmd:    ipr command struct
8980  *
8981  * Description: Freeze the adapter to complete quiesce processing
8982  *
8983  * Return value:
8984  *      IPR_RC_JOB_CONTINUE
8985  **/
8986 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8987 {
8988         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8989
8990         ENTER;
8991         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8992         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8993         LEAVE;
8994         return IPR_RC_JOB_CONTINUE;
8995 }
8996
8997 /**
8998  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8999  * @ipr_cmd:    ipr command struct
9000  *
9001  * Description: If nothing is outstanding to the IOA, proceed with
9002  * the IOA disconnect. Otherwise reset the IOA.
9003  *
9004  * Return value:
9005  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9006  **/
9007 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9008 {
9009         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9010         struct ipr_cmnd *loop_cmd;
9011         struct ipr_hrr_queue *hrrq;
9012         int rc = IPR_RC_JOB_CONTINUE;
9013         int count = 0;
9014
9015         ENTER;
9016         ipr_cmd->job_step = ipr_reset_quiesce_done;
9017
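             /* Any command still outstanding means the cancels failed; reset the IOA */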
9018         for_each_hrrq(hrrq, ioa_cfg) {
9019                 spin_lock(&hrrq->_lock);
9020                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9021                         count++;
9022                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9023                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9024                         rc = IPR_RC_JOB_RETURN;
9025                         break;
9026                 }
9027                 spin_unlock(&hrrq->_lock);
9028
9029                 if (count)
9030                         break;
9031         }
9032
9033         LEAVE;
9034         return rc;
9035 }
9036
9037 /**
9038  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9039  * @ipr_cmd:    ipr command struct
9040  *
9041  * Description: Cancel any outstanding HCAMs to the IOA.
9042  *
9043  * Return value:
9044  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9045  **/
9046 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9047 {
9048         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9049         int rc = IPR_RC_JOB_CONTINUE;
9050         struct ipr_cmd_pkt *cmd_pkt;
9051         struct ipr_cmnd *hcam_cmd;
9052         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9053
9054         ENTER;
9055         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9056
9057         if (!hrrq->ioa_is_dead) {
9058                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9059                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9060                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9061                                         continue;
9062
9063                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9065                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9066                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9067                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9068                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
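                                     /* CDB bytes 10-13 carry bits 63:32 of the
                                      * IOARCB address, bytes 2-5 bits 31:0 */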
9069                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9070                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9071                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9072                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9073                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9074                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9075                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9076                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9077
9078                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9079                                            IPR_CANCEL_TIMEOUT);
9080
9081                                 rc = IPR_RC_JOB_RETURN;
9082                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9083                                 break;
9084                         }
9085                 }
9086         } else
9087                 ipr_cmd->job_step = ipr_reset_alert;
9088
9089         LEAVE;
9090         return rc;
9091 }
9092
9093 /**
9094  * ipr_reset_ucode_download_done - Microcode download completion
9095  * @ipr_cmd:    ipr command struct
9096  *
9097  * Description: This function unmaps the microcode download buffer.
9098  *
9099  * Return value:
9100  *      IPR_RC_JOB_CONTINUE
9101  **/
9102 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9103 {
9104         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9105         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9106
9107         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9108                      sglist->num_sg, DMA_TO_DEVICE);
9109
9110         ipr_cmd->job_step = ipr_reset_alert;
9111         return IPR_RC_JOB_CONTINUE;
9112 }
9113
9114 /**
9115  * ipr_reset_ucode_download - Download microcode to the adapter
9116  * @ipr_cmd:    ipr command struct
9117  *
9118  * Description: This function checks to see if there is microcode
9119  * to download to the adapter. If there is, a download is performed.
9120  *
9121  * Return value:
9122  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9123  **/
9124 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9125 {
9126         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9127         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9128
9129         ENTER;
9130         ipr_cmd->job_step = ipr_reset_alert;
9131
9132         if (!sglist)
9133                 return IPR_RC_JOB_CONTINUE;
9134
9135         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9136         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9137         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9138         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
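             /* Bytes 6-8 of the WRITE BUFFER CDB hold the 24-bit transfer length */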
9139         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9140         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9141         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9142
9143         if (ioa_cfg->sis64)
9144                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9145         else
9146                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9147         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9148
9149         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9150                    IPR_WRITE_BUFFER_TIMEOUT);
9151
9152         LEAVE;
9153         return IPR_RC_JOB_RETURN;
9154 }
9155
9156 /**
9157  * ipr_reset_shutdown_ioa - Shutdown the adapter
9158  * @ipr_cmd:    ipr command struct
9159  *
9160  * Description: This function issues an adapter shutdown of the
9161  * specified type to the specified adapter as part of the
9162  * adapter reset job.
9163  *
9164  * Return value:
9165  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9166  **/
9167 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9168 {
9169         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9170         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9171         unsigned long timeout;
9172         int rc = IPR_RC_JOB_CONTINUE;
9173
9174         ENTER;
9175         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9176                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9177         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9178                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9179                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9180                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9181                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9182                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9183
9184                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9185                         timeout = IPR_SHUTDOWN_TIMEOUT;
9186                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9187                         timeout = IPR_INTERNAL_TIMEOUT;
9188                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9189                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9190                 else
9191                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9192
9193                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9194
9195                 rc = IPR_RC_JOB_RETURN;
9196                 ipr_cmd->job_step = ipr_reset_ucode_download;
9197         } else
9198                 ipr_cmd->job_step = ipr_reset_alert;
9199
9200         LEAVE;
9201         return rc;
9202 }
9203
9204 /**
9205  * ipr_reset_ioa_job - Adapter reset job
9206  * @ipr_cmd:    ipr command struct
9207  *
9208  * Description: This function is the job router for the adapter reset job.
9209  *
9210  * Return value:
9211  *      none
9212  **/
9213 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9214 {
9215         u32 rc, ioasc;
9216         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9217
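             /* Run successive job steps inline until one returns
              * IPR_RC_JOB_RETURN to wait on an interrupt or timer. */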
9218         do {
9219                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9220
9221                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9222                         /*
9223                          * We are doing nested adapter resets and this is
9224                          * not the current reset job.
9225                          */
9226                         list_add_tail(&ipr_cmd->queue,
9227                                         &ipr_cmd->hrrq->hrrq_free_q);
9228                         return;
9229                 }
9230
9231                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9232                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9233                         if (rc == IPR_RC_JOB_RETURN)
9234                                 return;
9235                 }
9236
9237                 ipr_reinit_ipr_cmnd(ipr_cmd);
9238                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9239                 rc = ipr_cmd->job_step(ipr_cmd);
9240         } while (rc == IPR_RC_JOB_CONTINUE);
9241 }
9242
9243 /**
9244  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9245  * @ioa_cfg:            ioa config struct
9246  * @job_step:           first job step of reset job
9247  * @shutdown_type:      shutdown type
9248  *
9249  * Description: This function will initiate the reset of the given adapter
9250  * starting at the selected job step.
9251  * If the caller needs to wait on the completion of the reset,
9252  * the caller must sleep on the reset_wait_q.
9253  *
9254  * Return value:
9255  *      none
9256  **/
9257 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9258                                     int (*job_step) (struct ipr_cmnd *),
9259                                     enum ipr_shutdown_type shutdown_type)
9260 {
9261         struct ipr_cmnd *ipr_cmd;
9262         int i;
9263
9264         ioa_cfg->in_reset_reload = 1;
9265         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9266                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9267                 ioa_cfg->hrrq[i].allow_cmds = 0;
9268                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9269         }
9270         wmb();
9271         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9272                 ioa_cfg->scsi_unblock = 0;
9273                 ioa_cfg->scsi_blocked = 1;
9274                 scsi_block_requests(ioa_cfg->host);
9275         }
9276
9277         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9278         ioa_cfg->reset_cmd = ipr_cmd;
9279         ipr_cmd->job_step = job_step;
9280         ipr_cmd->u.shutdown_type = shutdown_type;
9281
9282         ipr_reset_ioa_job(ipr_cmd);
9283 }
9284
9285 /**
9286  * ipr_initiate_ioa_reset - Initiate an adapter reset
9287  * @ioa_cfg:            ioa config struct
9288  * @shutdown_type:      shutdown type
9289  *
9290  * Description: This function will initiate the reset of the given adapter.
9291  * If the caller needs to wait on the completion of the reset,
9292  * the caller must sleep on the reset_wait_q.
9293  *
9294  * Return value:
9295  *      none
9296  **/
9297 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9298                                    enum ipr_shutdown_type shutdown_type)
9299 {
9300         int i;
9301
9302         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9303                 return;
9304
9305         if (ioa_cfg->in_reset_reload) {
9306                 if (ioa_cfg->sdt_state == GET_DUMP)
9307                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9308                 else if (ioa_cfg->sdt_state == READ_DUMP)
9309                         ioa_cfg->sdt_state = ABORT_DUMP;
9310         }
9311
9312         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9313                 dev_err(&ioa_cfg->pdev->dev,
9314                         "IOA taken offline - error recovery failed\n");
9315
9316                 ioa_cfg->reset_retries = 0;
9317                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9318                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9319                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9320                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9321                 }
9322                 wmb();
9323
9324                 if (ioa_cfg->in_ioa_bringdown) {
9325                         ioa_cfg->reset_cmd = NULL;
9326                         ioa_cfg->in_reset_reload = 0;
9327                         ipr_fail_all_ops(ioa_cfg);
9328                         wake_up_all(&ioa_cfg->reset_wait_q);
9329
9330                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9331                                 ioa_cfg->scsi_unblock = 1;
9332                                 schedule_work(&ioa_cfg->work_q);
9333                         }
9334                         return;
9335                 } else {
9336                         ioa_cfg->in_ioa_bringdown = 1;
9337                         shutdown_type = IPR_SHUTDOWN_NONE;
9338                 }
9339         }
9340
9341         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9342                                 shutdown_type);
9343 }
9344
9345 /**
9346  * ipr_reset_freeze - Hold off all I/O activity
9347  * @ipr_cmd:    ipr command struct
9348  *
9349  * Description: If the PCI slot is frozen, hold off all I/O
9350  * activity; then, as soon as the slot is available again,
9351  * initiate an adapter reset.
9352  *
      * Return value:
      *      IPR_RC_JOB_RETURN
      **/
9353 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9354 {
9355         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9356         int i;
9357
9358         /* Disallow new interrupts, avoid loop */
9359         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9360                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9361                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9362                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9363         }
9364         wmb();
9365         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9366         ipr_cmd->done = ipr_reset_ioa_job;
9367         return IPR_RC_JOB_RETURN;
9368 }
9369
9370 /**
9371  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9372  * @pdev:       PCI device struct
9373  *
9374  * Description: This routine is called to tell us that the MMIO
9375  * access to the IOA has been restored
9376  */
9377 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9378 {
9379         unsigned long flags = 0;
9380         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9381
9382         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9383         if (!ioa_cfg->probe_done)
9384                 pci_save_state(pdev);
9385         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9386         return PCI_ERS_RESULT_NEED_RESET;
9387 }
9388
9389 /**
9390  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9391  * @pdev:       PCI device struct
9392  *
9393  * Description: This routine is called to tell us that the PCI bus
9394  * is down. Can't do anything here, except put the device driver
9395  * into a holding pattern, waiting for the PCI bus to come back.
9396  */
9397 static void ipr_pci_frozen(struct pci_dev *pdev)
9398 {
9399         unsigned long flags = 0;
9400         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9401
9402         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9403         if (ioa_cfg->probe_done)
9404                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9405         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9406 }
9407
9408 /**
9409  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9410  * @pdev:       PCI device struct
9411  *
9412  * Description: This routine is called by the pci error recovery
9413  * code after the PCI slot has been reset, just before we
9414  * should resume normal operations.
9415  */
9416 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9417 {
9418         unsigned long flags = 0;
9419         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9420
9421         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9422         if (ioa_cfg->probe_done) {
9423                 if (ioa_cfg->needs_warm_reset)
9424                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9425                 else
9426                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9427                                                 IPR_SHUTDOWN_NONE);
9428         } else
9429                 wake_up_all(&ioa_cfg->eeh_wait_q);
9430         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9431         return PCI_ERS_RESULT_RECOVERED;
9432 }
9433
9434 /**
9435  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9436  * @pdev:       PCI device struct
9437  *
9438  * Description: This routine is called when the PCI bus has
9439  * permanently failed.
9440  */
9441 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9442 {
9443         unsigned long flags = 0;
9444         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9445         int i;
9446
9447         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9448         if (ioa_cfg->probe_done) {
9449                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9450                         ioa_cfg->sdt_state = ABORT_DUMP;
9451                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9452                 ioa_cfg->in_ioa_bringdown = 1;
9453                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9454                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9455                         ioa_cfg->hrrq[i].allow_cmds = 0;
9456                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9457                 }
9458                 wmb();
9459                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9460         } else
9461                 wake_up_all(&ioa_cfg->eeh_wait_q);
9462         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9463 }
9464
9465 /**
9466  * ipr_pci_error_detected - Called when a PCI error is detected.
9467  * @pdev:       PCI device struct
9468  * @state:      PCI channel state
9469  *
9470  * Description: Called when a PCI error is detected.
9471  *
9472  * Return value:
9473  *      PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_NEED_RESET /
      *      PCI_ERS_RESULT_DISCONNECT
9474  */
9475 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9476                                                pci_channel_state_t state)
9477 {
9478         switch (state) {
9479         case pci_channel_io_frozen:
9480                 ipr_pci_frozen(pdev);
9481                 return PCI_ERS_RESULT_CAN_RECOVER;
9482         case pci_channel_io_perm_failure:
9483                 ipr_pci_perm_failure(pdev);
9484                 return PCI_ERS_RESULT_DISCONNECT;
9486         default:
9487                 break;
9488         }
9489         return PCI_ERS_RESULT_NEED_RESET;
9490 }
9491
9492 /**
9493  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9494  * @ioa_cfg:    ioa cfg struct
9495  *
9496  * Description: This is the second phase of adapter initialization.
9497  * This function takes care of initializing the adapter to the point
9498  * where it can accept new commands.
9499  *
9500  * Return value:
9501  *      0 on success / -EIO on failure
9502  **/
9503 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9504 {
9505         int rc = 0;
9506         unsigned long host_lock_flags = 0;
9507
9508         ENTER;
9509         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9510         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
9511         ioa_cfg->probe_done = 1;
9512         if (ioa_cfg->needs_hard_reset) {
9513                 ioa_cfg->needs_hard_reset = 0;
9514                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9515         } else
9516                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9517                                         IPR_SHUTDOWN_NONE);
9518         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9519
9520         LEAVE;
9521         return rc;
9522 }
9523
9524 /**
9525  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9526  * @ioa_cfg:    ioa config struct
9527  *
9528  * Return value:
9529  *      none
9530  **/
9531 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9532 {
9533         int i;
9534
9535         if (ioa_cfg->ipr_cmnd_list) {
9536                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9537                         if (ioa_cfg->ipr_cmnd_list[i])
9538                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9539                                               ioa_cfg->ipr_cmnd_list[i],
9540                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9541
9542                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9543                 }
9544         }
9545
9546         /* dma_pool_destroy() is a no-op when passed a NULL pool */
9547         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9548
9549         kfree(ioa_cfg->ipr_cmnd_list);
9550         kfree(ioa_cfg->ipr_cmnd_list_dma);
9551         ioa_cfg->ipr_cmnd_list = NULL;
9552         ioa_cfg->ipr_cmnd_list_dma = NULL;
9553         ioa_cfg->ipr_cmd_pool = NULL;
9554 }
9555
9556 /**
9557  * ipr_free_mem - Frees memory allocated for an adapter
9558  * @ioa_cfg:    ioa cfg struct
9559  *
9560  * Return value:
9561  *      nothing
9562  **/
9563 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9564 {
9565         int i;
9566
9567         kfree(ioa_cfg->res_entries);
9568         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9569                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9570         ipr_free_cmd_blks(ioa_cfg);
9571
9572         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9573                 dma_free_coherent(&ioa_cfg->pdev->dev,
9574                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9575                                   ioa_cfg->hrrq[i].host_rrq,
9576                                   ioa_cfg->hrrq[i].host_rrq_dma);
9577
9578         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9579                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9580
9581         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9582                 dma_free_coherent(&ioa_cfg->pdev->dev,
9583                                   sizeof(struct ipr_hostrcb),
9584                                   ioa_cfg->hostrcb[i],
9585                                   ioa_cfg->hostrcb_dma[i]);
9586         }
9587
9588         ipr_free_dump(ioa_cfg);
9589         kfree(ioa_cfg->trace);
9590 }
9591
9592 /**
9593  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9594  * @ioa_cfg:    ipr cfg struct
9595  *
9596  * This function frees all allocated IRQs for the
9597  * specified adapter.
9598  *
9599  * Return value:
9600  *      none
9601  **/
9602 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9603 {
9604         struct pci_dev *pdev = ioa_cfg->pdev;
9605         int i;
9606
9607         for (i = 0; i < ioa_cfg->nvectors; i++)
9608                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9609         pci_free_irq_vectors(pdev);
9610 }
9611
9612 /**
9613  * ipr_free_all_resources - Free all allocated resources for an adapter.
9614  * @ioa_cfg:    ioa config struct
9615  *
9616  * This function frees all allocated resources for the
9617  * specified adapter.
9618  *
9619  * Return value:
9620  *      none
9621  **/
9622 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9623 {
9624         struct pci_dev *pdev = ioa_cfg->pdev;
9625
9626         ENTER;
9627         ipr_free_irqs(ioa_cfg);
9628         if (ioa_cfg->reset_work_q)
9629                 destroy_workqueue(ioa_cfg->reset_work_q);
9630         iounmap(ioa_cfg->hdw_dma_regs);
9631         pci_release_regions(pdev);
9632         ipr_free_mem(ioa_cfg);
9633         scsi_host_put(ioa_cfg->host);
9634         pci_disable_device(pdev);
9635         LEAVE;
9636 }
9637
9638 /**
9639  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9640  * @ioa_cfg:    ioa config struct
9641  *
9642  * Return value:
9643  *      0 on success / -ENOMEM on allocation failure
9644  **/
9645 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9646 {
9647         struct ipr_cmnd *ipr_cmd;
9648         struct ipr_ioarcb *ioarcb;
9649         dma_addr_t dma_addr;
9650         int i, entries_each_hrrq, hrrq_id = 0;
9651
9652         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9653                                                 sizeof(struct ipr_cmnd), 512, 0);
9654
9655         if (!ioa_cfg->ipr_cmd_pool)
9656                 return -ENOMEM;
9657
9658         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9659         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9660
9661         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9662                 ipr_free_cmd_blks(ioa_cfg);
9663                 return -ENOMEM;
9664         }
9665
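             /*
              * Distribute command blocks among the HRRQs: with multiple HRRQs,
              * queue 0 is reserved for internal commands and the remaining
              * blocks are split evenly across the other queues.
              */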
9666         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9667                 if (ioa_cfg->hrrq_num > 1) {
9668                         if (i == 0) {
9669                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9670                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9671                                 ioa_cfg->hrrq[i].max_cmd_id =
9672                                         (entries_each_hrrq - 1);
9673                         } else {
9674                                 entries_each_hrrq =
9675                                         IPR_NUM_BASE_CMD_BLKS/
9676                                         (ioa_cfg->hrrq_num - 1);
9677                                 ioa_cfg->hrrq[i].min_cmd_id =
9678                                         IPR_NUM_INTERNAL_CMD_BLKS +
9679                                         (i - 1) * entries_each_hrrq;
9680                                 ioa_cfg->hrrq[i].max_cmd_id =
9681                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9682                                         i * entries_each_hrrq - 1);
9683                         }
9684                 } else {
9685                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9686                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9687                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9688                 }
9689                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9690         }
9691
9692         BUG_ON(ioa_cfg->hrrq_num == 0);
9693
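             /* Give any blocks left over by the division to the last HRRQ */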
9694         i = IPR_NUM_CMD_BLKS -
9695                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9696         if (i > 0) {
9697                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9698                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9699         }
9700
9701         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9702                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9703
9704                 if (!ipr_cmd) {
9705                         ipr_free_cmd_blks(ioa_cfg);
9706                         return -ENOMEM;
9707                 }
9708
9709                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9710                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9711                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9712
9713                 ioarcb = &ipr_cmd->ioarcb;
9714                 ipr_cmd->dma_addr = dma_addr;
9715                 if (ioa_cfg->sis64)
9716                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9717                 else
9718                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9719
9720                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9721                 if (ioa_cfg->sis64) {
9722                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9723                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9724                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9725                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9726                 } else {
9727                         ioarcb->write_ioadl_addr =
9728                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9729                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9730                         ioarcb->ioasa_host_pci_addr =
9731                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9732                 }
9733                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9734                 ipr_cmd->cmd_index = i;
9735                 ipr_cmd->ioa_cfg = ioa_cfg;
9736                 ipr_cmd->sense_buffer_dma = dma_addr +
9737                         offsetof(struct ipr_cmnd, sense_buffer);
9738
9739                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9740                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9741                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9742                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9743                         hrrq_id++;
9744         }
9745
9746         return 0;
9747 }
9748
9749 /**
9750  * ipr_alloc_mem - Allocate memory for an adapter
9751  * @ioa_cfg:    ioa config struct
9752  *
9753  * Return value:
9754  *      0 on success / non-zero for error
9755  **/
9756 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9757 {
9758         struct pci_dev *pdev = ioa_cfg->pdev;
9759         int i, rc = -ENOMEM;
9760
9761         ENTER;
9762         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9763                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9764
9765         if (!ioa_cfg->res_entries)
9766                 goto out;
9767
9768         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9769                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9770                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9771         }
9772
9773         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9774                                               sizeof(struct ipr_misc_cbs),
9775                                               &ioa_cfg->vpd_cbs_dma,
9776                                               GFP_KERNEL);
9777
9778         if (!ioa_cfg->vpd_cbs)
9779                 goto out_free_res_entries;
9780
9781         if (ipr_alloc_cmd_blks(ioa_cfg))
9782                 goto out_free_vpd_cbs;
9783
9784         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9785                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9786                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9787                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9788                                         GFP_KERNEL);
9789
9790                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9791                         while (--i >= 0)
9792                                 dma_free_coherent(&pdev->dev,
9793                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9794                                         ioa_cfg->hrrq[i].host_rrq,
9795                                         ioa_cfg->hrrq[i].host_rrq_dma);
9796                         goto out_ipr_free_cmd_blocks;
9797                 }
9798                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9799         }
9800
9801         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9802                                                   ioa_cfg->cfg_table_size,
9803                                                   &ioa_cfg->cfg_table_dma,
9804                                                   GFP_KERNEL);
9805
9806         if (!ioa_cfg->u.cfg_table)
9807                 goto out_free_host_rrq;
9808
9809         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9810                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9811                                                          sizeof(struct ipr_hostrcb),
9812                                                          &ioa_cfg->hostrcb_dma[i],
9813                                                          GFP_KERNEL);
9814
9815                 if (!ioa_cfg->hostrcb[i])
9816                         goto out_free_hostrcb_dma;
9817
9818                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9819                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9820                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9821                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9822         }
9823
9824         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9825                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9826
9827         if (!ioa_cfg->trace)
9828                 goto out_free_hostrcb_dma;
9829
9830         rc = 0;
9831 out:
9832         LEAVE;
9833         return rc;
9834
9835 out_free_hostrcb_dma:
9836         while (i-- > 0) {
9837                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9838                                   ioa_cfg->hostrcb[i],
9839                                   ioa_cfg->hostrcb_dma[i]);
9840         }
9841         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9842                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9843 out_free_host_rrq:
9844         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9845                 dma_free_coherent(&pdev->dev,
9846                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9847                                   ioa_cfg->hrrq[i].host_rrq,
9848                                   ioa_cfg->hrrq[i].host_rrq_dma);
9849         }
9850 out_ipr_free_cmd_blocks:
9851         ipr_free_cmd_blks(ioa_cfg);
9852 out_free_vpd_cbs:
9853         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9854                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9855 out_free_res_entries:
9856         kfree(ioa_cfg->res_entries);
9857         goto out;
9858 }
9859
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

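/*
 * Example: the ipr_max_speed module parameter indexes the
 * ipr_max_bus_speeds[] table defined earlier in this file, so
 * "modprobe ipr ipr_max_speed=2" caps every bus at the U320 entry,
 * while an out-of-range value falls back to IPR_U160_SCSI_RATE above.
 */
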
/**
 * ipr_init_regs - Initialize IOA registers
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}

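/*
 * Each chip variant supplies its register layout as a struct
 * ipr_interrupt_offsets; the assignments above simply add those
 * offsets to the ioremapped BAR 0 base so later code can use
 * readl()/writel() directly, e.g.:
 *
 *	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
 *	       ioa_cfg->regs.clr_interrupt_mask_reg32);
 */
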
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_channel = IPR_MAX_SIS64_BUSES;
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_channel = IPR_VSET_BUS;
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}

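/*
 * Worked example (struct sizes hypothetical): if the SIS-64 config
 * table header were 16 bytes and each 64-bit entry 64 bytes, then
 * with max_devs_supported = 1024 the computation above would yield
 * 16 + 64 * 1024 = 65552 bytes, which ipr_alloc_mem() later hands to
 * dma_alloc_coherent().
 */
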
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to
 *				   complete during probe time
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}

static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}

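/*
 * Example: for host number 2 with four vectors, the "host%d-%d"
 * format above produces the descriptions "host2-0" through "host2-3";
 * these strings are later passed to request_irq() and show up in
 * /proc/interrupts.
 */
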
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
		struct pci_dev *pdev)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(pci_irq_vector(pdev, i),
			ipr_isr_mhrrq,
			0,
			ioa_cfg->vectors_info[i].desc,
			&ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i > 0)
				free_irq(pci_irq_vector(pdev, i),
					&ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}

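/*
 * Vector 0 is requested separately in ipr_probe_ioa(); the helper
 * above attaches ipr_isr_mhrrq() to the remaining vectors and, on
 * failure, rolls back with free_irq() so the probe path can unwind
 * cleanly.
 */
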
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to
 * determine if the interrupt is received via the ipr_test_intr() service
 * routine. If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Cannot assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	LEAVE;

	return rc;
}

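/*
 * The self-test above follows a common probe-time pattern for
 * verifying interrupt delivery; a minimal sketch of the same idea
 * (identifiers hypothetical):
 *
 *	init_waitqueue_head(&drv->test_wait_q);
 *	rc = request_irq(irq, test_handler, 0, "drv-test", drv);
 *	trigger_test_interrupt(drv);		// device-specific poke
 *	wait_event_timeout(drv->test_wait_q, drv->irq_seen, HZ);
 *	free_irq(irq, drv);
 *	if (!drv->irq_seen)
 *		rc = -EOPNOTSUPP;	// fall back to legacy INTx
 */
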
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.\n",
				ioa_cfg->nvectors,
				pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);

	rc = ipr_save_pcix_cmd_reg(ioa_cfg);
	if (rc)
		goto out_msi_disable;

	rc = ipr_set_pcix_cmd_reg(ioa_cfg);
	if (rc)
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
			ioa_cfg->vectors_info[0].desc,
			&ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
			 IRQF_SHARED,
			 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else {
		ioa_cfg->reset = ipr_reset_start_bist;
	}

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}

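/*
 * The error unwinding in ipr_probe_ioa() is the usual kernel goto
 * ladder: each failure jumps to the label that releases everything
 * acquired so far, in reverse order of acquisition. A minimal sketch
 * of the same structure (names hypothetical):
 *
 *	a = acquire_a();
 *	if (!a)
 *		goto out;
 *	b = acquire_b();
 *	if (!b)
 *		goto free_a;
 *	return 0;
 * free_a:
 *	release_a(a);
 * out:
 *	return rc;
 */
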
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

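/*
 * Tear-down ordering in __ipr_remove() matters: any in-flight reset
 * is waited out first, removing_ioa is published under each hrrq lock
 * (followed by wmb()) before the bringdown starts, and resources are
 * freed only after the reset has completed and the work queues have
 * been flushed.
 */
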
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			&ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
			&ipr_ioa_async_err_log);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				&ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}

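/*
 * Example: with "modprobe ipr ipr_fast_reboot=1" on a SIS-64 adapter,
 * a reboot (system_state == SYSTEM_RESTART) takes the quicker
 * IPR_SHUTDOWN_QUIESCE path above and releases the IRQs and PCI
 * device immediately, instead of performing a full normal shutdown.
 */
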
static const struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

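/*
 * MODULE_DEVICE_TABLE() above exports the ID table so udev/modprobe
 * can autoload the driver when a matching adapter is found; e.g.
 * "modinfo ipr" lists one alias per entry, of the form
 * (field values illustrative):
 *
 *	alias: pci:v<vendor>d<device>sv<subvendor>sd<subdevice>bc*sc*i*
 */
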
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	notifier data (unused)
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	int rc;

	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	rc = pci_register_driver(&ipr_driver);
	if (rc) {
		unregister_reboot_notifier(&ipr_notifier);
		return rc;
	}

	return 0;
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);