GNU Linux-libre 4.19.295-gnu1
drivers/scsi/ipr.c
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *      - Ultra 320 SCSI controller
38  *      - PCI-X host interface
39  *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *      - Non-Volatile Write Cache
41  *      - Supports attachment of non-RAID disks, tape, and optical devices
42  *      - RAID Levels 0, 5, 10
43  *      - Hot spare
44  *      - Background Parity Checking
45  *      - Background Data Scrubbing
46  *      - Ability to increase the capacity of an existing RAID 5 disk array
47  *              by adding disks
48  *
49  * Driver Features:
50  *      - Tagged command queuing
51  *      - Adapter microcode download
52  *      - PCI hot plug
53  *      - SCSI device hot plug
54  *
55  */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
89 /*
90  *   Global Data
91  */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 16;
102 static unsigned int ipr_fast_reboot;
103 static DEFINE_SPINLOCK(ipr_driver_lock);
104
105 /* This table describes the differences between DMA controller chips */
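/*
 * Note: the unnamed brace-initialized block in each entry below fills in
 * the chip's interrupt register offsets (the struct member that follows
 * iopoll_weight in struct ipr_chip_cfg_t, defined in ipr.h).
 */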
106 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
107         { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
108                 .mailbox = 0x0042C,
109                 .max_cmds = 100,
110                 .cache_line_size = 0x20,
111                 .clear_isr = 1,
112                 .iopoll_weight = 0,
113                 {
114                         .set_interrupt_mask_reg = 0x0022C,
115                         .clr_interrupt_mask_reg = 0x00230,
116                         .clr_interrupt_mask_reg32 = 0x00230,
117                         .sense_interrupt_mask_reg = 0x0022C,
118                         .sense_interrupt_mask_reg32 = 0x0022C,
119                         .clr_interrupt_reg = 0x00228,
120                         .clr_interrupt_reg32 = 0x00228,
121                         .sense_interrupt_reg = 0x00224,
122                         .sense_interrupt_reg32 = 0x00224,
123                         .ioarrin_reg = 0x00404,
124                         .sense_uproc_interrupt_reg = 0x00214,
125                         .sense_uproc_interrupt_reg32 = 0x00214,
126                         .set_uproc_interrupt_reg = 0x00214,
127                         .set_uproc_interrupt_reg32 = 0x00214,
128                         .clr_uproc_interrupt_reg = 0x00218,
129                         .clr_uproc_interrupt_reg32 = 0x00218
130                 }
131         },
132         { /* Snipe and Scamp */
133                 .mailbox = 0x0052C,
134                 .max_cmds = 100,
135                 .cache_line_size = 0x20,
136                 .clear_isr = 1,
137                 .iopoll_weight = 0,
138                 {
139                         .set_interrupt_mask_reg = 0x00288,
140                         .clr_interrupt_mask_reg = 0x0028C,
141                         .clr_interrupt_mask_reg32 = 0x0028C,
142                         .sense_interrupt_mask_reg = 0x00288,
143                         .sense_interrupt_mask_reg32 = 0x00288,
144                         .clr_interrupt_reg = 0x00284,
145                         .clr_interrupt_reg32 = 0x00284,
146                         .sense_interrupt_reg = 0x00280,
147                         .sense_interrupt_reg32 = 0x00280,
148                         .ioarrin_reg = 0x00504,
149                         .sense_uproc_interrupt_reg = 0x00290,
150                         .sense_uproc_interrupt_reg32 = 0x00290,
151                         .set_uproc_interrupt_reg = 0x00290,
152                         .set_uproc_interrupt_reg32 = 0x00290,
153                         .clr_uproc_interrupt_reg = 0x00294,
154                         .clr_uproc_interrupt_reg32 = 0x00294
155                 }
156         },
157         { /* CRoC */
158                 .mailbox = 0x00044,
159                 .max_cmds = 1000,
160                 .cache_line_size = 0x20,
161                 .clear_isr = 0,
162                 .iopoll_weight = 64,
163                 {
164                         .set_interrupt_mask_reg = 0x00010,
165                         .clr_interrupt_mask_reg = 0x00018,
166                         .clr_interrupt_mask_reg32 = 0x0001C,
167                         .sense_interrupt_mask_reg = 0x00010,
168                         .sense_interrupt_mask_reg32 = 0x00014,
169                         .clr_interrupt_reg = 0x00008,
170                         .clr_interrupt_reg32 = 0x0000C,
171                         .sense_interrupt_reg = 0x00000,
172                         .sense_interrupt_reg32 = 0x00004,
173                         .ioarrin_reg = 0x00070,
174                         .sense_uproc_interrupt_reg = 0x00020,
175                         .sense_uproc_interrupt_reg32 = 0x00024,
176                         .set_uproc_interrupt_reg = 0x00020,
177                         .set_uproc_interrupt_reg32 = 0x00024,
178                         .clr_uproc_interrupt_reg = 0x00028,
179                         .clr_uproc_interrupt_reg32 = 0x0002C,
180                         .init_feedback_reg = 0x0005C,
181                         .dump_addr_reg = 0x00064,
182                         .dump_data_reg = 0x00068,
183                         .endian_swap_reg = 0x00084
184                 }
185         },
186 };
187
188 static const struct ipr_chip_t ipr_chip[] = {
189         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
194         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
196         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
197         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
198         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
199 };
200
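/* Indexed by the max_speed module parameter (0 - 2) described below */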
201 static int ipr_max_bus_speeds[] = {
202         IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
203 };
204
205 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
206 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
207 module_param_named(max_speed, ipr_max_speed, uint, 0);
208 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
209 module_param_named(log_level, ipr_log_level, uint, 0);
210 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
211 module_param_named(testmode, ipr_testmode, int, 0);
212 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
213 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
214 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
215 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
216 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
217 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
218 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
219 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
220 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
221 module_param_named(max_devs, ipr_max_devs, int, 0);
222 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
223                  "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
224 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
225 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
226 module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
227 MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
228 MODULE_LICENSE("GPL");
229 MODULE_VERSION(IPR_DRIVER_VERSION);
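/*
 * Usage note (illustrative): the parameters above can be given at load
 * time, e.g. "modprobe ipr max_speed=2 fastfail=1". Parameters declared
 * with a non-zero sysfs mode (fastfail, debug, fast_reboot) also appear
 * under /sys/module/ipr/parameters/ at runtime.
 */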
230
231 /*  A constant array of IOASCs/URCs/Error Messages */
232 static const
233 struct ipr_error_table_t ipr_error_table[] = {
234         {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
235         "8155: An unknown error was received"},
236         {0x00330000, 0, 0,
237         "Soft underlength error"},
238         {0x005A0000, 0, 0,
239         "Command to be cancelled not found"},
240         {0x00808000, 0, 0,
241         "Qualified success"},
242         {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
243         "FFFE: Soft device bus error recovered by the IOA"},
244         {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
245         "4101: Soft device bus fabric error"},
246         {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
247         "FFFC: Logical block guard error recovered by the device"},
248         {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
249         "FFFC: Logical block reference tag error recovered by the device"},
250         {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
251         "4171: Recovered scatter list tag / sequence number error"},
252         {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
253         "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
254         {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
255         "4171: Recovered logical block sequence number error on IOA to Host transfer"},
256         {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
257         "FFFD: Recovered logical block reference tag error detected by the IOA"},
258         {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
259         "FFFD: Logical block guard error recovered by the IOA"},
260         {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
261         "FFF9: Device sector reassign successful"},
262         {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
263         "FFF7: Media error recovered by device rewrite procedures"},
264         {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
265         "7001: IOA sector reassignment successful"},
266         {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
267         "FFF9: Soft media error. Sector reassignment recommended"},
268         {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
269         "FFF7: Media error recovered by IOA rewrite procedures"},
270         {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
271         "FF3D: Soft PCI bus error recovered by the IOA"},
272         {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
273         "FFF6: Device hardware error recovered by the IOA"},
274         {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
275         "FFF6: Device hardware error recovered by the device"},
276         {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
277         "FF3D: Soft IOA error recovered by the IOA"},
278         {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
279         "FFFA: Undefined device response recovered by the IOA"},
280         {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
281         "FFF6: Device bus error, message or command phase"},
282         {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
283         "FFFE: Task Management Function failed"},
284         {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
285         "FFF6: Failure prediction threshold exceeded"},
286         {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
287         "8009: Impending cache battery pack failure"},
288         {0x02040100, 0, 0,
289         "Logical Unit in process of becoming ready"},
290         {0x02040200, 0, 0,
291         "Initializing command required"},
292         {0x02040400, 0, 0,
293         "34FF: Disk device format in progress"},
294         {0x02040C00, 0, 0,
295         "Logical unit not accessible, target port in unavailable state"},
296         {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
297         "9070: IOA requested reset"},
298         {0x023F0000, 0, 0,
299         "Synchronization required"},
300         {0x02408500, 0, 0,
301         "IOA microcode download required"},
302         {0x02408600, 0, 0,
303         "Device bus connection is prohibited by host"},
304         {0x024E0000, 0, 0,
305         "Not ready, IOA shutdown"},
306         {0x025A0000, 0, 0,
307         "Not ready, IOA has been shutdown"},
308         {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
309         "3020: Storage subsystem configuration error"},
310         {0x03110B00, 0, 0,
311         "FFF5: Medium error, data unreadable, recommend reassign"},
312         {0x03110C00, 0, 0,
313         "7000: Medium error, data unreadable, do not reassign"},
314         {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
315         "FFF3: Disk media format bad"},
316         {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
317         "3002: Addressed device failed to respond to selection"},
318         {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
319         "3100: Device bus error"},
320         {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
321         "3109: IOA timed out a device command"},
322         {0x04088000, 0, 0,
323         "3120: SCSI bus is not operational"},
324         {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
325         "4100: Hard device bus fabric error"},
326         {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
327         "310C: Logical block guard error detected by the device"},
328         {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
329         "310C: Logical block reference tag error detected by the device"},
330         {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
331         "4170: Scatter list tag / sequence number error"},
332         {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
333         "8150: Logical block CRC error on IOA to Host transfer"},
334         {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
335         "4170: Logical block sequence number error on IOA to Host transfer"},
336         {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
337         "310D: Logical block reference tag error detected by the IOA"},
338         {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
339         "310D: Logical block guard error detected by the IOA"},
340         {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
341         "9000: IOA reserved area data check"},
342         {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
343         "9001: IOA reserved area invalid data pattern"},
344         {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
345         "9002: IOA reserved area LRC error"},
346         {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
347         "Hardware Error, IOA metadata access error"},
348         {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
349         "102E: Out of alternate sectors for disk storage"},
350         {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
351         "FFF4: Data transfer underlength error"},
352         {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
353         "FFF4: Data transfer overlength error"},
354         {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
355         "3400: Logical unit failure"},
356         {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
357         "FFF4: Device microcode is corrupt"},
358         {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
359         "8150: PCI bus error"},
360         {0x04430000, 1, 0,
361         "Unsupported device bus message received"},
362         {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
363         "FFF4: Disk device problem"},
364         {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
365         "8150: Permanent IOA failure"},
366         {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
367         "3010: Disk device returned wrong response to IOA"},
368         {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
369         "8151: IOA microcode error"},
370         {0x04448500, 0, 0,
371         "Device bus status error"},
372         {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
373         "8157: IOA error requiring IOA reset to recover"},
374         {0x04448700, 0, 0,
375         "ATA device status error"},
376         {0x04490000, 0, 0,
377         "Message reject received from the device"},
378         {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
379         "8008: A permanent cache battery pack failure occurred"},
380         {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
381         "9090: Disk unit has been modified after the last known status"},
382         {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
383         "9081: IOA detected device error"},
384         {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
385         "9082: IOA detected device error"},
386         {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
387         "3110: Device bus error, message or command phase"},
388         {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
389         "3110: SAS Command / Task Management Function failed"},
390         {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
391         "9091: Incorrect hardware configuration change has been detected"},
392         {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
393         "9073: Invalid multi-adapter configuration"},
394         {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
395         "4010: Incorrect connection between cascaded expanders"},
396         {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
397         "4020: Connections exceed IOA design limits"},
398         {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
399         "4030: Incorrect multipath connection"},
400         {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
401         "4110: Unsupported enclosure function"},
402         {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
403         "4120: SAS cable VPD cannot be read"},
404         {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
405         "FFF4: Command to logical unit failed"},
406         {0x05240000, 1, 0,
407         "Illegal request, invalid request type or request packet"},
408         {0x05250000, 0, 0,
409         "Illegal request, invalid resource handle"},
410         {0x05258000, 0, 0,
411         "Illegal request, commands not allowed to this device"},
412         {0x05258100, 0, 0,
413         "Illegal request, command not allowed to a secondary adapter"},
414         {0x05258200, 0, 0,
415         "Illegal request, command not allowed to a non-optimized resource"},
416         {0x05260000, 0, 0,
417         "Illegal request, invalid field in parameter list"},
418         {0x05260100, 0, 0,
419         "Illegal request, parameter not supported"},
420         {0x05260200, 0, 0,
421         "Illegal request, parameter value invalid"},
422         {0x052C0000, 0, 0,
423         "Illegal request, command sequence error"},
424         {0x052C8000, 1, 0,
425         "Illegal request, dual adapter support not enabled"},
426         {0x052C8100, 1, 0,
427         "Illegal request, another cable connector was physically disabled"},
428         {0x054E8000, 1, 0,
429         "Illegal request, inconsistent group id/group count"},
430         {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
431         "9031: Array protection temporarily suspended, protection resuming"},
432         {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
433         "9040: Array protection temporarily suspended, protection resuming"},
434         {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
435         "4080: IOA exceeded maximum operating temperature"},
436         {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
437         "4085: Service required"},
438         {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
439         "4086: SAS Adapter Hardware Configuration Error"},
440         {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
441         "3140: Device bus not ready to ready transition"},
442         {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
443         "FFFB: SCSI bus was reset"},
444         {0x06290500, 0, 0,
445         "FFFE: SCSI bus transition to single ended"},
446         {0x06290600, 0, 0,
447         "FFFE: SCSI bus transition to LVD"},
448         {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
449         "FFFB: SCSI bus was reset by another initiator"},
450         {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
451         "3029: A device replacement has occurred"},
452         {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
453         "4102: Device bus fabric performance degradation"},
454         {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
455         "9051: IOA cache data exists for a missing or failed device"},
456         {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
457         "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
458         {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
459         "9025: Disk unit is not supported at its physical location"},
460         {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
461         "3020: IOA detected a SCSI bus configuration error"},
462         {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
463         "3150: SCSI bus configuration error"},
464         {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
465         "9074: Asymmetric advanced function disk configuration"},
466         {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
467         "4040: Incomplete multipath connection between IOA and enclosure"},
468         {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
469         "4041: Incomplete multipath connection between enclosure and device"},
470         {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
471         "9075: Incomplete multipath connection between IOA and remote IOA"},
472         {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
473         "9076: Configuration error, missing remote IOA"},
474         {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
475         "4050: Enclosure does not support a required multipath function"},
476         {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
477         "4121: Configuration error, required cable is missing"},
478         {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
479         "4122: Cable is not plugged into the correct location on remote IOA"},
480         {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
481         "4123: Configuration error, invalid cable vital product data"},
482         {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
483         "4124: Configuration error, both cable ends are plugged into the same IOA"},
484         {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
485         "4070: Logically bad block written on device"},
486         {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
487         "9041: Array protection temporarily suspended"},
488         {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
489         "9042: Corrupt array parity detected on specified device"},
490         {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
491         "9030: Array no longer protected due to missing or failed disk unit"},
492         {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
493         "9071: Link operational transition"},
494         {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
495         "9072: Link not operational transition"},
496         {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
497         "9032: Array exposed but still protected"},
498         {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
499         "70DD: Device forced failed by disrupt device command"},
500         {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
501         "4061: Multipath redundancy level got better"},
502         {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
503         "4060: Multipath redundancy level got worse"},
504         {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
505         "9083: Device raw mode enabled"},
506         {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
507         "9084: Device raw mode disabled"},
508         {0x07270000, 0, 0,
509         "Failure due to other device"},
510         {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
511         "9008: IOA does not support functions expected by devices"},
512         {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
513         "9010: Cache data associated with attached devices cannot be found"},
514         {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
515         "9011: Cache data belongs to devices other than those attached"},
516         {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
517         "9020: Array missing 2 or more devices with only 1 device present"},
518         {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
519         "9021: Array missing 2 or more devices with 2 or more devices present"},
520         {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
521         "9022: Exposed array is missing a required device"},
522         {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
523         "9023: Array member(s) not at required physical locations"},
524         {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
525         "9024: Array not functional due to present hardware configuration"},
526         {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
527         "9026: Array not functional due to present hardware configuration"},
528         {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
529         "9027: Array is missing a device and parity is out of sync"},
530         {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
531         "9028: Maximum number of arrays already exist"},
532         {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
533         "9050: Required cache data cannot be located for a disk unit"},
534         {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
535         "9052: Cache data exists for a device that has been modified"},
536         {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
537         "9054: IOA resources not available due to previous problems"},
538         {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
539         "9092: Disk unit requires initialization before use"},
540         {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
541         "9029: Incorrect hardware configuration change has been detected"},
542         {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
543         "9060: One or more disk pairs are missing from an array"},
544         {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
545         "9061: One or more disks are missing from an array"},
546         {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
547         "9062: One or more disks are missing from an array"},
548         {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
549         "9063: Maximum number of functional arrays has been exceeded"},
550         {0x07279A00, 0, 0,
551         "Data protect, other volume set problem"},
552         {0x0B260000, 0, 0,
553         "Aborted command, invalid descriptor"},
554         {0x0B3F9000, 0, 0,
555         "Target operating conditions have changed, dual adapter takeover"},
556         {0x0B530200, 0, 0,
557         "Aborted command, medium removal prevented"},
558         {0x0B5A0000, 0, 0,
559         "Command terminated by host"},
560         {0x0B5B8000, 0, 0,
561         "Aborted command, command terminated by host"}
562 };
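/*
 * Illustrative sketch only (hypothetical name, wrapped in #if 0): a
 * lookup over this table can mask off the low-order IOASC bits and do a
 * linear scan, falling back to the "unknown error" entry at index 0.
 * IPR_IOASC_IOASC_MASK is assumed to be defined in ipr.h.
 */
#if 0
static const struct ipr_error_table_t *
ipr_lookup_error_example(u32 ioasc)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
                if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
                        return &ipr_error_table[i];

        return &ipr_error_table[0];     /* 8155: unknown error */
}
#endif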
563
564 static const struct ipr_ses_table_entry ipr_ses_table[] = {
565         { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
566         { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
567         { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
568         { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
569         { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
570         { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
571         { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
572         { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
573         { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
574         { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
575         { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
576         { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
577         { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
578 };
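/*
 * Illustrative sketch only (hypothetical name, wrapped in #if 0): the
 * compare strings above use 'X' for product-id bytes that must match and
 * '*' for don't-care positions, so a table lookup could be written as:
 */
#if 0
static const struct ipr_ses_table_entry *
ipr_find_ses_entry_example(const char *product_id)
{
        const struct ipr_ses_table_entry *ste = ipr_ses_table;
        int i, j;

        for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
                for (j = 0; j < IPR_PROD_ID_LEN; j++) {
                        if (ste->compare_product_id_byte[j] == 'X' &&
                            ste->product_id[j] != product_id[j])
                                break;
                }
                if (j == IPR_PROD_ID_LEN)
                        return ste;     /* every required byte matched */
        }

        return NULL;
}
#endif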
579
580 /*
581  *  Function Prototypes
582  */
583 static int ipr_reset_alert(struct ipr_cmnd *);
584 static void ipr_process_ccn(struct ipr_cmnd *);
585 static void ipr_process_error(struct ipr_cmnd *);
586 static void ipr_reset_ioa_job(struct ipr_cmnd *);
587 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
588                                    enum ipr_shutdown_type);
589
590 #ifdef CONFIG_SCSI_IPR_TRACE
591 /**
592  * ipr_trc_hook - Add a trace entry to the driver trace
593  * @ipr_cmd:    ipr command struct
594  * @type:               trace type
595  * @add_data:   additional data
596  *
597  * Return value:
598  *      none
599  **/
600 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
601                          u8 type, u32 add_data)
602 {
603         struct ipr_trace_entry *trace_entry;
604         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
605         unsigned int trace_index;
606
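        /*
         * The trace buffer is a power-of-two ring: masking the
         * ever-increasing atomic index selects the next slot and
         * wraps automatically.
         */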
607         trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
608         trace_entry = &ioa_cfg->trace[trace_index];
609         trace_entry->time = jiffies;
610         trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
611         trace_entry->type = type;
612         if (ipr_cmd->ioa_cfg->sis64)
613                 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
614         else
615                 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
616         trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
617         trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
618         trace_entry->u.add_data = add_data;
619         wmb();
620 }
621 #else
622 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
623 #endif
624
625 /**
626  * ipr_lock_and_done - Acquire lock and complete command
627  * @ipr_cmd:    ipr command struct
628  *
629  * Return value:
630  *      none
631  **/
632 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
633 {
634         unsigned long lock_flags;
635         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
636
637         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
638         ipr_cmd->done(ipr_cmd);
639         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
640 }
641
642 /**
643  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
644  * @ipr_cmd:    ipr command struct
645  *
646  * Return value:
647  *      none
648  **/
649 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
650 {
651         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
652         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
653         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
654         dma_addr_t dma_addr = ipr_cmd->dma_addr;
655         int hrrq_id;
656
657         hrrq_id = ioarcb->cmd_pkt.hrrq_id;
658         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
659         ioarcb->cmd_pkt.hrrq_id = hrrq_id;
660         ioarcb->data_transfer_length = 0;
661         ioarcb->read_data_transfer_length = 0;
662         ioarcb->ioadl_len = 0;
663         ioarcb->read_ioadl_len = 0;
664
665         if (ipr_cmd->ioa_cfg->sis64) {
666                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
667                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
668                 ioasa64->u.gata.status = 0;
669         } else {
670                 ioarcb->write_ioadl_addr =
671                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
672                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
673                 ioasa->u.gata.status = 0;
674         }
675
676         ioasa->hdr.ioasc = 0;
677         ioasa->hdr.residual_data_len = 0;
678         ipr_cmd->scsi_cmd = NULL;
679         ipr_cmd->qc = NULL;
680         ipr_cmd->sense_buffer[0] = 0;
681         ipr_cmd->dma_use_sg = 0;
682 }
683
684 /**
685  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
686  * @ipr_cmd:    ipr command struct
     * @fast_done:  fast done function call-back
687  *
688  * Return value:
689  *      none
690  **/
691 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
692                               void (*fast_done) (struct ipr_cmnd *))
693 {
694         ipr_reinit_ipr_cmnd(ipr_cmd);
695         ipr_cmd->u.scratch = 0;
696         ipr_cmd->sibling = NULL;
697         ipr_cmd->eh_comp = NULL;
698         ipr_cmd->fast_done = fast_done;
699         timer_setup(&ipr_cmd->timer, NULL, 0);
700 }
701
702 /**
703  * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
704  * @hrrq:       hrr queue struct
705  *
706  * Return value:
707  *      pointer to ipr command struct
708  **/
709 static
710 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
711 {
712         struct ipr_cmnd *ipr_cmd = NULL;
713
714         if (likely(!list_empty(&hrrq->hrrq_free_q))) {
715                 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
716                         struct ipr_cmnd, queue);
717                 list_del(&ipr_cmd->queue);
718         }
719
720
721         return ipr_cmd;
722 }
723
724 /**
725  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
726  * @ioa_cfg:    ioa config struct
727  *
728  * Return value:
729  *      pointer to ipr command struct
730  **/
731 static
732 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
733 {
734         struct ipr_cmnd *ipr_cmd =
735                 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
736         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
737         return ipr_cmd;
738 }
739
740 /**
741  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
742  * @ioa_cfg:    ioa config struct
743  * @clr_ints:     interrupts to clear
744  *
745  * This function masks all interrupts on the adapter, then clears the
746  * interrupts specified in the mask
747  *
748  * Return value:
749  *      none
750  **/
751 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
752                                           u32 clr_ints)
753 {
754         volatile u32 int_reg;
755         int i;
756
757         /* Stop new interrupts */
758         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
759                 spin_lock(&ioa_cfg->hrrq[i]._lock);
760                 ioa_cfg->hrrq[i].allow_interrupts = 0;
761                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
762         }
763
764         /* Set interrupt mask to stop all new interrupts */
765         if (ioa_cfg->sis64)
766                 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
767         else
768                 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
769
770         /* Clear any pending interrupts */
771         if (ioa_cfg->sis64)
772                 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
773         writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
774         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
775 }
776
777 /**
778  * ipr_save_pcix_cmd_reg - Save PCI-X command register
779  * @ioa_cfg:    ioa config struct
780  *
781  * Return value:
782  *      0 on success / -EIO on failure
783  **/
784 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
785 {
786         int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
787
788         if (pcix_cmd_reg == 0)
789                 return 0;
790
791         if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
792                                  &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
793                 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
794                 return -EIO;
795         }
796
797         ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
798         return 0;
799 }
800
801 /**
802  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
803  * @ioa_cfg:    ioa config struct
804  *
805  * Return value:
806  *      0 on success / -EIO on failure
807  **/
808 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
809 {
810         int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
811
812         if (pcix_cmd_reg) {
813                 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
814                                           ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
815                         dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
816                         return -EIO;
817                 }
818         }
819
820         return 0;
821 }
822
823 /**
824  * __ipr_sata_eh_done - done function for aborted SATA commands
825  * @ipr_cmd:    ipr command struct
826  *
827  * This function is invoked for ops generated to SATA
828  * devices which are being aborted.
829  *
830  * Return value:
831  *      none
832  **/
833 static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
834 {
835         struct ata_queued_cmd *qc = ipr_cmd->qc;
836         struct ipr_sata_port *sata_port = qc->ap->private_data;
837
838         qc->err_mask |= AC_ERR_OTHER;
839         sata_port->ioasa.status |= ATA_BUSY;
840         ata_qc_complete(qc);
841         if (ipr_cmd->eh_comp)
842                 complete(ipr_cmd->eh_comp);
843         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
844 }
845
846 /**
847  * ipr_sata_eh_done - done function for aborted SATA commands
848  * @ipr_cmd:    ipr command struct
849  *
850  * This function is invoked for ops generated to SATA
851  * devices which are being aborted.
852  *
853  * Return value:
854  *      none
855  **/
856 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
857 {
858         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
859         unsigned long hrrq_flags;
860
861         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
862         __ipr_sata_eh_done(ipr_cmd);
863         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
864 }
865
866 /**
867  * __ipr_scsi_eh_done - mid-layer done function for aborted ops
868  * @ipr_cmd:    ipr command struct
869  *
870  * This function is invoked by the interrupt handler for
871  * ops generated by the SCSI mid-layer which are being aborted.
872  *
873  * Return value:
874  *      none
875  **/
876 static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
877 {
878         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
879
880         scsi_cmd->result |= (DID_ERROR << 16);
881
882         scsi_dma_unmap(ipr_cmd->scsi_cmd);
883         scsi_cmd->scsi_done(scsi_cmd);
884         if (ipr_cmd->eh_comp)
885                 complete(ipr_cmd->eh_comp);
886         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
887 }
888
889 /**
890  * ipr_scsi_eh_done - mid-layer done function for aborted ops
891  * @ipr_cmd:    ipr command struct
892  *
893  * This function is invoked by the interrupt handler for
894  * ops generated by the SCSI mid-layer which are being aborted.
895  *
896  * Return value:
897  *      none
898  **/
899 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
900 {
901         unsigned long hrrq_flags;
902         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
903
904         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
905         __ipr_scsi_eh_done(ipr_cmd);
906         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
907 }
908
909 /**
910  * ipr_fail_all_ops - Fails all outstanding ops.
911  * @ioa_cfg:    ioa config struct
912  *
913  * This function fails all outstanding ops.
914  *
915  * Return value:
916  *      none
917  **/
918 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
919 {
920         struct ipr_cmnd *ipr_cmd, *temp;
921         struct ipr_hrr_queue *hrrq;
922
923         ENTER;
924         for_each_hrrq(hrrq, ioa_cfg) {
925                 spin_lock(&hrrq->_lock);
926                 list_for_each_entry_safe(ipr_cmd,
927                                         temp, &hrrq->hrrq_pending_q, queue) {
928                         list_del(&ipr_cmd->queue);
929
930                         ipr_cmd->s.ioasa.hdr.ioasc =
931                                 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
932                         ipr_cmd->s.ioasa.hdr.ilid =
933                                 cpu_to_be32(IPR_DRIVER_ILID);
934
935                         if (ipr_cmd->scsi_cmd)
936                                 ipr_cmd->done = __ipr_scsi_eh_done;
937                         else if (ipr_cmd->qc)
938                                 ipr_cmd->done = __ipr_sata_eh_done;
939
940                         ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
941                                      IPR_IOASC_IOA_WAS_RESET);
942                         del_timer(&ipr_cmd->timer);
943                         ipr_cmd->done(ipr_cmd);
944                 }
945                 spin_unlock(&hrrq->_lock);
946         }
947         LEAVE;
948 }
949
950 /**
951  * ipr_send_command -  Send driver initiated requests.
952  * @ipr_cmd:            ipr command struct
953  *
954  * This function sends a command to the adapter using the correct write call.
955  * In the case of sis64, calculate the IOARCB size required, then OR the
956  * appropriate size bits into the IOARCB address before writing it.
957  *
958  * Return value:
959  *      none
960  **/
961 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
962 {
963         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
964         dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
965
966         if (ioa_cfg->sis64) {
967                 /* The default size is 256 bytes */
968                 send_dma_addr |= 0x1;
969
970                 /* If the number of ioadls * size of ioadl > 128 bytes,
971                    then use a 512 byte ioarcb */
972                 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
973                         send_dma_addr |= 0x4;
974                 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
975         } else
976                 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
977 }
978
979 /**
980  * ipr_do_req -  Send driver initiated requests.
981  * @ipr_cmd:            ipr command struct
982  * @done:                       done function
983  * @timeout_func:       timeout function
984  * @timeout:            timeout value
985  *
986  * This function sends the specified command to the adapter with the
987  * timeout given. The done function is invoked on command completion.
988  *
989  * Return value:
990  *      none
991  **/
992 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
993                        void (*done) (struct ipr_cmnd *),
994                        void (*timeout_func) (struct timer_list *), u32 timeout)
995 {
996         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
997
998         ipr_cmd->done = done;
999
1000         ipr_cmd->timer.expires = jiffies + timeout;
1001         ipr_cmd->timer.function = timeout_func;
1002
1003         add_timer(&ipr_cmd->timer);
1004
1005         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
1006
1007         ipr_send_command(ipr_cmd);
1008 }
1009
1010 /**
1011  * ipr_internal_cmd_done - Op done function for an internally generated op.
1012  * @ipr_cmd:    ipr command struct
1013  *
1014  * This function is the op done function for an internally generated,
1015  * blocking op. It simply wakes the sleeping thread.
1016  *
1017  * Return value:
1018  *      none
1019  **/
1020 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1021 {
1022         if (ipr_cmd->sibling)
1023                 ipr_cmd->sibling = NULL;
1024         else
1025                 complete(&ipr_cmd->completion);
1026 }
1027
1028 /**
1029  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030  * @ipr_cmd:    ipr command struct
1031  * @dma_addr:   dma address
1032  * @len:        transfer length
1033  * @flags:      ioadl flag value
1034  *
1035  * This function initializes an ioadl in the case where there is only a single
1036  * descriptor.
1037  *
1038  * Return value:
1039  *      nothing
1040  **/
1041 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1042                            u32 len, int flags)
1043 {
1044         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1045         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1046
1047         ipr_cmd->dma_use_sg = 1;
1048
1049         if (ipr_cmd->ioa_cfg->sis64) {
1050                 ioadl64->flags = cpu_to_be32(flags);
1051                 ioadl64->data_len = cpu_to_be32(len);
1052                 ioadl64->address = cpu_to_be64(dma_addr);
1053
1054                 ipr_cmd->ioarcb.ioadl_len =
1055                         cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1056                 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1057         } else {
1058                 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1059                 ioadl->address = cpu_to_be32(dma_addr);
1060
1061                 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1062                         ipr_cmd->ioarcb.read_ioadl_len =
1063                                 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1064                         ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1065                 } else {
1066                         ipr_cmd->ioarcb.ioadl_len =
1067                                 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1068                         ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1069                 }
1070         }
1071 }
1072
1073 /**
1074  * ipr_send_blocking_cmd - Send command and sleep on its completion.
1075  * @ipr_cmd:    ipr command struct
1076  * @timeout_func:       function to invoke if command times out
1077  * @timeout:    timeout
1078  *
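 * Note: assumes the caller holds the host lock; the lock is dropped while
 * this thread sleeps on the command's completion and reacquired afterwards.
 *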
1079  * Return value:
1080  *      none
1081  **/
1082 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1083                                   void (*timeout_func) (struct timer_list *),
1084                                   u32 timeout)
1085 {
1086         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1087
1088         init_completion(&ipr_cmd->completion);
1089         ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1090
1091         spin_unlock_irq(ioa_cfg->host->host_lock);
1092         wait_for_completion(&ipr_cmd->completion);
1093         spin_lock_irq(ioa_cfg->host->host_lock);
1094 }
1095
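/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Round-robins across the HRR queues, skipping queue 0, which is
 * reserved for initialization and internal commands (IPR_INIT_HRRQ).
 *
 * Return value:
 *      hrrq index
 **/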
1096 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1097 {
1098         unsigned int hrrq;
1099
1100         if (ioa_cfg->hrrq_num == 1)
1101                 hrrq = 0;
1102         else {
1103                 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1104                 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1105         }
1106         return hrrq;
1107 }
1108
1109 /**
1110  * ipr_send_hcam - Send an HCAM to the adapter.
1111  * @ioa_cfg:    ioa config struct
1112  * @type:               HCAM type
1113  * @hostrcb:    hostrcb struct
1114  *
1115  * This function will send a Host Controlled Async command to the adapter.
1116  * If HCAMs are currently not allowed to be issued to the adapter, it will
1117  * place the hostrcb on the free queue.
1118  *
1119  * Return value:
1120  *      none
1121  **/
1122 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1123                           struct ipr_hostrcb *hostrcb)
1124 {
1125         struct ipr_cmnd *ipr_cmd;
1126         struct ipr_ioarcb *ioarcb;
1127
1128         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1129                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1130                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1131                 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1132
1133                 ipr_cmd->u.hostrcb = hostrcb;
1134                 ioarcb = &ipr_cmd->ioarcb;
1135
1136                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1137                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1138                 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1139                 ioarcb->cmd_pkt.cdb[1] = type;
1140                 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1141                 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1142
1143                 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1144                                sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1145
1146                 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1147                         ipr_cmd->done = ipr_process_ccn;
1148                 else
1149                         ipr_cmd->done = ipr_process_error;
1150
1151                 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1152
1153                 ipr_send_command(ipr_cmd);
1154         } else {
1155                 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1156         }
1157 }
1158
1159 /**
1160  * ipr_update_ata_class - Update the ata class in the resource entry
1161  * @res:        resource entry struct
1162  * @proto:      cfgte device bus protocol value
1163  *
1164  * Return value:
1165  *      none
1166  **/
1167 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1168 {
1169         switch (proto) {
1170         case IPR_PROTO_SATA:
1171         case IPR_PROTO_SAS_STP:
1172                 res->ata_class = ATA_DEV_ATA;
1173                 break;
1174         case IPR_PROTO_SATA_ATAPI:
1175         case IPR_PROTO_SAS_STP_ATAPI:
1176                 res->ata_class = ATA_DEV_ATAPI;
1177                 break;
1178         default:
1179                 res->ata_class = ATA_DEV_UNKNOWN;
1180                 break;
1181         }
1182 }
1183
1184 /**
1185  * ipr_init_res_entry - Initialize a resource entry struct.
1186  * @res:        resource entry struct
1187  * @cfgtew:     config table entry wrapper struct
1188  *
1189  * Return value:
1190  *      none
1191  **/
1192 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1193                                struct ipr_config_table_entry_wrapper *cfgtew)
1194 {
1195         int found = 0;
1196         unsigned int proto;
1197         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1198         struct ipr_resource_entry *gscsi_res = NULL;
1199
1200         res->needs_sync_complete = 0;
1201         res->in_erp = 0;
1202         res->add_to_ml = 0;
1203         res->del_from_ml = 0;
1204         res->resetting_device = 0;
1205         res->reset_occurred = 0;
1206         res->sdev = NULL;
1207         res->sata_port = NULL;
1208
1209         if (ioa_cfg->sis64) {
1210                 proto = cfgtew->u.cfgte64->proto;
1211                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1212                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1213                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1214                 res->type = cfgtew->u.cfgte64->res_type;
1215
1216                 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1217                         sizeof(res->res_path));
1218
1219                 res->bus = 0;
1220                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1221                         sizeof(res->dev_lun.scsi_lun));
1222                 res->lun = scsilun_to_int(&res->dev_lun);
1223
1224                 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1225                         list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1226                                 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1227                                         found = 1;
1228                                         res->target = gscsi_res->target;
1229                                         break;
1230                                 }
1231                         }
1232                         if (!found) {
1233                                 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1234                                                                   ioa_cfg->max_devs_supported);
1235                                 set_bit(res->target, ioa_cfg->target_ids);
1236                         }
1237                 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1238                         res->bus = IPR_IOAFP_VIRTUAL_BUS;
1239                         res->target = 0;
1240                 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1241                         res->bus = IPR_ARRAY_VIRTUAL_BUS;
1242                         res->target = find_first_zero_bit(ioa_cfg->array_ids,
1243                                                           ioa_cfg->max_devs_supported);
1244                         set_bit(res->target, ioa_cfg->array_ids);
1245                 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1246                         res->bus = IPR_VSET_VIRTUAL_BUS;
1247                         res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1248                                                           ioa_cfg->max_devs_supported);
1249                         set_bit(res->target, ioa_cfg->vset_ids);
1250                 } else {
1251                         res->target = find_first_zero_bit(ioa_cfg->target_ids,
1252                                                           ioa_cfg->max_devs_supported);
1253                         set_bit(res->target, ioa_cfg->target_ids);
1254                 }
1255         } else {
1256                 proto = cfgtew->u.cfgte->proto;
1257                 res->qmodel = IPR_QUEUEING_MODEL(res);
1258                 res->flags = cfgtew->u.cfgte->flags;
1259                 if (res->flags & IPR_IS_IOA_RESOURCE)
1260                         res->type = IPR_RES_TYPE_IOAFP;
1261                 else
1262                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1263
1264                 res->bus = cfgtew->u.cfgte->res_addr.bus;
1265                 res->target = cfgtew->u.cfgte->res_addr.target;
1266                 res->lun = cfgtew->u.cfgte->res_addr.lun;
1267                 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1268         }
1269
1270         ipr_update_ata_class(res, proto);
1271 }
1272
1273 /**
1274  * ipr_is_same_device - Determine if two devices are the same.
1275  * @res:        resource entry struct
1276  * @cfgtew:     config table entry wrapper struct
1277  *
1278  * Return value:
1279  *      1 if the devices are the same / 0 otherwise
1280  **/
1281 static int ipr_is_same_device(struct ipr_resource_entry *res,
1282                               struct ipr_config_table_entry_wrapper *cfgtew)
1283 {
1284         if (res->ioa_cfg->sis64) {
1285                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1286                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1287                         !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1288                                         sizeof(cfgtew->u.cfgte64->lun))) {
1289                         return 1;
1290                 }
1291         } else {
1292                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1293                     res->target == cfgtew->u.cfgte->res_addr.target &&
1294                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1295                         return 1;
1296         }
1297
1298         return 0;
1299 }
1300
1301 /**
1302  * __ipr_format_res_path - Format the resource path for printing.
1303  * @res_path:   resource path
1304  * @buffer:     buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
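        /*
         * res_path is a 0xff-terminated byte array; render it as
         * dash-separated hex, e.g. "00-01-02" (illustrative).
         */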
1315         *p = '\0';
1316         p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1317         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318                 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1319
1320         return buffer;
1321 }
1322
1323 /**
1324  * ipr_format_res_path - Format the resource path for printing.
1325  * @ioa_cfg:    ioa config struct
1326  * @res_path:   resource path
1327  * @buffer:     buffer
1328  * @len:        length of buffer provided
1329  *
1330  * Return value:
1331  *      pointer to buffer
1332  **/
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334                                  u8 *res_path, char *buffer, int len)
1335 {
1336         char *p = buffer;
1337
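        /*
         * Prefix the path with the SCSI host number, e.g. "2/00-01"
         * (illustrative), then append the formatted resource path.
         */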
1338         *p = '\0';
1339         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340         __ipr_format_res_path(res_path, p, len - (p - buffer));
1341         return buffer;
1342 }
1343
1344 /**
1345  * ipr_update_res_entry - Update the resource entry.
1346  * @res:        resource entry struct
1347  * @cfgtew:     config table entry wrapper struct
1348  *
1349  * Return value:
1350  *      none
1351  **/
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353                                  struct ipr_config_table_entry_wrapper *cfgtew)
1354 {
1355         char buffer[IPR_MAX_RES_PATH_LENGTH];
1356         unsigned int proto;
1357         int new_path = 0;
1358
1359         if (res->ioa_cfg->sis64) {
1360                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362                 res->type = cfgtew->u.cfgte64->res_type;
1363
1364                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365                         sizeof(struct ipr_std_inq_data));
1366
1367                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368                 proto = cfgtew->u.cfgte64->proto;
1369                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373                         sizeof(res->dev_lun.scsi_lun));
1374
1375                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376                                         sizeof(res->res_path))) {
1377                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378                                 sizeof(res->res_path));
1379                         new_path = 1;
1380                 }
1381
1382                 if (res->sdev && new_path)
1383                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384                                     ipr_format_res_path(res->ioa_cfg,
1385                                         res->res_path, buffer, sizeof(buffer)));
1386         } else {
1387                 res->flags = cfgtew->u.cfgte->flags;
1388                 if (res->flags & IPR_IS_IOA_RESOURCE)
1389                         res->type = IPR_RES_TYPE_IOAFP;
1390                 else
1391                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394                         sizeof(struct ipr_std_inq_data));
1395
1396                 res->qmodel = IPR_QUEUEING_MODEL(res);
1397                 proto = cfgtew->u.cfgte->proto;
1398                 res->res_handle = cfgtew->u.cfgte->res_handle;
1399         }
1400
1401         ipr_update_ata_class(res, proto);
1402 }
1403
1404 /**
1405  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406  *                        for the resource.
1407  * @res:        resource entry struct
1409  *
1410  * Return value:
1411  *      none
1412  **/
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414 {
1415         struct ipr_resource_entry *gscsi_res = NULL;
1416         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418         if (!ioa_cfg->sis64)
1419                 return;
1420
1421         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422                 clear_bit(res->target, ioa_cfg->array_ids);
1423         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424                 clear_bit(res->target, ioa_cfg->vset_ids);
1425         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428                                 return;
1429                 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431         } else if (res->bus == 0)
1432                 clear_bit(res->target, ioa_cfg->target_ids);
1433 }
1434
1435 /**
1436  * ipr_handle_config_change - Handle a config change from the adapter
1437  * @ioa_cfg:    ioa config struct
1438  * @hostrcb:    hostrcb
1439  *
1440  * Return value:
1441  *      none
1442  **/
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444                                      struct ipr_hostrcb *hostrcb)
1445 {
1446         struct ipr_resource_entry *res = NULL;
1447         struct ipr_config_table_entry_wrapper cfgtew;
1448         __be32 cc_res_handle;
1450         u32 is_ndn = 1;
1451
1452         if (ioa_cfg->sis64) {
1453                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455         } else {
1456                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458         }
1459
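        /*
         * Look up the resource this notification refers to. is_ndn
         * (presumably "new device notification") stays set if no
         * existing resource matches the handle, in which case a free
         * resource entry is claimed and initialized below.
         */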
1460         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461                 if (res->res_handle == cc_res_handle) {
1462                         is_ndn = 0;
1463                         break;
1464                 }
1465         }
1466
1467         if (is_ndn) {
1468                 if (list_empty(&ioa_cfg->free_res_q)) {
1469                         ipr_send_hcam(ioa_cfg,
1470                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471                                       hostrcb);
1472                         return;
1473                 }
1474
1475                 res = list_entry(ioa_cfg->free_res_q.next,
1476                                  struct ipr_resource_entry, queue);
1477
1478                 list_del(&res->queue);
1479                 ipr_init_res_entry(res, &cfgtew);
1480                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481         }
1482
1483         ipr_update_res_entry(res, &cfgtew);
1484
1485         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486                 if (res->sdev) {
1487                         res->del_from_ml = 1;
1488                         res->res_handle = IPR_INVALID_RES_HANDLE;
1489                         schedule_work(&ioa_cfg->work_q);
1490                 } else {
1491                         ipr_clear_res_target(res);
1492                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1493                 }
1494         } else if (!res->sdev || res->del_from_ml) {
1495                 res->add_to_ml = 1;
1496                 schedule_work(&ioa_cfg->work_q);
1497         }
1498
1499         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500 }
1501
1502 /**
1503  * ipr_process_ccn - Op done function for a CCN.
1504  * @ipr_cmd:    ipr command struct
1505  *
1506  * This function is the op done function for a configuration
1507  * change notification host controlled async message (HCAM) from the adapter.
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513 {
1514         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1517
1518         list_del_init(&hostrcb->queue);
1519         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1520
1521         if (ioasc) {
1522                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524                         dev_err(&ioa_cfg->pdev->dev,
1525                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528         } else {
1529                 ipr_handle_config_change(ioa_cfg, hostrcb);
1530         }
1531 }
1532
1533 /**
1534  * strip_whitespace - Strip trailing whitespace and NUL terminate.
1535  * @i:          size of buffer
1536  * @buf:        string to modify
1537  *
1538  * This function will strip all trailing whitespace and
1539  * NUL terminate the string.
1540  *
1541  **/
1542 static void strip_whitespace(int i, char *buf)
1543 {
1544         if (i < 1)
1545                 return;
1546         i--;
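        /* back up over any trailing spaces; i can reach -1 if the field is all blanks */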
1547         while (i >= 0 && buf[i] == ' ')
1548                 i--;
1549         buf[i+1] = '\0';
1550 }
1551
1552 /**
1553  * ipr_log_vpd_compact - Log the passed VPD compactly.
1554  * @prefix:             string to print at start of printk
1555  * @hostrcb:    hostrcb pointer
1556  * @vpd:                vendor/product id/sn struct
1557  *
1558  * Return value:
1559  *      none
1560  **/
1561 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1562                                 struct ipr_vpd *vpd)
1563 {
1564         char vendor_id[IPR_VENDOR_ID_LEN + 1];
1565         char product_id[IPR_PROD_ID_LEN + 1];
1566         char sn[IPR_SERIAL_NUM_LEN + 1];
1567
1568         memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569         strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1570
1571         memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572         strip_whitespace(IPR_PROD_ID_LEN, product_id);
1573
1574         memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1575         strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1576
1577         ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1578                      vendor_id, product_id, sn);
1579 }
1580
1581 /**
1582  * ipr_log_vpd - Log the passed VPD to the error log.
1583  * @vpd:                vendor/product id/sn struct
1584  *
1585  * Return value:
1586  *      none
1587  **/
1588 static void ipr_log_vpd(struct ipr_vpd *vpd)
1589 {
1590         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591                     + IPR_SERIAL_NUM_LEN];
1592
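        /*
         * The VPD fields are fixed width and not necessarily NUL
         * terminated, so copy them into a local buffer and terminate
         * before printing.
         */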
1593         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1595                IPR_PROD_ID_LEN);
1596         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597         ipr_err("Vendor/Product ID: %s\n", buffer);
1598
1599         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1600         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601         ipr_err("    Serial Number: %s\n", buffer);
1602 }
1603
1604 /**
1605  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606  * @prefix:             string to print at start of printk
1607  * @hostrcb:    hostrcb pointer
1608  * @vpd:                vendor/product id/sn/wwn struct
1609  *
1610  * Return value:
1611  *      none
1612  **/
1613 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614                                     struct ipr_ext_vpd *vpd)
1615 {
1616         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1619 }
1620
1621 /**
1622  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623  * @vpd:                vendor/product id/sn/wwn struct
1624  *
1625  * Return value:
1626  *      none
1627  **/
1628 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1629 {
1630         ipr_log_vpd(&vpd->vpd);
1631         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632                 be32_to_cpu(vpd->wwid[1]));
1633 }
1634
1635 /**
1636  * ipr_log_enhanced_cache_error - Log a cache error.
1637  * @ioa_cfg:    ioa config struct
1638  * @hostrcb:    hostrcb struct
1639  *
1640  * Return value:
1641  *      none
1642  **/
1643 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644                                          struct ipr_hostrcb *hostrcb)
1645 {
1646         struct ipr_hostrcb_type_12_error *error;
1647
1648         if (ioa_cfg->sis64)
1649                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1650         else
1651                 error = &hostrcb->hcam.u.error.u.type_12_error;
1652
1653         ipr_err("-----Current Configuration-----\n");
1654         ipr_err("Cache Directory Card Information:\n");
1655         ipr_log_ext_vpd(&error->ioa_vpd);
1656         ipr_err("Adapter Card Information:\n");
1657         ipr_log_ext_vpd(&error->cfc_vpd);
1658
1659         ipr_err("-----Expected Configuration-----\n");
1660         ipr_err("Cache Directory Card Information:\n");
1661         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662         ipr_err("Adapter Card Information:\n");
1663         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1664
1665         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666                      be32_to_cpu(error->ioa_data[0]),
1667                      be32_to_cpu(error->ioa_data[1]),
1668                      be32_to_cpu(error->ioa_data[2]));
1669 }
1670
1671 /**
1672  * ipr_log_cache_error - Log a cache error.
1673  * @ioa_cfg:    ioa config struct
1674  * @hostrcb:    hostrcb struct
1675  *
1676  * Return value:
1677  *      none
1678  **/
1679 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680                                 struct ipr_hostrcb *hostrcb)
1681 {
1682         struct ipr_hostrcb_type_02_error *error =
1683                 &hostrcb->hcam.u.error.u.type_02_error;
1684
1685         ipr_err("-----Current Configuration-----\n");
1686         ipr_err("Cache Directory Card Information:\n");
1687         ipr_log_vpd(&error->ioa_vpd);
1688         ipr_err("Adapter Card Information:\n");
1689         ipr_log_vpd(&error->cfc_vpd);
1690
1691         ipr_err("-----Expected Configuration-----\n");
1692         ipr_err("Cache Directory Card Information:\n");
1693         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1694         ipr_err("Adapter Card Information:\n");
1695         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1696
1697         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698                      be32_to_cpu(error->ioa_data[0]),
1699                      be32_to_cpu(error->ioa_data[1]),
1700                      be32_to_cpu(error->ioa_data[2]));
1701 }
1702
1703 /**
1704  * ipr_log_enhanced_config_error - Log a configuration error.
1705  * @ioa_cfg:    ioa config struct
1706  * @hostrcb:    hostrcb struct
1707  *
1708  * Return value:
1709  *      none
1710  **/
1711 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712                                           struct ipr_hostrcb *hostrcb)
1713 {
1714         int errors_logged, i;
1715         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716         struct ipr_hostrcb_type_13_error *error;
1717
1718         error = &hostrcb->hcam.u.error.u.type_13_error;
1719         errors_logged = be32_to_cpu(error->errors_logged);
1720
1721         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722                 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724         dev_entry = error->dev;
1725
1726         for (i = 0; i < errors_logged; i++, dev_entry++) {
1727                 ipr_err_separator;
1728
1729                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730                 ipr_log_ext_vpd(&dev_entry->vpd);
1731
1732                 ipr_err("-----New Device Information-----\n");
1733                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1734
1735                 ipr_err("Cache Directory Card Information:\n");
1736                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1737
1738                 ipr_err("Adapter Card Information:\n");
1739                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1740         }
1741 }
1742
1743 /**
1744  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1745  * @ioa_cfg:    ioa config struct
1746  * @hostrcb:    hostrcb struct
1747  *
1748  * Return value:
1749  *      none
1750  **/
1751 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752                                        struct ipr_hostrcb *hostrcb)
1753 {
1754         int errors_logged, i;
1755         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756         struct ipr_hostrcb_type_23_error *error;
1757         char buffer[IPR_MAX_RES_PATH_LENGTH];
1758
1759         error = &hostrcb->hcam.u.error64.u.type_23_error;
1760         errors_logged = be32_to_cpu(error->errors_logged);
1761
1762         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763                 be32_to_cpu(error->errors_detected), errors_logged);
1764
1765         dev_entry = error->dev;
1766
1767         for (i = 0; i < errors_logged; i++, dev_entry++) {
1768                 ipr_err_separator;
1769
1770                 ipr_err("Device %d : %s", i + 1,
1771                         __ipr_format_res_path(dev_entry->res_path,
1772                                               buffer, sizeof(buffer)));
1773                 ipr_log_ext_vpd(&dev_entry->vpd);
1774
1775                 ipr_err("-----New Device Information-----\n");
1776                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1777
1778                 ipr_err("Cache Directory Card Information:\n");
1779                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1780
1781                 ipr_err("Adapter Card Information:\n");
1782                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1783         }
1784 }
1785
1786 /**
1787  * ipr_log_config_error - Log a configuration error.
1788  * @ioa_cfg:    ioa config struct
1789  * @hostrcb:    hostrcb struct
1790  *
1791  * Return value:
1792  *      none
1793  **/
1794 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795                                  struct ipr_hostrcb *hostrcb)
1796 {
1797         int errors_logged, i;
1798         struct ipr_hostrcb_device_data_entry *dev_entry;
1799         struct ipr_hostrcb_type_03_error *error;
1800
1801         error = &hostrcb->hcam.u.error.u.type_03_error;
1802         errors_logged = be32_to_cpu(error->errors_logged);
1803
1804         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805                 be32_to_cpu(error->errors_detected), errors_logged);
1806
1807         dev_entry = error->dev;
1808
1809         for (i = 0; i < errors_logged; i++, dev_entry++) {
1810                 ipr_err_separator;
1811
1812                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1813                 ipr_log_vpd(&dev_entry->vpd);
1814
1815                 ipr_err("-----New Device Information-----\n");
1816                 ipr_log_vpd(&dev_entry->new_vpd);
1817
1818                 ipr_err("Cache Directory Card Information:\n");
1819                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1820
1821                 ipr_err("Adapter Card Information:\n");
1822                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1823
1824                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825                         be32_to_cpu(dev_entry->ioa_data[0]),
1826                         be32_to_cpu(dev_entry->ioa_data[1]),
1827                         be32_to_cpu(dev_entry->ioa_data[2]),
1828                         be32_to_cpu(dev_entry->ioa_data[3]),
1829                         be32_to_cpu(dev_entry->ioa_data[4]));
1830         }
1831 }
1832
1833 /**
1834  * ipr_log_enhanced_array_error - Log an array configuration error.
1835  * @ioa_cfg:    ioa config struct
1836  * @hostrcb:    hostrcb struct
1837  *
1838  * Return value:
1839  *      none
1840  **/
1841 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842                                          struct ipr_hostrcb *hostrcb)
1843 {
1844         int i, num_entries;
1845         struct ipr_hostrcb_type_14_error *error;
1846         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849         error = &hostrcb->hcam.u.error.u.type_14_error;
1850
1851         ipr_err_separator;
1852
1853         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854                 error->protection_level,
1855                 ioa_cfg->host->host_no,
1856                 error->last_func_vset_res_addr.bus,
1857                 error->last_func_vset_res_addr.target,
1858                 error->last_func_vset_res_addr.lun);
1859
1860         ipr_err_separator;
1861
1862         array_entry = error->array_member;
1863         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1864                             ARRAY_SIZE(error->array_member));
1865
1866         for (i = 0; i < num_entries; i++, array_entry++) {
1867                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1868                         continue;
1869
1870                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871                         ipr_err("Exposed Array Member %d:\n", i);
1872                 else
1873                         ipr_err("Array Member %d:\n", i);
1874
1875                 ipr_log_ext_vpd(&array_entry->vpd);
1876                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878                                  "Expected Location");
1879
1880                 ipr_err_separator;
1881         }
1882 }
1883
1884 /**
1885  * ipr_log_array_error - Log an array configuration error.
1886  * @ioa_cfg:    ioa config struct
1887  * @hostrcb:    hostrcb struct
1888  *
1889  * Return value:
1890  *      none
1891  **/
1892 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893                                 struct ipr_hostrcb *hostrcb)
1894 {
1895         int i;
1896         struct ipr_hostrcb_type_04_error *error;
1897         struct ipr_hostrcb_array_data_entry *array_entry;
1898         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1899
1900         error = &hostrcb->hcam.u.error.u.type_04_error;
1901
1902         ipr_err_separator;
1903
1904         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905                 error->protection_level,
1906                 ioa_cfg->host->host_no,
1907                 error->last_func_vset_res_addr.bus,
1908                 error->last_func_vset_res_addr.target,
1909                 error->last_func_vset_res_addr.lun);
1910
1911         ipr_err_separator;
1912
1913         array_entry = error->array_member;
1914
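        /*
         * The legacy type 04 overlay describes up to 18 array members,
         * split across two fixed arrays: entries 0-9 live in
         * array_member[] and entries 10-17 in array_member2[], hence
         * the pointer switch at i == 9 below.
         */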
1915         for (i = 0; i < 18; i++) {
1916                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1917                         continue;
1918
1919                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1920                         ipr_err("Exposed Array Member %d:\n", i);
1921                 else
1922                         ipr_err("Array Member %d:\n", i);
1923
1924                 ipr_log_vpd(&array_entry->vpd);
1925
1926                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928                                  "Expected Location");
1929
1930                 ipr_err_separator;
1931
1932                 if (i == 9)
1933                         array_entry = error->array_member2;
1934                 else
1935                         array_entry++;
1936         }
1937 }
1938
1939 /**
1940  * ipr_log_hex_data - Log additional hex IOA error data.
1941  * @ioa_cfg:    ioa config struct
1942  * @data:               IOA error data
1943  * @len:                data length
1944  *
1945  * Return value:
1946  *      none
1947  **/
1948 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1949 {
1950         int i;
1951
1952         if (len == 0)
1953                 return;
1954
1955         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1957
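        /* dump four big-endian words (16 bytes) per line, prefixed with the byte offset */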
1958         for (i = 0; i < len / 4; i += 4) {
1959                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1960                         be32_to_cpu(data[i]),
1961                         be32_to_cpu(data[i+1]),
1962                         be32_to_cpu(data[i+2]),
1963                         be32_to_cpu(data[i+3]));
1964         }
1965 }
1966
1967 /**
1968  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969  * @ioa_cfg:    ioa config struct
1970  * @hostrcb:    hostrcb struct
1971  *
1972  * Return value:
1973  *      none
1974  **/
1975 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976                                             struct ipr_hostrcb *hostrcb)
1977 {
1978         struct ipr_hostrcb_type_17_error *error;
1979
1980         if (ioa_cfg->sis64)
1981                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1982         else
1983                 error = &hostrcb->hcam.u.error.u.type_17_error;
1984
1985         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1986         strim(error->failure_reason);
1987
1988         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1990         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1991         ipr_log_hex_data(ioa_cfg, error->data,
1992                          be32_to_cpu(hostrcb->hcam.length) -
1993                          (offsetof(struct ipr_hostrcb_error, u) +
1994                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1995 }
1996
1997 /**
1998  * ipr_log_dual_ioa_error - Log a dual adapter error.
1999  * @ioa_cfg:    ioa config struct
2000  * @hostrcb:    hostrcb struct
2001  *
2002  * Return value:
2003  *      none
2004  **/
2005 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006                                    struct ipr_hostrcb *hostrcb)
2007 {
2008         struct ipr_hostrcb_type_07_error *error;
2009
2010         error = &hostrcb->hcam.u.error.u.type_07_error;
2011         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2012         strim(error->failure_reason);
2013
2014         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2016         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2017         ipr_log_hex_data(ioa_cfg, error->data,
2018                          be32_to_cpu(hostrcb->hcam.length) -
2019                          (offsetof(struct ipr_hostrcb_error, u) +
2020                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2021 }
2022
2023 static const struct {
2024         u8 active;
2025         char *desc;
2026 } path_active_desc[] = {
2027         { IPR_PATH_NO_INFO, "Path" },
2028         { IPR_PATH_ACTIVE, "Active path" },
2029         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2030 };
2031
2032 static const struct {
2033         u8 state;
2034         char *desc;
2035 } path_state_desc[] = {
2036         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037         { IPR_PATH_HEALTHY, "is healthy" },
2038         { IPR_PATH_DEGRADED, "is degraded" },
2039         { IPR_PATH_FAILED, "is failed" }
2040 };
2041
2042 /**
2043  * ipr_log_fabric_path - Log a fabric path error
2044  * @hostrcb:    hostrcb struct
2045  * @fabric:             fabric descriptor
2046  *
2047  * Return value:
2048  *      none
2049  **/
2050 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051                                 struct ipr_hostrcb_fabric_desc *fabric)
2052 {
2053         int i, j;
2054         u8 path_state = fabric->path_state;
2055         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056         u8 state = path_state & IPR_PATH_STATE_MASK;
2057
2058         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059                 if (path_active_desc[i].active != active)
2060                         continue;
2061
2062                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063                         if (path_state_desc[j].state != state)
2064                                 continue;
2065
2066                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068                                              path_active_desc[i].desc, path_state_desc[j].desc,
2069                                              fabric->ioa_port);
2070                         } else if (fabric->cascaded_expander == 0xff) {
2071                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072                                              path_active_desc[i].desc, path_state_desc[j].desc,
2073                                              fabric->ioa_port, fabric->phy);
2074                         } else if (fabric->phy == 0xff) {
2075                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076                                              path_active_desc[i].desc, path_state_desc[j].desc,
2077                                              fabric->ioa_port, fabric->cascaded_expander);
2078                         } else {
2079                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080                                              path_active_desc[i].desc, path_state_desc[j].desc,
2081                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2082                         }
2083                         return;
2084                 }
2085         }
2086
2087         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2089 }
2090
2091 /**
2092  * ipr_log64_fabric_path - Log a fabric path error
2093  * @hostrcb:    hostrcb struct
2094  * @fabric:             fabric descriptor
2095  *
2096  * Return value:
2097  *      none
2098  **/
2099 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100                                   struct ipr_hostrcb64_fabric_desc *fabric)
2101 {
2102         int i, j;
2103         u8 path_state = fabric->path_state;
2104         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105         u8 state = path_state & IPR_PATH_STATE_MASK;
2106         char buffer[IPR_MAX_RES_PATH_LENGTH];
2107
2108         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109                 if (path_active_desc[i].active != active)
2110                         continue;
2111
2112                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113                         if (path_state_desc[j].state != state)
2114                                 continue;
2115
2116                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117                                      path_active_desc[i].desc, path_state_desc[j].desc,
2118                                      ipr_format_res_path(hostrcb->ioa_cfg,
2119                                                 fabric->res_path,
2120                                                 buffer, sizeof(buffer)));
2121                         return;
2122                 }
2123         }
2124
2125         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2126                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127                                     buffer, sizeof(buffer)));
2128 }
2129
2130 static const struct {
2131         u8 type;
2132         char *desc;
2133 } path_type_desc[] = {
2134         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2138 };
2139
2140 static const struct {
2141         u8 status;
2142         char *desc;
2143 } path_status_desc[] = {
2144         { IPR_PATH_CFG_NO_PROB, "Functional" },
2145         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146         { IPR_PATH_CFG_FAILED, "Failed" },
2147         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148         { IPR_PATH_NOT_DETECTED, "Missing" },
2149         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2150 };
2151
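/*
 * Textual link rates indexed by the low 4 bits of the PHY link rate
 * field (cfg->link_rate & IPR_PHY_LINK_RATE_MASK); 0x8 and 0x9 map to
 * the SAS 1.5Gbps and 3.0Gbps rates.
 */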
2152 static const char *link_rate[] = {
2153         "unknown",
2154         "disabled",
2155         "phy reset problem",
2156         "spinup hold",
2157         "port selector",
2158         "unknown",
2159         "unknown",
2160         "unknown",
2161         "1.5Gbps",
2162         "3.0Gbps",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown",
2168         "unknown"
2169 };
2170
2171 /**
2172  * ipr_log_path_elem - Log a fabric path element.
2173  * @hostrcb:    hostrcb struct
2174  * @cfg:                fabric path element struct
2175  *
2176  * Return value:
2177  *      none
2178  **/
2179 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180                               struct ipr_hostrcb_config_element *cfg)
2181 {
2182         int i, j;
2183         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2185
2186         if (type == IPR_PATH_CFG_NOT_EXIST)
2187                 return;
2188
2189         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190                 if (path_type_desc[i].type != type)
2191                         continue;
2192
2193                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194                         if (path_status_desc[j].status != status)
2195                                 continue;
2196
2197                         if (type == IPR_PATH_CFG_IOA_PORT) {
2198                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199                                              path_status_desc[j].desc, path_type_desc[i].desc,
2200                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2202                         } else {
2203                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2206                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208                                 } else if (cfg->cascaded_expander == 0xff) {
2209                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2211                                                      path_type_desc[i].desc, cfg->phy,
2212                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214                                 } else if (cfg->phy == 0xff) {
2215                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2217                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2218                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220                                 } else {
2221                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2223                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2226                                 }
2227                         }
2228                         return;
2229                 }
2230         }
2231
2232         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2236 }
2237
2238 /**
2239  * ipr_log64_path_elem - Log a fabric path element.
2240  * @hostrcb:    hostrcb struct
2241  * @cfg:                fabric path element struct
2242  *
2243  * Return value:
2244  *      none
2245  **/
2246 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247                                 struct ipr_hostrcb64_config_element *cfg)
2248 {
2249         int i, j;
2250         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253         char buffer[IPR_MAX_RES_PATH_LENGTH];
2254
2255         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2256                 return;
2257
2258         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259                 if (path_type_desc[i].type != type)
2260                         continue;
2261
2262                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263                         if (path_status_desc[j].status != status)
2264                                 continue;
2265
2266                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267                                      path_status_desc[j].desc, path_type_desc[i].desc,
2268                                      ipr_format_res_path(hostrcb->ioa_cfg,
2269                                         cfg->res_path, buffer, sizeof(buffer)),
2270                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271                                         be32_to_cpu(cfg->wwid[0]),
2272                                         be32_to_cpu(cfg->wwid[1]));
2273                         return;
2274                 }
2275         }
2276         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277                      "WWN=%08X%08X\n", cfg->type_status,
2278                      ipr_format_res_path(hostrcb->ioa_cfg,
2279                         cfg->res_path, buffer, sizeof(buffer)),
2280                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2282 }
2283
2284 /**
2285  * ipr_log_fabric_error - Log a fabric error.
2286  * @ioa_cfg:    ioa config struct
2287  * @hostrcb:    hostrcb struct
2288  *
2289  * Return value:
2290  *      none
2291  **/
2292 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293                                  struct ipr_hostrcb *hostrcb)
2294 {
2295         struct ipr_hostrcb_type_20_error *error;
2296         struct ipr_hostrcb_fabric_desc *fabric;
2297         struct ipr_hostrcb_config_element *cfg;
2298         int i, add_len;
2299
2300         error = &hostrcb->hcam.u.error.u.type_20_error;
2301         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2303
2304         add_len = be32_to_cpu(hostrcb->hcam.length) -
2305                 (offsetof(struct ipr_hostrcb_error, u) +
2306                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2307
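        /*
         * Walk the variable-length fabric descriptors, logging each
         * path and its elements; whatever length remains after the
         * last descriptor is dumped as raw hex.
         */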
2308         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309                 ipr_log_fabric_path(hostrcb, fabric);
2310                 for_each_fabric_cfg(fabric, cfg)
2311                         ipr_log_path_elem(hostrcb, cfg);
2312
2313                 add_len -= be16_to_cpu(fabric->length);
2314                 fabric = (struct ipr_hostrcb_fabric_desc *)
2315                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2316         }
2317
2318         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2319 }
2320
2321 /**
2322  * ipr_log_sis64_array_error - Log a sis64 array error.
2323  * @ioa_cfg:    ioa config struct
2324  * @hostrcb:    hostrcb struct
2325  *
2326  * Return value:
2327  *      none
2328  **/
2329 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330                                       struct ipr_hostrcb *hostrcb)
2331 {
2332         int i, num_entries;
2333         struct ipr_hostrcb_type_24_error *error;
2334         struct ipr_hostrcb64_array_data_entry *array_entry;
2335         char buffer[IPR_MAX_RES_PATH_LENGTH];
2336         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2337
2338         error = &hostrcb->hcam.u.error64.u.type_24_error;
2339
2340         ipr_err_separator;
2341
2342         ipr_err("RAID %s Array Configuration: %s\n",
2343                 error->protection_level,
2344                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345                         buffer, sizeof(buffer)));
2346
2347         ipr_err_separator;
2348
2349         array_entry = error->array_member;
2350         num_entries = min_t(u32, error->num_entries,
2351                             ARRAY_SIZE(error->array_member));
2352
2353         for (i = 0; i < num_entries; i++, array_entry++) {
2354
2355                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2356                         continue;
2357
2358                 if (error->exposed_mode_adn == i)
2359                         ipr_err("Exposed Array Member %d:\n", i);
2360                 else
2361                         ipr_err("Array Member %d:\n", i);
2362
2363                 ipr_err("Array Member %d:\n", i);
2364                 ipr_log_ext_vpd(&array_entry->vpd);
2365                 ipr_err("Current Location: %s\n",
2366                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367                                 buffer, sizeof(buffer)));
2368                 ipr_err("Expected Location: %s\n",
2369                          ipr_format_res_path(ioa_cfg,
2370                                 array_entry->expected_res_path,
2371                                 buffer, sizeof(buffer)));
2372
2373                 ipr_err_separator;
2374         }
2375 }
2376
2377 /**
2378  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379  * @ioa_cfg:    ioa config struct
2380  * @hostrcb:    hostrcb struct
2381  *
2382  * Return value:
2383  *      none
2384  **/
2385 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386                                        struct ipr_hostrcb *hostrcb)
2387 {
2388         struct ipr_hostrcb_type_30_error *error;
2389         struct ipr_hostrcb64_fabric_desc *fabric;
2390         struct ipr_hostrcb64_config_element *cfg;
2391         int i, add_len;
2392
2393         error = &hostrcb->hcam.u.error64.u.type_30_error;
2394
2395         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2397
2398         add_len = be32_to_cpu(hostrcb->hcam.length) -
2399                 (offsetof(struct ipr_hostrcb64_error, u) +
2400                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2401
2402         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403                 ipr_log64_fabric_path(hostrcb, fabric);
2404                 for_each_fabric_cfg(fabric, cfg)
2405                         ipr_log64_path_elem(hostrcb, cfg);
2406
2407                 add_len -= be16_to_cpu(fabric->length);
2408                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2410         }
2411
2412         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2413 }
2414
2415 /**
2416  * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2417  * @ioa_cfg:    ioa config struct
2418  * @hostrcb:    hostrcb struct
2419  *
2420  * Return value:
2421  *      none
2422  **/
2423 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2424                                        struct ipr_hostrcb *hostrcb)
2425 {
2426         struct ipr_hostrcb_type_41_error *error;
2427
2428         error = &hostrcb->hcam.u.error64.u.type_41_error;
2429
2430         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2431         ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2432         ipr_log_hex_data(ioa_cfg, error->data,
2433                          be32_to_cpu(hostrcb->hcam.length) -
2434                          (offsetof(struct ipr_hostrcb64_error, u) +
2435                           offsetof(struct ipr_hostrcb_type_41_error, data)));
2436 }

2437 /**
2438  * ipr_log_generic_error - Log an adapter error.
2439  * @ioa_cfg:    ioa config struct
2440  * @hostrcb:    hostrcb struct
2441  *
2442  * Return value:
2443  *      none
2444  **/
2445 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2446                                   struct ipr_hostrcb *hostrcb)
2447 {
2448         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2449                          be32_to_cpu(hostrcb->hcam.length));
2450 }
2451
2452 /**
2453  * ipr_log_sis64_device_error - Log a sis64 device error.
2454  * @ioa_cfg:    ioa config struct
2455  * @hostrcb:    hostrcb struct
2456  *
2457  * Return value:
2458  *      none
2459  **/
2460 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2461                                          struct ipr_hostrcb *hostrcb)
2462 {
2463         struct ipr_hostrcb_type_21_error *error;
2464         char buffer[IPR_MAX_RES_PATH_LENGTH];
2465
2466         error = &hostrcb->hcam.u.error64.u.type_21_error;
2467
2468         ipr_err("-----Failing Device Information-----\n");
2469         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2470                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2471                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2472         ipr_err("Device Resource Path: %s\n",
2473                 __ipr_format_res_path(error->res_path,
2474                                       buffer, sizeof(buffer)));
2475         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2476         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2477         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2478         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2479         ipr_err("SCSI Sense Data:\n");
2480         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2481         ipr_err("SCSI Command Descriptor Block: \n");
2482         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2483
2484         ipr_err("Additional IOA Data:\n");
2485         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2486 }
2487
2488 /**
2489  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2490  * @ioasc:      IOASC
2491  *
2492  * This function will return the index into the ipr_error_table
2493  * for the specified IOASC. If the IOASC is not in the table,
2494  * 0 will be returned, which points to the entry used for unknown errors.
2495  *
2496  * Return value:
2497  *      index into the ipr_error_table
2498  **/
2499 static u32 ipr_get_error(u32 ioasc)
2500 {
2501         int i;
2502
2503         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2504                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2505                         return i;
2506
2507         return 0;
2508 }
2509
2510 /**
2511  * ipr_handle_log_data - Log an adapter error.
2512  * @ioa_cfg:    ioa config struct
2513  * @hostrcb:    hostrcb struct
2514  *
2515  * This function logs an adapter error to the system.
2516  *
2517  * Return value:
2518  *      none
2519  **/
2520 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2521                                 struct ipr_hostrcb *hostrcb)
2522 {
2523         u32 ioasc;
2524         int error_index;
2525         struct ipr_hostrcb_type_21_error *error;
2526
2527         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2528                 return;
2529
2530         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2531                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2532
2533         if (ioa_cfg->sis64)
2534                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2535         else
2536                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2537
2538         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2539             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2540                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2541                 scsi_report_bus_reset(ioa_cfg->host,
2542                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2543         }
2544
2545         error_index = ipr_get_error(ioasc);
2546
2547         if (!ipr_error_table[error_index].log_hcam)
2548                 return;
2549
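        /*
         * At the default log level, hardware command failures whose
         * SCSI sense key is ILLEGAL_REQUEST are not logged.
         */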
2550         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2551             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2552                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2553
2554                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2555                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2556                         return;
2557         }
2558
2559         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2560
2561         /* Set indication we have logged an error */
2562         ioa_cfg->errors_logged++;
2563
2564         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2565                 return;
2566         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2567                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2568
2569         switch (hostrcb->hcam.overlay_id) {
2570         case IPR_HOST_RCB_OVERLAY_ID_2:
2571                 ipr_log_cache_error(ioa_cfg, hostrcb);
2572                 break;
2573         case IPR_HOST_RCB_OVERLAY_ID_3:
2574                 ipr_log_config_error(ioa_cfg, hostrcb);
2575                 break;
2576         case IPR_HOST_RCB_OVERLAY_ID_4:
2577         case IPR_HOST_RCB_OVERLAY_ID_6:
2578                 ipr_log_array_error(ioa_cfg, hostrcb);
2579                 break;
2580         case IPR_HOST_RCB_OVERLAY_ID_7:
2581                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2582                 break;
2583         case IPR_HOST_RCB_OVERLAY_ID_12:
2584                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2585                 break;
2586         case IPR_HOST_RCB_OVERLAY_ID_13:
2587                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2588                 break;
2589         case IPR_HOST_RCB_OVERLAY_ID_14:
2590         case IPR_HOST_RCB_OVERLAY_ID_16:
2591                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2592                 break;
2593         case IPR_HOST_RCB_OVERLAY_ID_17:
2594                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2595                 break;
2596         case IPR_HOST_RCB_OVERLAY_ID_20:
2597                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2598                 break;
2599         case IPR_HOST_RCB_OVERLAY_ID_21:
2600                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2601                 break;
2602         case IPR_HOST_RCB_OVERLAY_ID_23:
2603                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2604                 break;
2605         case IPR_HOST_RCB_OVERLAY_ID_24:
2606         case IPR_HOST_RCB_OVERLAY_ID_26:
2607                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2608                 break;
2609         case IPR_HOST_RCB_OVERLAY_ID_30:
2610                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2611                 break;
2612         case IPR_HOST_RCB_OVERLAY_ID_41:
2613                 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2614                 break;
2615         case IPR_HOST_RCB_OVERLAY_ID_1:
2616         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2617         default:
2618                 ipr_log_generic_error(ioa_cfg, hostrcb);
2619                 break;
2620         }
2621 }
2622
2623 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2624 {
2625         struct ipr_hostrcb *hostrcb;
2626
2627         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2628                                         struct ipr_hostrcb, queue);
2629
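        /*
         * If the free list is empty, reclaim the oldest buffer still
         * queued for reporting. This assumes at least one hostrcb is
         * always outstanding on one of the two queues.
         */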
2630         if (unlikely(!hostrcb)) {
2631                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2632                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2633                                                 struct ipr_hostrcb, queue);
2634         }
2635
2636         list_del_init(&hostrcb->queue);
2637         return hostrcb;
2638 }
2639
2640 /**
2641  * ipr_process_error - Op done function for an adapter error log.
2642  * @ipr_cmd:    ipr command struct
2643  *
2644  * This function is the op done function for an error log host
2645  * controlled async message (HCAM) from the adapter. It will log the error and
2646  * send the HCAM back to the adapter.
2647  *
2648  * Return value:
2649  *      none
2650  **/
2651 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2652 {
2653         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2654         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2655         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2656         u32 fd_ioasc;
2657
2658         if (ioa_cfg->sis64)
2659                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2660         else
2661                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2662
2663         list_del_init(&hostrcb->queue);
2664         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2665
2666         if (!ioasc) {
2667                 ipr_handle_log_data(ioa_cfg, hostrcb);
2668                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2669                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2670         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2671                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2672                 dev_err(&ioa_cfg->pdev->dev,
2673                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2674         }
2675
2676         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2677         schedule_work(&ioa_cfg->work_q);
2678         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2679
2680         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2681 }
2682
2683 /**
2684  * ipr_timeout -  An internally generated op has timed out.
2685  * @t:          Timer context used to fetch ipr command struct
2686  *
2687  * This function blocks host requests and initiates an
2688  * adapter reset.
2689  *
2690  * Return value:
2691  *      none
2692  **/
2693 static void ipr_timeout(struct timer_list *t)
2694 {
2695         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2696         unsigned long lock_flags = 0;
2697         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2698
2699         ENTER;
2700         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2701
2702         ioa_cfg->errors_logged++;
2703         dev_err(&ioa_cfg->pdev->dev,
2704                 "Adapter being reset due to command timeout.\n");
2705
2706         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2707                 ioa_cfg->sdt_state = GET_DUMP;
2708
2709         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2710                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2711
2712         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2713         LEAVE;
2714 }
2715
2716 /**
2717  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2718  * @t:          Timer context used to fetch ipr command struct
2719  *
2720  * This function blocks host requests and initiates an
2721  * adapter reset.
2722  *
2723  * Return value:
2724  *      none
2725  **/
2726 static void ipr_oper_timeout(struct timer_list *t)
2727 {
2728         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2729         unsigned long lock_flags = 0;
2730         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2731
2732         ENTER;
2733         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2734
2735         ioa_cfg->errors_logged++;
2736         dev_err(&ioa_cfg->pdev->dev,
2737                 "Adapter timed out transitioning to operational.\n");
2738
2739         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2740                 ioa_cfg->sdt_state = GET_DUMP;
2741
2742         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2743                 if (ipr_fastfail)
2744                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2745                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2746         }
2747
2748         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2749         LEAVE;
2750 }
2751
2752 /**
2753  * ipr_find_ses_entry - Find matching SES in SES table
2754  * @res:        resource entry struct of SES
2755  *
2756  * Return value:
2757  *      pointer to SES table entry / NULL on failure
2758  **/
2759 static const struct ipr_ses_table_entry *
2760 ipr_find_ses_entry(struct ipr_resource_entry *res)
2761 {
2762         int i, j, matches;
2763         struct ipr_std_inq_vpids *vpids;
2764         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2765
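        /*
         * In ipr_ses_table, a compare_product_id_byte of 'X' marks a
         * product ID byte that must match exactly; any other value
         * makes that byte position a don't-care.
         */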
2766         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2767                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2768                         if (ste->compare_product_id_byte[j] == 'X') {
2769                                 vpids = &res->std_inq_data.vpids;
2770                                 if (vpids->product_id[j] == ste->product_id[j])
2771                                         matches++;
2772                                 else
2773                                         break;
2774                         } else
2775                                 matches++;
2776                 }
2777
2778                 if (matches == IPR_PROD_ID_LEN)
2779                         return ste;
2780         }
2781
2782         return NULL;
2783 }
2784
2785 /**
2786  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2787  * @ioa_cfg:    ioa config struct
2788  * @bus:                SCSI bus
2789  * @bus_width:  bus width
2790  *
2791  * Return value:
2792  *      SCSI bus speed in units of 100KHz; 1600 means 160 MHz.
2793  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2794  *      MB/sec is twice the bus clock rate (e.g. a wide enabled bus
2795  *      running at 160 MHz transfers at up to 320 MB/sec).
2796  **/
2797 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2798 {
2799         struct ipr_resource_entry *res;
2800         const struct ipr_ses_table_entry *ste;
2801         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2802
2803         /* Loop through each config table entry in the config table buffer */
2804         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2805                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2806                         continue;
2807
2808                 if (bus != res->bus)
2809                         continue;
2810
2811                 if (!(ste = ipr_find_ses_entry(res)))
2812                         continue;
2813
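                /*
                 * Convert the SES limit (assumed to be in MB/sec) to
                 * 100KHz clock units: e.g. a 320 MB/sec limit on a
                 * 2-byte wide bus is (320 * 10) / 2 = 1600, i.e. 160 MHz.
                 */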
2814                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2815         }
2816
2817         return max_xfer_rate;
2818 }
2819
2820 /**
2821  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2822  * @ioa_cfg:            ioa config struct
2823  * @max_delay:          max delay in microseconds to wait
2824  *
2825  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2826  *
2827  * Return value:
2828  *      0 on success / other on failure
2829  **/
2830 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2831 {
2832         volatile u32 pcii_reg;
2833         int delay = 1;
2834
2835         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2836         while (delay < max_delay) {
2837                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2838
2839                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2840                         return 0;
2841
2842                 /* udelay cannot be used if delay is more than a few milliseconds */
2843                 if ((delay / 1000) > MAX_UDELAY_MS)
2844                         mdelay(delay / 1000);
2845                 else
2846                         udelay(delay);
2847
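                /* Exponential backoff: double the poll interval each pass */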
2848                 delay += delay;
2849         }
2850         return -EIO;
2851 }
2852
2853 /**
2854  * ipr_get_sis64_dump_data_section - Dump IOA memory
2855  * @ioa_cfg:                    ioa config struct
2856  * @start_addr:                 adapter address to dump
2857  * @dest:                       destination kernel buffer
2858  * @length_in_words:            length to dump in 4 byte words
2859  *
2860  * Return value:
2861  *      0 on success
2862  **/
2863 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2864                                            u32 start_addr,
2865                                            __be32 *dest, u32 length_in_words)
2866 {
2867         int i;
2868
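        /*
         * Each word is fetched through the adapter's indirect dump
         * address/data register pair.
         */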
2869         for (i = 0; i < length_in_words; i++) {
2870                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2871                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2872                 dest++;
2873         }
2874
2875         return 0;
2876 }
2877
2878 /**
2879  * ipr_get_ldump_data_section - Dump IOA memory
2880  * @ioa_cfg:                    ioa config struct
2881  * @start_addr:                 adapter address to dump
2882  * @dest:                       destination kernel buffer
2883  * @length_in_words:            length to dump in 4 byte words
2884  *
2885  * Return value:
2886  *      0 on success / -EIO on failure
2887  **/
2888 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2889                                       u32 start_addr,
2890                                       __be32 *dest, u32 length_in_words)
2891 {
2892         volatile u32 temp_pcii_reg;
2893         int i, delay = 0;
2894
2895         if (ioa_cfg->sis64)
2896                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2897                                                        dest, length_in_words);
2898
2899         /* Write IOA interrupt reg starting LDUMP state  */
2900         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2901                ioa_cfg->regs.set_uproc_interrupt_reg32);
2902
2903         /* Wait for IO debug acknowledge */
2904         if (ipr_wait_iodbg_ack(ioa_cfg,
2905                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2906                 dev_err(&ioa_cfg->pdev->dev,
2907                         "IOA dump long data transfer timeout\n");
2908                 return -EIO;
2909         }
2910
2911         /* Signal LDUMP interlocked - clear IO debug ack */
2912         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2913                ioa_cfg->regs.clr_interrupt_reg);
2914
2915         /* Write Mailbox with starting address */
2916         writel(start_addr, ioa_cfg->ioa_mailbox);
2917
2918         /* Signal address valid - clear IOA Reset alert */
2919         writel(IPR_UPROCI_RESET_ALERT,
2920                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2921
2922         for (i = 0; i < length_in_words; i++) {
2923                 /* Wait for IO debug acknowledge */
2924                 if (ipr_wait_iodbg_ack(ioa_cfg,
2925                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2926                         dev_err(&ioa_cfg->pdev->dev,
2927                                 "IOA dump short data transfer timeout\n");
2928                         return -EIO;
2929                 }
2930
2931                 /* Read data from mailbox and increment destination pointer */
2932                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2933                 dest++;
2934
2935                 /* For all but the last word of data, signal data received */
2936                 if (i < (length_in_words - 1)) {
2937                         /* Signal dump data received - Clear IO debug Ack */
2938                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2939                                ioa_cfg->regs.clr_interrupt_reg);
2940                 }
2941         }
2942
2943         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2944         writel(IPR_UPROCI_RESET_ALERT,
2945                ioa_cfg->regs.set_uproc_interrupt_reg32);
2946
2947         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2948                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2949
2950         /* Signal dump data received - Clear IO debug Ack */
2951         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2952                ioa_cfg->regs.clr_interrupt_reg);
2953
2954         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2955         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2956                 temp_pcii_reg =
2957                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2958
2959                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2960                         return 0;
2961
2962                 udelay(10);
2963                 delay += 10;
2964         }
2965
2966         return 0;
2967 }
2968
2969 #ifdef CONFIG_SCSI_IPR_DUMP
2970 /**
2971  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2972  * @ioa_cfg:            ioa config struct
2973  * @pci_address:        adapter address
2974  * @length:             length of data to copy
2975  *
2976  * Copy data from PCI adapter to kernel buffer.
2977  * Note: length MUST be a 4 byte multiple
2978  * Return value:
2979  *      0 on success / other on failure
2980  **/
2981 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2982                         unsigned long pci_address, u32 length)
2983 {
2984         int bytes_copied = 0;
2985         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2986         __be32 *page;
2987         unsigned long lock_flags = 0;
2988         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2989
2990         if (ioa_cfg->sis64)
2991                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2992         else
2993                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2994
2995         while (bytes_copied < length &&
2996                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2997                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2998                     ioa_dump->page_offset == 0) {
2999                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
3000
3001                         if (!page) {
3002                                 ipr_trace;
3003                                 return bytes_copied;
3004                         }
3005
3006                         ioa_dump->page_offset = 0;
3007                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
3008                         ioa_dump->next_page_index++;
3009                 } else
3010                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
3011
3012                 rem_len = length - bytes_copied;
3013                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
3014                 cur_len = min(rem_len, rem_page_len);
3015
3016                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3017                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3018                         rc = -EIO;
3019                 } else {
3020                         rc = ipr_get_ldump_data_section(ioa_cfg,
3021                                                         pci_address + bytes_copied,
3022                                                         &page[ioa_dump->page_offset / 4],
3023                                                         (cur_len / sizeof(u32)));
3024                 }
3025                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3026
3027                 if (!rc) {
3028                         ioa_dump->page_offset += cur_len;
3029                         bytes_copied += cur_len;
3030                 } else {
3031                         ipr_trace;
3032                         break;
3033                 }
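                /* Let other tasks run between dump sections */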
3034                 schedule();
3035         }
3036
3037         return bytes_copied;
3038 }
3039
3040 /**
3041  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3042  * @hdr:        dump entry header struct
3043  *
3044  * Return value:
3045  *      nothing
3046  **/
3047 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3048 {
3049         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3050         hdr->num_elems = 1;
3051         hdr->offset = sizeof(*hdr);
3052         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3053 }
3054
3055 /**
3056  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3057  * @ioa_cfg:    ioa config struct
3058  * @driver_dump:        driver dump struct
3059  *
3060  * Return value:
3061  *      nothing
3062  **/
3063 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3064                                    struct ipr_driver_dump *driver_dump)
3065 {
3066         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3067
3068         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3069         driver_dump->ioa_type_entry.hdr.len =
3070                 sizeof(struct ipr_dump_ioa_type_entry) -
3071                 sizeof(struct ipr_dump_entry_header);
3072         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3073         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3074         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3075         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3076                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3077                 ucode_vpd->minor_release[1];
3078         driver_dump->hdr.num_entries++;
3079 }
3080
3081 /**
3082  * ipr_dump_version_data - Fill in the driver version in the dump.
3083  * @ioa_cfg:    ioa config struct
3084  * @driver_dump:        driver dump struct
3085  *
3086  * Return value:
3087  *      nothing
3088  **/
3089 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3090                                   struct ipr_driver_dump *driver_dump)
3091 {
3092         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3093         driver_dump->version_entry.hdr.len =
3094                 sizeof(struct ipr_dump_version_entry) -
3095                 sizeof(struct ipr_dump_entry_header);
3096         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3097         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3098         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3099         driver_dump->hdr.num_entries++;
3100 }
3101
3102 /**
3103  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3104  * @ioa_cfg:    ioa config struct
3105  * @driver_dump:        driver dump struct
3106  *
3107  * Return value:
3108  *      nothing
3109  **/
3110 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3111                                    struct ipr_driver_dump *driver_dump)
3112 {
3113         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3114         driver_dump->trace_entry.hdr.len =
3115                 sizeof(struct ipr_dump_trace_entry) -
3116                 sizeof(struct ipr_dump_entry_header);
3117         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3118         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3119         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3120         driver_dump->hdr.num_entries++;
3121 }
3122
3123 /**
3124  * ipr_dump_location_data - Fill in the IOA location in the dump.
3125  * @ioa_cfg:    ioa config struct
3126  * @driver_dump:        driver dump struct
3127  *
3128  * Return value:
3129  *      nothing
3130  **/
3131 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3132                                    struct ipr_driver_dump *driver_dump)
3133 {
3134         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3135         driver_dump->location_entry.hdr.len =
3136                 sizeof(struct ipr_dump_location_entry) -
3137                 sizeof(struct ipr_dump_entry_header);
3138         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3139         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3140         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3141         driver_dump->hdr.num_entries++;
3142 }
3143
3144 /**
3145  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3146  * @ioa_cfg:    ioa config struct
3147  * @dump:               dump struct
3148  *
3149  * Return value:
3150  *      nothing
3151  **/
3152 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3153 {
3154         unsigned long start_addr, sdt_word;
3155         unsigned long lock_flags = 0;
3156         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3157         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3158         u32 num_entries, max_num_entries, start_off, end_off;
3159         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3160         struct ipr_sdt *sdt;
3161         int valid = 1;
3162         int i;
3163
3164         ENTER;
3165
3166         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3167
3168         if (ioa_cfg->sdt_state != READ_DUMP) {
3169                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3170                 return;
3171         }
3172
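        /* sis64: give the IOA time to get its dump table ready (assumed intent) */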
3173         if (ioa_cfg->sis64) {
3174                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3175                 ssleep(IPR_DUMP_DELAY_SECONDS);
3176                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3177         }
3178
3179         start_addr = readl(ioa_cfg->ioa_mailbox);
3180
3181         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3182                 dev_err(&ioa_cfg->pdev->dev,
3183                         "Invalid dump table format: %lx\n", start_addr);
3184                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3185                 return;
3186         }
3187
3188         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3189
3190         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3191
3192         /* Initialize the overall dump header */
3193         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3194         driver_dump->hdr.num_entries = 1;
3195         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3196         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3197         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3198         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3199
3200         ipr_dump_version_data(ioa_cfg, driver_dump);
3201         ipr_dump_location_data(ioa_cfg, driver_dump);
3202         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3203         ipr_dump_trace_data(ioa_cfg, driver_dump);
3204
3205         /* Update dump_header */
3206         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3207
3208         /* IOA Dump entry */
3209         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3210         ioa_dump->hdr.len = 0;
3211         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3212         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3213
3214         /* First entries in sdt are actually a list of dump addresses and
3215          * lengths to gather the real dump data.  sdt represents the pointer
3216          * to the ioa generated dump table.  Dump data will be extracted
3217          * based on entries in this table. */
3218         sdt = &ioa_dump->sdt;
3219
3220         if (ioa_cfg->sis64) {
3221                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3222                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3223         } else {
3224                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3225                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3226         }
3227
3228         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3229                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3230         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3231                                         bytes_to_copy / sizeof(__be32));
3232
3233         /* Smart Dump table is ready to use and the first entry is valid */
3234         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3235             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3236                 dev_err(&ioa_cfg->pdev->dev,
3237                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3238                         rc, be32_to_cpu(sdt->hdr.state));
3239                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3240                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3241                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3242                 return;
3243         }
3244
3245         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3246
3247         if (num_entries > max_num_entries)
3248                 num_entries = max_num_entries;
3249
3250         /* Update dump length to the actual data to be copied */
3251         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3252         if (ioa_cfg->sis64)
3253                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3254         else
3255                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3256
3257         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3258
3259         for (i = 0; i < num_entries; i++) {
3260                 if (ioa_dump->hdr.len > max_dump_size) {
3261                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3262                         break;
3263                 }
3264
3265                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3266                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3267                         if (ioa_cfg->sis64)
3268                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3269                         else {
3270                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3271                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3272
3273                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3274                                         bytes_to_copy = end_off - start_off;
3275                                 else
3276                                         valid = 0;
3277                         }
3278                         if (valid) {
3279                                 if (bytes_to_copy > max_dump_size) {
3280                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3281                                         continue;
3282                                 }
3283
3284                                 /* Copy data from adapter to driver buffers */
3285                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3286                                                             bytes_to_copy);
3287
3288                                 ioa_dump->hdr.len += bytes_copied;
3289
3290                                 if (bytes_copied != bytes_to_copy) {
3291                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3292                                         break;
3293                                 }
3294                         }
3295                 }
3296         }
3297
3298         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3299
3300         /* Update dump_header */
3301         driver_dump->hdr.len += ioa_dump->hdr.len;
3302         wmb();
3303         ioa_cfg->sdt_state = DUMP_OBTAINED;
3304         LEAVE;
3305 }
3306
3307 #else
3308 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3309 #endif
3310
3311 /**
3312  * ipr_release_dump - Free adapter dump memory
3313  * @kref:       kref struct
3314  *
3315  * Return value:
3316  *      nothing
3317  **/
3318 static void ipr_release_dump(struct kref *kref)
3319 {
3320         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3321         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3322         unsigned long lock_flags = 0;
3323         int i;
3324
3325         ENTER;
3326         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3327         ioa_cfg->dump = NULL;
3328         ioa_cfg->sdt_state = INACTIVE;
3329         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3330
3331         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3332                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3333
3334         vfree(dump->ioa_dump.ioa_data);
3335         kfree(dump);
3336         LEAVE;
3337 }
3338
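/**
 * ipr_add_remove_thread - Add/remove devices to match the config table
 * @work:       work struct embedded in the ioa config struct
 *
 * Removes any devices flagged del_from_ml from the mid-layer, then
 * adds devices flagged add_to_ml, dropping the host lock around each
 * mid-layer call.
 *
 * Return value:
 *      nothing
 **/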
3339 static void ipr_add_remove_thread(struct work_struct *work)
3340 {
3341         unsigned long lock_flags;
3342         struct ipr_resource_entry *res;
3343         struct scsi_device *sdev;
3344         struct ipr_ioa_cfg *ioa_cfg =
3345                 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3346         u8 bus, target, lun;
3347         int did_work;
3348
3349         ENTER;
3350         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3351
3352 restart:
3353         do {
3354                 did_work = 0;
3355                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3356                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3357                         return;
3358                 }
3359
3360                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3361                         if (res->del_from_ml && res->sdev) {
3362                                 did_work = 1;
3363                                 sdev = res->sdev;
3364                                 if (!scsi_device_get(sdev)) {
3365                                         if (!res->add_to_ml)
3366                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3367                                         else
3368                                                 res->del_from_ml = 0;
3369                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3370                                         scsi_remove_device(sdev);
3371                                         scsi_device_put(sdev);
3372                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3373                                 }
3374                                 break;
3375                         }
3376                 }
3377         } while (did_work);
3378
3379         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3380                 if (res->add_to_ml) {
3381                         bus = res->bus;
3382                         target = res->target;
3383                         lun = res->lun;
3384                         res->add_to_ml = 0;
3385                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3386                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3387                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3388                         goto restart;
3389                 }
3390         }
3391
3392         ioa_cfg->scan_done = 1;
3393         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3394         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3395         LEAVE;
3396 }
3397
3398 /**
3399  * ipr_worker_thread - Worker thread
3400  * @work:               work struct embedded in the ioa config struct
3401  *
3402  * Called at task level from a work thread. This function takes care
3403  * of adding and removing devices from the mid-layer as configuration
3404  * changes are detected by the adapter.
3405  *
3406  * Return value:
3407  *      nothing
3408  **/
3409 static void ipr_worker_thread(struct work_struct *work)
3410 {
3411         unsigned long lock_flags;
3412         struct ipr_dump *dump;
3413         struct ipr_ioa_cfg *ioa_cfg =
3414                 container_of(work, struct ipr_ioa_cfg, work_q);
3415
3416         ENTER;
3417         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3418
3419         if (ioa_cfg->sdt_state == READ_DUMP) {
3420                 dump = ioa_cfg->dump;
3421                 if (!dump) {
3422                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3423                         return;
3424                 }
3425                 kref_get(&dump->kref);
3426                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3427                 ipr_get_ioa_dump(ioa_cfg, dump);
3428                 kref_put(&dump->kref, ipr_release_dump);
3429
3430                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3431                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3432                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3433                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3434                 return;
3435         }
3436
3437         if (ioa_cfg->scsi_unblock) {
3438                 ioa_cfg->scsi_unblock = 0;
3439                 ioa_cfg->scsi_blocked = 0;
3440                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3441                 scsi_unblock_requests(ioa_cfg->host);
3442                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
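                /*
                 * If a reset path set scsi_blocked again while the host
                 * lock was dropped, honor it and re-block requests.
                 */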
3443                 if (ioa_cfg->scsi_blocked)
3444                         scsi_block_requests(ioa_cfg->host);
3445         }
3446
3447         if (!ioa_cfg->scan_enabled) {
3448                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3449                 return;
3450         }
3451
3452         schedule_work(&ioa_cfg->scsi_add_work_q);
3453
3454         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3455         LEAVE;
3456 }
3457
3458 #ifdef CONFIG_SCSI_IPR_TRACE
3459 /**
3460  * ipr_read_trace - Dump the adapter trace
3461  * @filp:               open sysfs file
3462  * @kobj:               kobject struct
3463  * @bin_attr:           bin_attribute struct
3464  * @buf:                buffer
3465  * @off:                offset
3466  * @count:              buffer size
3467  *
3468  * Return value:
3469  *      number of bytes printed to buffer
3470  **/
3471 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3472                               struct bin_attribute *bin_attr,
3473                               char *buf, loff_t off, size_t count)
3474 {
3475         struct device *dev = container_of(kobj, struct device, kobj);
3476         struct Scsi_Host *shost = class_to_shost(dev);
3477         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3478         unsigned long lock_flags = 0;
3479         ssize_t ret;
3480
3481         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3482         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3483                                 IPR_TRACE_SIZE);
3484         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3485
3486         return ret;
3487 }
3488
3489 static struct bin_attribute ipr_trace_attr = {
3490         .attr = {
3491                 .name = "trace",
3492                 .mode = S_IRUGO,
3493         },
3494         .size = 0,
3495         .read = ipr_read_trace,
3496 };
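
/*
 * Example read of the adapter trace (host0 assumed to be an ipr adapter):
 *   dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace.bin
 */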
3497 #endif
3498
3499 /**
3500  * ipr_show_fw_version - Show the firmware version
3501  * @dev:        class device struct
3502  * @buf:        buffer
3503  *
3504  * Return value:
3505  *      number of bytes printed to buffer
3506  **/
3507 static ssize_t ipr_show_fw_version(struct device *dev,
3508                                    struct device_attribute *attr, char *buf)
3509 {
3510         struct Scsi_Host *shost = class_to_shost(dev);
3511         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3512         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3513         unsigned long lock_flags = 0;
3514         int len;
3515
3516         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3517         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3518                        ucode_vpd->major_release, ucode_vpd->card_type,
3519                        ucode_vpd->minor_release[0],
3520                        ucode_vpd->minor_release[1]);
3521         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3522         return len;
3523 }
3524
3525 static struct device_attribute ipr_fw_version_attr = {
3526         .attr = {
3527                 .name =         "fw_version",
3528                 .mode =         S_IRUGO,
3529         },
3530         .show = ipr_show_fw_version,
3531 };
3532
3533 /**
3534  * ipr_show_log_level - Show the adapter's error logging level
3535  * @dev:        class device struct
3536  * @buf:        buffer
3537  *
3538  * Return value:
3539  *      number of bytes printed to buffer
3540  **/
3541 static ssize_t ipr_show_log_level(struct device *dev,
3542                                    struct device_attribute *attr, char *buf)
3543 {
3544         struct Scsi_Host *shost = class_to_shost(dev);
3545         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3546         unsigned long lock_flags = 0;
3547         int len;
3548
3549         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3550         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3551         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3552         return len;
3553 }
3554
3555 /**
3556  * ipr_store_log_level - Change the adapter's error logging level
3557  * @dev:        class device struct
3558  * @buf:        buffer
3559  *
3560  * Return value:
3561  *      number of bytes consumed on success
3562  **/
3563 static ssize_t ipr_store_log_level(struct device *dev,
3564                                    struct device_attribute *attr,
3565                                    const char *buf, size_t count)
3566 {
3567         struct Scsi_Host *shost = class_to_shost(dev);
3568         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3569         unsigned long lock_flags = 0;
3570
3571         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3572         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3573         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3574         return strlen(buf);
3575 }
3576
3577 static struct device_attribute ipr_log_level_attr = {
3578         .attr = {
3579                 .name =         "log_level",
3580                 .mode =         S_IRUGO | S_IWUSR,
3581         },
3582         .show = ipr_show_log_level,
3583         .store = ipr_store_log_level
3584 };
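
/*
 * Example usage (host0 assumed to be an ipr adapter):
 *   cat /sys/class/scsi_host/host0/log_level
 *   echo 4 > /sys/class/scsi_host/host0/log_level
 */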
3585
3586 /**
3587  * ipr_store_diagnostics - IOA Diagnostics interface
3588  * @dev:        device struct
3589  * @buf:        buffer
3590  * @count:      buffer size
3591  *
3592  * This function will reset the adapter and wait a reasonable
3593  * amount of time for any errors that the adapter might log.
3594  *
3595  * Return value:
3596  *      count on success / other on failure
3597  **/
3598 static ssize_t ipr_store_diagnostics(struct device *dev,
3599                                      struct device_attribute *attr,
3600                                      const char *buf, size_t count)
3601 {
3602         struct Scsi_Host *shost = class_to_shost(dev);
3603         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3604         unsigned long lock_flags = 0;
3605         int rc = count;
3606
3607         if (!capable(CAP_SYS_ADMIN))
3608                 return -EACCES;
3609
3610         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3611         while (ioa_cfg->in_reset_reload) {
3612                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3613                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3614                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3615         }
3616
3617         ioa_cfg->errors_logged = 0;
3618         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3619
3620         if (ioa_cfg->in_reset_reload) {
3621                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3622                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3623
3624                 /* Wait for a second for any errors to be logged */
3625                 msleep(1000);
3626         } else {
3627                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3628                 return -EIO;
3629         }
3630
3631         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3632         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3633                 rc = -EIO;
3634         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3635
3636         return rc;
3637 }
3638
3639 static struct device_attribute ipr_diagnostics_attr = {
3640         .attr = {
3641                 .name =         "run_diagnostics",
3642                 .mode =         S_IWUSR,
3643         },
3644         .store = ipr_store_diagnostics
3645 };
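
/*
 * Example usage (any write triggers the diagnostic reset; host0 assumed):
 *   echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */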
3646
3647 /**
3648  * ipr_show_adapter_state - Show the adapter's state
3649  * @dev:        device struct
3650  * @buf:        buffer
3651  *
3652  * Return value:
3653  *      number of bytes printed to buffer
3654  **/
3655 static ssize_t ipr_show_adapter_state(struct device *dev,
3656                                       struct device_attribute *attr, char *buf)
3657 {
3658         struct Scsi_Host *shost = class_to_shost(dev);
3659         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3660         unsigned long lock_flags = 0;
3661         int len;
3662
3663         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3664         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3665                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3666         else
3667                 len = snprintf(buf, PAGE_SIZE, "online\n");
3668         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3669         return len;
3670 }
3671
3672 /**
3673  * ipr_store_adapter_state - Change adapter state
3674  * @dev:        device struct
3675  * @buf:        buffer
3676  * @count:      buffer size
3677  *
3678  * This function will change the adapter's state.
3679  *
3680  * Return value:
3681  *      count on success / other on failure
3682  **/
3683 static ssize_t ipr_store_adapter_state(struct device *dev,
3684                                        struct device_attribute *attr,
3685                                        const char *buf, size_t count)
3686 {
3687         struct Scsi_Host *shost = class_to_shost(dev);
3688         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3689         unsigned long lock_flags;
3690         int result = count, i;
3691
3692         if (!capable(CAP_SYS_ADMIN))
3693                 return -EACCES;
3694
3695         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3696         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3697             !strncmp(buf, "online", 6)) {
3698                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3699                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3700                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3701                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3702                 }
3703                 wmb();
3704                 ioa_cfg->reset_retries = 0;
3705                 ioa_cfg->in_ioa_bringdown = 0;
3706                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3707         }
3708         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3709         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3710
3711         return result;
3712 }
3713
3714 static struct device_attribute ipr_ioa_state_attr = {
3715         .attr = {
3716                 .name =         "online_state",
3717                 .mode =         S_IRUGO | S_IWUSR,
3718         },
3719         .show = ipr_show_adapter_state,
3720         .store = ipr_store_adapter_state
3721 };
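
/*
 * Example usage (brings a dead adapter back online; host0 assumed):
 *   echo online > /sys/class/scsi_host/host0/online_state
 */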
3722
3723 /**
3724  * ipr_store_reset_adapter - Reset the adapter
3725  * @dev:        device struct
3726  * @buf:        buffer
3727  * @count:      buffer size
3728  *
3729  * This function will reset the adapter.
3730  *
3731  * Return value:
3732  *      count on success / other on failure
3733  **/
3734 static ssize_t ipr_store_reset_adapter(struct device *dev,
3735                                        struct device_attribute *attr,
3736                                        const char *buf, size_t count)
3737 {
3738         struct Scsi_Host *shost = class_to_shost(dev);
3739         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3740         unsigned long lock_flags;
3741         int result = count;
3742
3743         if (!capable(CAP_SYS_ADMIN))
3744                 return -EACCES;
3745
3746         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3747         if (!ioa_cfg->in_reset_reload)
3748                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3749         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3750         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3751
3752         return result;
3753 }
3754
3755 static struct device_attribute ipr_ioa_reset_attr = {
3756         .attr = {
3757                 .name =         "reset_host",
3758                 .mode =         S_IWUSR,
3759         },
3760         .store = ipr_store_reset_adapter
3761 };
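
/*
 * Example usage (any write initiates a normal adapter reset; host0 assumed):
 *   echo 1 > /sys/class/scsi_host/host0/reset_host
 */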
3762
3763 static int ipr_iopoll(struct irq_poll *iop, int budget);
3764 /**
3765  * ipr_show_iopoll_weight - Show ipr polling mode
3766  * @dev:        class device struct
3767  * @buf:        buffer
3768  *
3769  * Return value:
3770  *      number of bytes printed to buffer
3771  **/
3772 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3773                                    struct device_attribute *attr, char *buf)
3774 {
3775         struct Scsi_Host *shost = class_to_shost(dev);
3776         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3777         unsigned long lock_flags = 0;
3778         int len;
3779
3780         spin_lock_irqsave(shost->host_lock, lock_flags);
3781         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3782         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3783
3784         return len;
3785 }
3786
3787 /**
3788  * ipr_store_iopoll_weight - Change the adapter's polling mode
3789  * @dev:        class device struct
3790  * @buf:        buffer
3791  *
3792  * Return value:
3793  *      number of bytes consumed on success
3794  **/
3795 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3796                                         struct device_attribute *attr,
3797                                         const char *buf, size_t count)
3798 {
3799         struct Scsi_Host *shost = class_to_shost(dev);
3800         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3801         unsigned long user_iopoll_weight;
3802         unsigned long lock_flags = 0;
3803         int i;
3804
3805         if (!ioa_cfg->sis64) {
3806                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3807                 return -EINVAL;
3808         }
3809         if (kstrtoul(buf, 10, &user_iopoll_weight))
3810                 return -EINVAL;
3811
3812         if (user_iopoll_weight > 256) {
3813                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be 256 or less\n");
3814                 return -EINVAL;
3815         }
3816
3817         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3818                 dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight is already set\n");
3819                 return strlen(buf);
3820         }
3821
3822         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3823                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3824                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3825         }
3826
3827         spin_lock_irqsave(shost->host_lock, lock_flags);
3828         ioa_cfg->iopoll_weight = user_iopoll_weight;
3829         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3830                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3831                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3832                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3833                 }
3834         }
3835         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3836
3837         return strlen(buf);
3838 }
3839
3840 static struct device_attribute ipr_iopoll_weight_attr = {
3841         .attr = {
3842                 .name =         "iopoll_weight",
3843                 .mode =         S_IRUGO | S_IWUSR,
3844         },
3845         .show = ipr_show_iopoll_weight,
3846         .store = ipr_store_iopoll_weight
3847 };
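
/*
 * Example usage (sis64 adapters with multiple HRRQs only; host0 assumed):
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */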
3848
3849 /**
3850  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3851  * @buf_len:            buffer length
3852  *
3853  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3854  * list to use for microcode download
3855  *
3856  * Return value:
3857  *      pointer to sglist / NULL on failure
3858  **/
3859 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3860 {
3861         int sg_size, order;
3862         struct ipr_sglist *sglist;
3863
3864         /* Get the minimum size per scatter/gather element */
3865         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3866
3867         /* Get the actual size per element */
3868         order = get_order(sg_size);
3869
3870         /* Allocate a scatter/gather list for the DMA */
3871         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3872         if (sglist == NULL) {
3873                 ipr_trace;
3874                 return NULL;
3875         }
3876         sglist->order = order;
3877         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3878                                               &sglist->num_sg);
3879         if (!sglist->scatterlist) {
3880                 kfree(sglist);
3881                 return NULL;
3882         }
3883
3884         return sglist;
3885 }
3886
3887 /**
3888  * ipr_free_ucode_buffer - Frees a microcode download buffer
3889  * @sglist:             scatter/gather list pointer
3890  *
3891  * Free a DMA'able ucode download buffer previously allocated with
3892  * ipr_alloc_ucode_buffer
3893  *
3894  * Return value:
3895  *      nothing
3896  **/
3897 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3898 {
3899         sgl_free_order(sglist->scatterlist, sglist->order);
3900         kfree(sglist);
3901 }
3902
3903 /**
3904  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3905  * @sglist:             scatter/gather list pointer
3906  * @buffer:             buffer pointer
3907  * @len:                buffer length
3908  *
3909  * Copy a microcode image from a user buffer into a buffer allocated by
3910  * ipr_alloc_ucode_buffer
3911  *
3912  * Return value:
3913  *      0 on success / other on failure
3914  **/
3915 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3916                                  u8 *buffer, u32 len)
3917 {
3918         int bsize_elem, i, result = 0;
3919         struct scatterlist *scatterlist;
3920         void *kaddr;
3921
3922         /* Determine the actual number of bytes per element */
3923         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3924
3925         scatterlist = sglist->scatterlist;
3926
3927         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3928                 struct page *page = sg_page(&scatterlist[i]);
3929
3930                 kaddr = kmap(page);
3931                 memcpy(kaddr, buffer, bsize_elem);
3932                 kunmap(page);
3933
3934                 scatterlist[i].length = bsize_elem;
3940         }
3941
3942         if (len % bsize_elem) {
3943                 struct page *page = sg_page(&scatterlist[i]);
3944
3945                 kaddr = kmap(page);
3946                 memcpy(kaddr, buffer, len % bsize_elem);
3947                 kunmap(page);
3948
3949                 scatterlist[i].length = len % bsize_elem;
3950         }
3951
3952         sglist->buffer_len = len;
3953         return result;
3954 }
3955
3956 /**
3957  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3958  * @ipr_cmd:            ipr command struct
3959  * @sglist:             scatter/gather list
3960  *
3961  * Builds a microcode download IOA data list (IOADL).
3962  *
3963  **/
3964 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3965                                     struct ipr_sglist *sglist)
3966 {
3967         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3968         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3969         struct scatterlist *scatterlist = sglist->scatterlist;
3970         int i;
3971
3972         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3973         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3974         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3975
3976         ioarcb->ioadl_len =
3977                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3978         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3979                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3980                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3981                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3982         }
3983
3984         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3985 }
3986
3987 /**
3988  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3989  * @ipr_cmd:    ipr command struct
3990  * @sglist:             scatter/gather list
3991  *
3992  * Builds a microcode download IOA data list (IOADL).
3993  *
3994  **/
3995 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3996                                   struct ipr_sglist *sglist)
3997 {
3998         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3999         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4000         struct scatterlist *scatterlist = sglist->scatterlist;
4001         int i;
4002
4003         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
4004         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4005         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4006
4007         ioarcb->ioadl_len =
4008                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4009
4010         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4011                 ioadl[i].flags_and_data_len =
4012                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4013                 ioadl[i].address =
4014                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
4015         }
4016
4017         ioadl[i-1].flags_and_data_len |=
4018                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4019 }
4020
4021 /**
4022  * ipr_update_ioa_ucode - Update IOA's microcode
4023  * @ioa_cfg:    ioa config struct
4024  * @sglist:             scatter/gather list
4025  *
4026  * Initiate an adapter reset to update the IOA's microcode
4027  *
4028  * Return value:
4029  *      0 on success / -EIO on failure
4030  **/
4031 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4032                                 struct ipr_sglist *sglist)
4033 {
4034         unsigned long lock_flags;
4035
4036         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4037         while (ioa_cfg->in_reset_reload) {
4038                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4039                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4040                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4041         }
4042
4043         if (ioa_cfg->ucode_sglist) {
4044                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4045                 dev_err(&ioa_cfg->pdev->dev,
4046                         "Microcode download already in progress\n");
4047                 return -EIO;
4048         }
4049
4050         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4051                                         sglist->scatterlist, sglist->num_sg,
4052                                         DMA_TO_DEVICE);
4053
4054         if (!sglist->num_dma_sg) {
4055                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4056                 dev_err(&ioa_cfg->pdev->dev,
4057                         "Failed to map microcode download buffer!\n");
4058                 return -EIO;
4059         }
4060
4061         ioa_cfg->ucode_sglist = sglist;
4062         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4063         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4064         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4065
4066         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4067         ioa_cfg->ucode_sglist = NULL;
4068         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4069         return 0;
4070 }
4071
4072 /**
4073  * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:        device struct
 * @attr:       device attribute struct
4075  * @buf:        buffer
4076  * @count:      buffer size
4077  *
4078  * This function will update the firmware on the adapter.
4079  *
4080  * Return value:
4081  *      count on success / other on failure
4082  **/
4083 static ssize_t ipr_store_update_fw(struct device *dev,
4084                                    struct device_attribute *attr,
4085                                    const char *buf, size_t count)
4086 {
4087         struct Scsi_Host *shost = class_to_shost(dev);
4088         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4089         struct ipr_ucode_image_header *image_hdr;
4090         const struct firmware *fw_entry;
4091         struct ipr_sglist *sglist;
4092         char fname[100];
        u8 *src;
4094         char *endline;
4095         int result, dnld_size;
4096
4097         if (!capable(CAP_SYS_ADMIN))
4098                 return -EACCES;
4099
4100         snprintf(fname, sizeof(fname), "%s", buf);
4101
4102         endline = strchr(fname, '\n');
4103         if (endline)
4104                 *endline = '\0';
4105
4106         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4107                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4108                 return -EIO;
4109         }
4110
4111         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4112
4113         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4114         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4115         sglist = ipr_alloc_ucode_buffer(dnld_size);
4116
4117         if (!sglist) {
4118                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4119                 release_firmware(fw_entry);
4120                 return -ENOMEM;
4121         }
4122
4123         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4124
4125         if (result) {
4126                 dev_err(&ioa_cfg->pdev->dev,
4127                         "Microcode buffer copy to DMA buffer failed\n");
4128                 goto out;
4129         }
4130
4131         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4132
4133         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4134
4135         if (!result)
4136                 result = count;
4137 out:
4138         ipr_free_ucode_buffer(sglist);
4139         release_firmware(fw_entry);
4140         return result;
4141 }
4142
4143 static struct device_attribute ipr_update_fw_attr = {
4144         .attr = {
4145                 .name =         "update_fw",
4146                 .mode =         S_IWUSR,
4147         },
4148         .store = ipr_store_update_fw
4149 };
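
/*
 * Illustrative usage (host0 and the image file name are examples, not
 * taken from this driver): write the name of a microcode image that
 * request_firmware() can locate, e.g. under /lib/firmware, to the
 * update_fw attribute:
 *
 *   echo my_ioa_ucode.img > /sys/class/scsi_host/host0/update_fw
 */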
4150
4151 /**
4152  * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:        class device struct
 * @attr:       device attribute struct
4154  * @buf:        buffer
4155  *
4156  * Return value:
4157  *      number of bytes printed to buffer
4158  **/
4159 static ssize_t ipr_show_fw_type(struct device *dev,
4160                                 struct device_attribute *attr, char *buf)
4161 {
4162         struct Scsi_Host *shost = class_to_shost(dev);
4163         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4164         unsigned long lock_flags = 0;
4165         int len;
4166
4167         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4168         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4169         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4170         return len;
4171 }
4172
4173 static struct device_attribute ipr_ioa_fw_type_attr = {
4174         .attr = {
4175                 .name =         "fw_type",
4176                 .mode =         S_IRUGO,
4177         },
4178         .show = ipr_show_fw_type
4179 };
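
/*
 * Reading fw_type simply reports ioa_cfg->sis64: 0 on a SIS-32 adapter,
 * 1 on a SIS-64 adapter.
 */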
4180
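/**
 * ipr_read_async_err_log - Read the oldest pending async error log entry
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer
 * @off:        offset
 * @count:      buffer size
 *
 * Copies out the HCAM of the oldest host recorded control block on the
 * report queue, if one is pending.
 *
 * Return value:
 *      number of bytes read / 0 if no entry is pending
 **/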
4181 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4182                                 struct bin_attribute *bin_attr, char *buf,
4183                                 loff_t off, size_t count)
4184 {
4185         struct device *cdev = container_of(kobj, struct device, kobj);
4186         struct Scsi_Host *shost = class_to_shost(cdev);
4187         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4188         struct ipr_hostrcb *hostrcb;
4189         unsigned long lock_flags = 0;
4190         int ret;
4191
4192         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4193         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4194                                         struct ipr_hostrcb, queue);
4195         if (!hostrcb) {
4196                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4197                 return 0;
4198         }
4199         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4200                                 sizeof(hostrcb->hcam));
4201         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202         return ret;
4203 }
4204
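/**
 * ipr_next_async_err_log - Retire the oldest pending async error log entry
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer (ignored)
 * @off:        offset (ignored)
 * @count:      buffer size
 *
 * Moves the oldest hostrcb from the report queue back to the free queue
 * so that the next read returns the following entry.
 *
 * Return value:
 *      count
 **/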
4205 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4206                                 struct bin_attribute *bin_attr, char *buf,
4207                                 loff_t off, size_t count)
4208 {
4209         struct device *cdev = container_of(kobj, struct device, kobj);
4210         struct Scsi_Host *shost = class_to_shost(cdev);
4211         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4212         struct ipr_hostrcb *hostrcb;
4213         unsigned long lock_flags = 0;
4214
4215         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4216         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4217                                         struct ipr_hostrcb, queue);
4218         if (!hostrcb) {
4219                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4220                 return count;
4221         }
4222
4223         /* Reclaim hostrcb before exit */
4224         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4225         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4226         return count;
4227 }
4228
4229 static struct bin_attribute ipr_ioa_async_err_log = {
4230         .attr = {
4231                 .name =         "async_err_log",
4232                 .mode =         S_IRUGO | S_IWUSR,
4233         },
4234         .size = 0,
4235         .read = ipr_read_async_err_log,
4236         .write = ipr_next_async_err_log
4237 };
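
/*
 * Illustrative usage (host0 is an example): read the attribute to fetch
 * the oldest pending entry, then write anything to it to retire that
 * entry and expose the next one:
 *
 *   dd if=/sys/class/scsi_host/host0/async_err_log bs=4k count=1
 *   echo > /sys/class/scsi_host/host0/async_err_log
 */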
4238
4239 static struct device_attribute *ipr_ioa_attrs[] = {
4240         &ipr_fw_version_attr,
4241         &ipr_log_level_attr,
4242         &ipr_diagnostics_attr,
4243         &ipr_ioa_state_attr,
4244         &ipr_ioa_reset_attr,
4245         &ipr_update_fw_attr,
4246         &ipr_ioa_fw_type_attr,
4247         &ipr_iopoll_weight_attr,
4248         NULL,
4249 };
4250
4251 #ifdef CONFIG_SCSI_IPR_DUMP
4252 /**
4253  * ipr_read_dump - Dump the adapter
4254  * @filp:               open sysfs file
4255  * @kobj:               kobject struct
4256  * @bin_attr:           bin_attribute struct
4257  * @buf:                buffer
4258  * @off:                offset
4259  * @count:              buffer size
4260  *
4261  * Return value:
4262  *      number of bytes printed to buffer
4263  **/
4264 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4265                              struct bin_attribute *bin_attr,
4266                              char *buf, loff_t off, size_t count)
4267 {
4268         struct device *cdev = container_of(kobj, struct device, kobj);
4269         struct Scsi_Host *shost = class_to_shost(cdev);
4270         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4271         struct ipr_dump *dump;
4272         unsigned long lock_flags = 0;
4273         char *src;
4274         int len, sdt_end;
4275         size_t rc = count;
4276
4277         if (!capable(CAP_SYS_ADMIN))
4278                 return -EACCES;
4279
4280         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4281         dump = ioa_cfg->dump;
4282
4283         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4284                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4285                 return 0;
4286         }
4287         kref_get(&dump->kref);
4288         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4289
4290         if (off > dump->driver_dump.hdr.len) {
4291                 kref_put(&dump->kref, ipr_release_dump);
4292                 return 0;
4293         }
4294
4295         if (off + count > dump->driver_dump.hdr.len) {
4296                 count = dump->driver_dump.hdr.len - off;
4297                 rc = count;
4298         }
4299
4300         if (count && off < sizeof(dump->driver_dump)) {
4301                 if (off + count > sizeof(dump->driver_dump))
4302                         len = sizeof(dump->driver_dump) - off;
4303                 else
4304                         len = count;
4305                 src = (u8 *)&dump->driver_dump + off;
4306                 memcpy(buf, src, len);
4307                 buf += len;
4308                 off += len;
4309                 count -= len;
4310         }
4311
4312         off -= sizeof(dump->driver_dump);
4313
4314         if (ioa_cfg->sis64)
4315                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4316                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4317                            sizeof(struct ipr_sdt_entry));
4318         else
4319                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4320                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4321
4322         if (count && off < sdt_end) {
4323                 if (off + count > sdt_end)
4324                         len = sdt_end - off;
4325                 else
4326                         len = count;
4327                 src = (u8 *)&dump->ioa_dump + off;
4328                 memcpy(buf, src, len);
4329                 buf += len;
4330                 off += len;
4331                 count -= len;
4332         }
4333
4334         off -= sdt_end;
4335
4336         while (count) {
4337                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4338                         len = PAGE_ALIGN(off) - off;
4339                 else
4340                         len = count;
4341                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4342                 src += off & ~PAGE_MASK;
4343                 memcpy(buf, src, len);
4344                 buf += len;
4345                 off += len;
4346                 count -= len;
4347         }
4348
4349         kref_put(&dump->kref, ipr_release_dump);
4350         return rc;
4351 }
4352
4353 /**
4354  * ipr_alloc_dump - Prepare for adapter dump
4355  * @ioa_cfg:    ioa config struct
4356  *
4357  * Return value:
4358  *      0 on success / other on failure
4359  **/
4360 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4361 {
4362         struct ipr_dump *dump;
4363         __be32 **ioa_data;
4364         unsigned long lock_flags = 0;
4365
4366         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4367
4368         if (!dump) {
4369                 ipr_err("Dump memory allocation failed\n");
4370                 return -ENOMEM;
4371         }
4372
4373         if (ioa_cfg->sis64)
4374                 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4375                                               sizeof(__be32 *)));
4376         else
4377                 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4378                                               sizeof(__be32 *)));
4379
4380         if (!ioa_data) {
4381                 ipr_err("Dump memory allocation failed\n");
4382                 kfree(dump);
4383                 return -ENOMEM;
4384         }
4385
4386         dump->ioa_dump.ioa_data = ioa_data;
4387
4388         kref_init(&dump->kref);
4389         dump->ioa_cfg = ioa_cfg;
4390
4391         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4392
4393         if (INACTIVE != ioa_cfg->sdt_state) {
4394                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4395                 vfree(dump->ioa_dump.ioa_data);
4396                 kfree(dump);
4397                 return 0;
4398         }
4399
4400         ioa_cfg->dump = dump;
4401         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4402         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4403                 ioa_cfg->dump_taken = 1;
4404                 schedule_work(&ioa_cfg->work_q);
4405         }
4406         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4407
4408         return 0;
4409 }
4410
4411 /**
4412  * ipr_free_dump - Free adapter dump memory
4413  * @ioa_cfg:    ioa config struct
4414  *
4415  * Return value:
4416  *      0 on success / other on failure
4417  **/
4418 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4419 {
4420         struct ipr_dump *dump;
4421         unsigned long lock_flags = 0;
4422
4423         ENTER;
4424
4425         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4426         dump = ioa_cfg->dump;
4427         if (!dump) {
4428                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4429                 return 0;
4430         }
4431
4432         ioa_cfg->dump = NULL;
4433         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4434
4435         kref_put(&dump->kref, ipr_release_dump);
4436
4437         LEAVE;
4438         return 0;
4439 }
4440
4441 /**
4442  * ipr_write_dump - Setup dump state of adapter
4443  * @filp:               open sysfs file
4444  * @kobj:               kobject struct
4445  * @bin_attr:           bin_attribute struct
4446  * @buf:                buffer
4447  * @off:                offset
4448  * @count:              buffer size
4449  *
4450  * Return value:
 *      count on success / error code on failure
4452  **/
4453 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4454                               struct bin_attribute *bin_attr,
4455                               char *buf, loff_t off, size_t count)
4456 {
4457         struct device *cdev = container_of(kobj, struct device, kobj);
4458         struct Scsi_Host *shost = class_to_shost(cdev);
4459         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4460         int rc;
4461
4462         if (!capable(CAP_SYS_ADMIN))
4463                 return -EACCES;
4464
4465         if (buf[0] == '1')
4466                 rc = ipr_alloc_dump(ioa_cfg);
4467         else if (buf[0] == '0')
4468                 rc = ipr_free_dump(ioa_cfg);
4469         else
4470                 return -EINVAL;
4471
4472         if (rc)
4473                 return rc;
4474         else
4475                 return count;
4476 }
4477
4478 static struct bin_attribute ipr_dump_attr = {
4479         .attr = {
4480                 .name = "dump",
4481                 .mode = S_IRUSR | S_IWUSR,
4482         },
4483         .size = 0,
4484         .read = ipr_read_dump,
4485         .write = ipr_write_dump
4486 };
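
/*
 * Illustrative usage (host0 is an example):
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump       # arm dump collection
 *   cat /sys/class/scsi_host/host0/dump > ipr.dump # read it back later
 *   echo 0 > /sys/class/scsi_host/host0/dump       # free the dump memory
 */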
4487 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4489 #endif
4490
4491 /**
4492  * ipr_change_queue_depth - Change the device's queue depth
4493  * @sdev:       scsi device struct
4494  * @qdepth:     depth to set
4496  *
4497  * Return value:
4498  *      actual depth set
4499  **/
4500 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4501 {
4502         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4503         struct ipr_resource_entry *res;
4504         unsigned long lock_flags = 0;
4505
4506         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4507         res = (struct ipr_resource_entry *)sdev->hostdata;
4508
4509         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4510                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4511         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4512
4513         scsi_change_queue_depth(sdev, qdepth);
4514         return sdev->queue_depth;
4515 }
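
/*
 * Illustrative usage via the generic scsi queue_depth attribute (the
 * device address is an example); requests beyond
 * IPR_MAX_CMD_PER_ATA_LUN are clamped for SATA devices:
 *
 *   echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
 */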
4516
4517 /**
4518  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4519  * @dev:        device struct
4520  * @attr:       device attribute structure
4521  * @buf:        buffer
4522  *
4523  * Return value:
4524  *      number of bytes printed to buffer
4525  **/
4526 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4527 {
4528         struct scsi_device *sdev = to_scsi_device(dev);
4529         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4530         struct ipr_resource_entry *res;
4531         unsigned long lock_flags = 0;
4532         ssize_t len = -ENXIO;
4533
4534         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4535         res = (struct ipr_resource_entry *)sdev->hostdata;
4536         if (res)
4537                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4538         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4539         return len;
4540 }
4541
4542 static struct device_attribute ipr_adapter_handle_attr = {
4543         .attr = {
4544                 .name =         "adapter_handle",
4545                 .mode =         S_IRUSR,
4546         },
4547         .show = ipr_show_adapter_handle
4548 };
4549
4550 /**
4551  * ipr_show_resource_path - Show the resource path or the resource address for
4552  *                          this device.
4553  * @dev:        device struct
4554  * @attr:       device attribute structure
4555  * @buf:        buffer
4556  *
4557  * Return value:
4558  *      number of bytes printed to buffer
4559  **/
4560 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4561 {
4562         struct scsi_device *sdev = to_scsi_device(dev);
4563         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4564         struct ipr_resource_entry *res;
4565         unsigned long lock_flags = 0;
4566         ssize_t len = -ENXIO;
4567         char buffer[IPR_MAX_RES_PATH_LENGTH];
4568
4569         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4570         res = (struct ipr_resource_entry *)sdev->hostdata;
4571         if (res && ioa_cfg->sis64)
4572                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4573                                __ipr_format_res_path(res->res_path, buffer,
4574                                                      sizeof(buffer)));
4575         else if (res)
4576                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4577                                res->bus, res->target, res->lun);
4578
4579         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4580         return len;
4581 }
4582
4583 static struct device_attribute ipr_resource_path_attr = {
4584         .attr = {
4585                 .name =         "resource_path",
4586                 .mode =         S_IRUGO,
4587         },
4588         .show = ipr_show_resource_path
4589 };
4590
4591 /**
4592  * ipr_show_device_id - Show the device_id for this device.
4593  * @dev:        device struct
4594  * @attr:       device attribute structure
4595  * @buf:        buffer
4596  *
4597  * Return value:
4598  *      number of bytes printed to buffer
4599  **/
4600 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4601 {
4602         struct scsi_device *sdev = to_scsi_device(dev);
4603         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4604         struct ipr_resource_entry *res;
4605         unsigned long lock_flags = 0;
4606         ssize_t len = -ENXIO;
4607
4608         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4609         res = (struct ipr_resource_entry *)sdev->hostdata;
4610         if (res && ioa_cfg->sis64)
4611                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4612         else if (res)
4613                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4614
4615         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4616         return len;
4617 }
4618
4619 static struct device_attribute ipr_device_id_attr = {
4620         .attr = {
4621                 .name =         "device_id",
4622                 .mode =         S_IRUGO,
4623         },
4624         .show = ipr_show_device_id
4625 };
4626
4627 /**
4628  * ipr_show_resource_type - Show the resource type for this device.
4629  * @dev:        device struct
4630  * @attr:       device attribute structure
4631  * @buf:        buffer
4632  *
4633  * Return value:
4634  *      number of bytes printed to buffer
4635  **/
4636 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4637 {
4638         struct scsi_device *sdev = to_scsi_device(dev);
4639         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4640         struct ipr_resource_entry *res;
4641         unsigned long lock_flags = 0;
4642         ssize_t len = -ENXIO;
4643
4644         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4645         res = (struct ipr_resource_entry *)sdev->hostdata;
4646
4647         if (res)
4648                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4649
4650         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4651         return len;
4652 }
4653
4654 static struct device_attribute ipr_resource_type_attr = {
4655         .attr = {
4656                 .name =         "resource_type",
4657                 .mode =         S_IRUGO,
4658         },
4659         .show = ipr_show_resource_type
4660 };
4661
4662 /**
 * ipr_show_raw_mode - Show the device's raw mode setting
 * @dev:        class device struct
 * @attr:       device attribute struct
 * @buf:        buffer
4666  *
4667  * Return value:
4668  *      number of bytes printed to buffer
4669  **/
4670 static ssize_t ipr_show_raw_mode(struct device *dev,
4671                                  struct device_attribute *attr, char *buf)
4672 {
4673         struct scsi_device *sdev = to_scsi_device(dev);
4674         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4675         struct ipr_resource_entry *res;
4676         unsigned long lock_flags = 0;
4677         ssize_t len;
4678
4679         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4680         res = (struct ipr_resource_entry *)sdev->hostdata;
4681         if (res)
4682                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4683         else
4684                 len = -ENXIO;
4685         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4686         return len;
4687 }
4688
4689 /**
 * ipr_store_raw_mode - Change the device's raw mode setting
 * @dev:        class device struct
 * @attr:       device attribute struct
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes consumed on success / error code on failure
4696  **/
4697 static ssize_t ipr_store_raw_mode(struct device *dev,
4698                                   struct device_attribute *attr,
4699                                   const char *buf, size_t count)
4700 {
4701         struct scsi_device *sdev = to_scsi_device(dev);
4702         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4703         struct ipr_resource_entry *res;
4704         unsigned long lock_flags = 0;
4705         ssize_t len;
4706
4707         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4708         res = (struct ipr_resource_entry *)sdev->hostdata;
4709         if (res) {
4710                 if (ipr_is_af_dasd_device(res)) {
4711                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4712                         len = strlen(buf);
4713                         if (res->sdev)
4714                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4715                                         res->raw_mode ? "enabled" : "disabled");
4716                 } else
4717                         len = -EINVAL;
4718         } else
4719                 len = -ENXIO;
4720         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4721         return len;
4722 }
4723
4724 static struct device_attribute ipr_raw_mode_attr = {
4725         .attr = {
4726                 .name =         "raw_mode",
4727                 .mode =         S_IRUGO | S_IWUSR,
4728         },
4729         .show = ipr_show_raw_mode,
4730         .store = ipr_store_raw_mode
4731 };
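
/*
 * Illustrative usage (the device address is an example); only AF DASD
 * devices accept the setting, anything else returns -EINVAL:
 *
 *   echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
 */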
4732
4733 static struct device_attribute *ipr_dev_attrs[] = {
4734         &ipr_adapter_handle_attr,
4735         &ipr_resource_path_attr,
4736         &ipr_device_id_attr,
4737         &ipr_resource_type_attr,
4738         &ipr_raw_mode_attr,
4739         NULL,
4740 };
4741
4742 /**
4743  * ipr_biosparam - Return the HSC mapping
4744  * @sdev:                       scsi device struct
4745  * @block_device:       block device pointer
4746  * @capacity:           capacity of the device
4747  * @parm:                       Array containing returned HSC values.
4748  *
4749  * This function generates the HSC parms that fdisk uses.
4750  * We want to make sure we return something that places partitions
4751  * on 4k boundaries for best performance with the IOA.
4752  *
4753  * Return value:
4754  *      0 on success
4755  **/
4756 static int ipr_biosparam(struct scsi_device *sdev,
4757                          struct block_device *block_device,
4758                          sector_t capacity, int *parm)
4759 {
4760         int heads, sectors;
4761         sector_t cylinders;
4762
4763         heads = 128;
4764         sectors = 32;
4765
4766         cylinders = capacity;
4767         sector_div(cylinders, (128 * 32));
4768
4769         /* return result */
4770         parm[0] = heads;
4771         parm[1] = sectors;
4772         parm[2] = cylinders;
4773
4774         return 0;
4775 }
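
/*
 * Worked example: one logical cylinder spans 128 * 32 = 4096 sectors
 * (2 MiB with 512-byte sectors), so cylinder-aligned partitions always
 * start on 4k boundaries. A disk of 280,000,000 sectors would report
 * 280000000 / 4096 = 68359 cylinders.
 */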
4776
4777 /**
4778  * ipr_find_starget - Find target based on bus/target.
4779  * @starget:    scsi target struct
4780  *
4781  * Return value:
4782  *      resource entry pointer if found / NULL if not found
4783  **/
4784 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4785 {
4786         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4787         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4788         struct ipr_resource_entry *res;
4789
4790         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4791                 if ((res->bus == starget->channel) &&
4792                     (res->target == starget->id)) {
4793                         return res;
4794                 }
4795         }
4796
4797         return NULL;
4798 }
4799
4800 static struct ata_port_info sata_port_info;
4801
4802 /**
4803  * ipr_target_alloc - Prepare for commands to a SCSI target
4804  * @starget:    scsi target struct
4805  *
4806  * If the device is a SATA device, this function allocates an
4807  * ATA port with libata, else it does nothing.
4808  *
4809  * Return value:
4810  *      0 on success / non-0 on failure
4811  **/
4812 static int ipr_target_alloc(struct scsi_target *starget)
4813 {
4814         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4815         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4816         struct ipr_sata_port *sata_port;
4817         struct ata_port *ap;
4818         struct ipr_resource_entry *res;
4819         unsigned long lock_flags;
4820
4821         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4822         res = ipr_find_starget(starget);
4823         starget->hostdata = NULL;
4824
4825         if (res && ipr_is_gata(res)) {
4826                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4827                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4828                 if (!sata_port)
4829                         return -ENOMEM;
4830
4831                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4832                 if (ap) {
4833                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4834                         sata_port->ioa_cfg = ioa_cfg;
4835                         sata_port->ap = ap;
4836                         sata_port->res = res;
4837
4838                         res->sata_port = sata_port;
4839                         ap->private_data = sata_port;
4840                         starget->hostdata = sata_port;
4841                 } else {
4842                         kfree(sata_port);
4843                         return -ENOMEM;
4844                 }
4845         }
4846         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4847
4848         return 0;
4849 }
4850
4851 /**
4852  * ipr_target_destroy - Destroy a SCSI target
4853  * @starget:    scsi target struct
4854  *
4855  * If the device was a SATA device, this function frees the libata
4856  * ATA port, else it does nothing.
4857  *
4858  **/
4859 static void ipr_target_destroy(struct scsi_target *starget)
4860 {
4861         struct ipr_sata_port *sata_port = starget->hostdata;
4862         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4863         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4864
4865         if (ioa_cfg->sis64) {
4866                 if (!ipr_find_starget(starget)) {
4867                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4868                                 clear_bit(starget->id, ioa_cfg->array_ids);
4869                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4870                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4871                         else if (starget->channel == 0)
4872                                 clear_bit(starget->id, ioa_cfg->target_ids);
4873                 }
4874         }
4875
4876         if (sata_port) {
4877                 starget->hostdata = NULL;
4878                 ata_sas_port_destroy(sata_port->ap);
4879                 kfree(sata_port);
4880         }
4881 }
4882
4883 /**
4884  * ipr_find_sdev - Find device based on bus/target/lun.
4885  * @sdev:       scsi device struct
4886  *
4887  * Return value:
4888  *      resource entry pointer if found / NULL if not found
4889  **/
4890 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4891 {
4892         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4893         struct ipr_resource_entry *res;
4894
4895         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4896                 if ((res->bus == sdev->channel) &&
4897                     (res->target == sdev->id) &&
4898                     (res->lun == sdev->lun))
4899                         return res;
4900         }
4901
4902         return NULL;
4903 }
4904
4905 /**
4906  * ipr_slave_destroy - Unconfigure a SCSI device
4907  * @sdev:       scsi device struct
4908  *
4909  * Return value:
4910  *      nothing
4911  **/
4912 static void ipr_slave_destroy(struct scsi_device *sdev)
4913 {
4914         struct ipr_resource_entry *res;
4915         struct ipr_ioa_cfg *ioa_cfg;
4916         unsigned long lock_flags = 0;
4917
4918         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4919
4920         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4921         res = (struct ipr_resource_entry *) sdev->hostdata;
4922         if (res) {
4923                 if (res->sata_port)
4924                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4925                 sdev->hostdata = NULL;
4926                 res->sdev = NULL;
4927                 res->sata_port = NULL;
4928         }
4929         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4930 }
4931
4932 /**
4933  * ipr_slave_configure - Configure a SCSI device
4934  * @sdev:       scsi device struct
4935  *
4936  * This function configures the specified scsi device.
4937  *
4938  * Return value:
4939  *      0 on success
4940  **/
4941 static int ipr_slave_configure(struct scsi_device *sdev)
4942 {
4943         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4944         struct ipr_resource_entry *res;
4945         struct ata_port *ap = NULL;
4946         unsigned long lock_flags = 0;
4947         char buffer[IPR_MAX_RES_PATH_LENGTH];
4948
4949         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4950         res = sdev->hostdata;
4951         if (res) {
4952                 if (ipr_is_af_dasd_device(res))
4953                         sdev->type = TYPE_RAID;
4954                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4955                         sdev->scsi_level = 4;
4956                         sdev->no_uld_attach = 1;
4957                 }
4958                 if (ipr_is_vset_device(res)) {
4959                         sdev->scsi_level = SCSI_SPC_3;
4960                         sdev->no_report_opcodes = 1;
4961                         blk_queue_rq_timeout(sdev->request_queue,
4962                                              IPR_VSET_RW_TIMEOUT);
4963                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4964                 }
4965                 if (ipr_is_gata(res) && res->sata_port)
4966                         ap = res->sata_port->ap;
4967                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4968
4969                 if (ap) {
4970                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4971                         ata_sas_slave_configure(sdev, ap);
4972                 }
4973
4974                 if (ioa_cfg->sis64)
4975                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4976                                     ipr_format_res_path(ioa_cfg,
4977                                 res->res_path, buffer, sizeof(buffer)));
4978                 return 0;
4979         }
4980         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4981         return 0;
4982 }
4983
4984 /**
4985  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4986  * @sdev:       scsi device struct
4987  *
4988  * This function initializes an ATA port so that future commands
4989  * sent through queuecommand will work.
4990  *
4991  * Return value:
4992  *      0 on success
4993  **/
4994 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4995 {
4996         struct ipr_sata_port *sata_port = NULL;
4997         int rc = -ENXIO;
4998
4999         ENTER;
5000         if (sdev->sdev_target)
5001                 sata_port = sdev->sdev_target->hostdata;
5002         if (sata_port) {
5003                 rc = ata_sas_port_init(sata_port->ap);
5004                 if (rc == 0)
5005                         rc = ata_sas_sync_probe(sata_port->ap);
5006         }
5007
5008         if (rc)
5009                 ipr_slave_destroy(sdev);
5010
5011         LEAVE;
5012         return rc;
5013 }
5014
5015 /**
5016  * ipr_slave_alloc - Prepare for commands to a device.
5017  * @sdev:       scsi device struct
5018  *
5019  * This function saves a pointer to the resource entry
5020  * in the scsi device struct if the device exists. We
5021  * can then use this pointer in ipr_queuecommand when
5022  * handling new commands.
5023  *
5024  * Return value:
5025  *      0 on success / -ENXIO if device does not exist
5026  **/
5027 static int ipr_slave_alloc(struct scsi_device *sdev)
5028 {
5029         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5030         struct ipr_resource_entry *res;
5031         unsigned long lock_flags;
5032         int rc = -ENXIO;
5033
5034         sdev->hostdata = NULL;
5035
5036         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5037
5038         res = ipr_find_sdev(sdev);
5039         if (res) {
5040                 res->sdev = sdev;
5041                 res->add_to_ml = 0;
5042                 res->in_erp = 0;
5043                 sdev->hostdata = res;
5044                 if (!ipr_is_naca_model(res))
5045                         res->needs_sync_complete = 1;
5046                 rc = 0;
5047                 if (ipr_is_gata(res)) {
5048                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5049                         return ipr_ata_slave_alloc(sdev);
5050                 }
5051         }
5052
5053         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5054
5055         return rc;
5056 }
5057
5058 /**
5059  * ipr_match_lun - Match function for specified LUN
5060  * @ipr_cmd:    ipr command struct
5061  * @device:             device to match (sdev)
5062  *
5063  * Returns:
5064  *      1 if command matches sdev / 0 if command does not match sdev
5065  **/
5066 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5067 {
5068         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5069                 return 1;
5070         return 0;
5071 }
5072
5073 /**
5074  * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:    ipr command struct
5076  *
5077  * Returns:
5078  *      true / false
5079  **/
5080 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5081 {
5082         struct ipr_cmnd *loop_cmd;
5083
5084         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5085                 if (loop_cmd == ipr_cmd)
5086                         return true;
5087         }
5088
5089         return false;
5090 }
5091
5092 /**
5093  * ipr_match_res - Match function for specified resource entry
5094  * @ipr_cmd:    ipr command struct
5095  * @resource:   resource entry to match
5096  *
5097  * Returns:
 *      1 if command matches the resource entry / 0 if it does not
5099  **/
5100 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5101 {
5102         struct ipr_resource_entry *res = resource;
5103
5104         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5105                 return 1;
5106         return 0;
5107 }
5108
5109 /**
 * @ioa_cfg:    ioa config struct
5111  * @ipr_cmd:    ipr command struct
5112  * @device:             device to match (sdev)
5113  * @match:              match function to use
5114  *
5115  * Returns:
5116  *      SUCCESS / FAILED
5117  **/
5118 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5119                             int (*match)(struct ipr_cmnd *, void *))
5120 {
5121         struct ipr_cmnd *ipr_cmd;
5122         int wait, i;
5123         unsigned long flags;
5124         struct ipr_hrr_queue *hrrq;
5125         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5126         DECLARE_COMPLETION_ONSTACK(comp);
5127
5128         ENTER;
5129         do {
5130                 wait = 0;
5131
5132                 for_each_hrrq(hrrq, ioa_cfg) {
5133                         spin_lock_irqsave(hrrq->lock, flags);
5134                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5135                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5136                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5137                                         if (match(ipr_cmd, device)) {
5138                                                 ipr_cmd->eh_comp = &comp;
5139                                                 wait++;
5140                                         }
5141                                 }
5142                         }
5143                         spin_unlock_irqrestore(hrrq->lock, flags);
5144                 }
5145
5146                 if (wait) {
5147                         timeout = wait_for_completion_timeout(&comp, timeout);
5148
5149                         if (!timeout) {
5150                                 wait = 0;
5151
5152                                 for_each_hrrq(hrrq, ioa_cfg) {
5153                                         spin_lock_irqsave(hrrq->lock, flags);
5154                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5155                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5156                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5157                                                         if (match(ipr_cmd, device)) {
5158                                                                 ipr_cmd->eh_comp = NULL;
5159                                                                 wait++;
5160                                                         }
5161                                                 }
5162                                         }
5163                                         spin_unlock_irqrestore(hrrq->lock, flags);
5164                                 }
5165
5166                                 if (wait)
5167                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5168                                 LEAVE;
5169                                 return wait ? FAILED : SUCCESS;
5170                         }
5171                 }
5172         } while (wait);
5173
5174         LEAVE;
5175         return SUCCESS;
5176 }
5177
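/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/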
5178 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5179 {
5180         struct ipr_ioa_cfg *ioa_cfg;
5181         unsigned long lock_flags = 0;
5182         int rc = SUCCESS;
5183
5184         ENTER;
5185         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5186         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5187
5188         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5189                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5190                 dev_err(&ioa_cfg->pdev->dev,
5191                         "Adapter being reset as a result of error recovery.\n");
5192
5193                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5194                         ioa_cfg->sdt_state = GET_DUMP;
5195         }
5196
5197         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5198         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5199         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5200
        /* If a host reset arrived while we were already resetting the
         * adapter for some reason and that reset failed, the adapter is
         * now dead, so report failure.
         */
5203         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5204                 ipr_trace;
5205                 rc = FAILED;
5206         }
5207
5208         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5209         LEAVE;
5210         return rc;
5211 }
5212
5213 /**
5214  * ipr_device_reset - Reset the device
5215  * @ioa_cfg:    ioa config struct
5216  * @res:                resource entry struct
5217  *
5218  * This function issues a device reset to the affected device.
5219  * If the device is a SCSI device, a LUN reset will be sent
5220  * to the device first. If that does not work, a target reset
5221  * will be sent. If the device is a SATA device, a PHY reset will
5222  * be sent.
5223  *
5224  * Return value:
5225  *      0 on success / non-zero on failure
5226  **/
5227 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5228                             struct ipr_resource_entry *res)
5229 {
5230         struct ipr_cmnd *ipr_cmd;
5231         struct ipr_ioarcb *ioarcb;
5232         struct ipr_cmd_pkt *cmd_pkt;
5233         struct ipr_ioarcb_ata_regs *regs;
5234         u32 ioasc;
5235
5236         ENTER;
5237         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5238         ioarcb = &ipr_cmd->ioarcb;
5239         cmd_pkt = &ioarcb->cmd_pkt;
5240
5241         if (ipr_cmd->ioa_cfg->sis64) {
5242                 regs = &ipr_cmd->i.ata_ioadl.regs;
5243                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5244         } else
5245                 regs = &ioarcb->u.add_data.u.regs;
5246
5247         ioarcb->res_handle = res->res_handle;
5248         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5249         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5250         if (ipr_is_gata(res)) {
5251                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5252                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5253                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5254         }
5255
5256         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5257         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5258         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5259         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5260                 if (ipr_cmd->ioa_cfg->sis64)
5261                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5262                                sizeof(struct ipr_ioasa_gata));
5263                 else
5264                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5265                                sizeof(struct ipr_ioasa_gata));
5266         }
5267
5268         LEAVE;
5269         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5270 }
5271
5272 /**
5273  * ipr_sata_reset - Reset the SATA port
5274  * @link:       SATA link to reset
5275  * @classes:    class of the attached device
5276  *
5277  * This function issues a SATA phy reset to the affected ATA link.
5278  *
5279  * Return value:
5280  *      0 on success / non-zero on failure
5281  **/
5282 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5283                                 unsigned long deadline)
5284 {
5285         struct ipr_sata_port *sata_port = link->ap->private_data;
5286         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5287         struct ipr_resource_entry *res;
5288         unsigned long lock_flags = 0;
5289         int rc = -ENXIO, ret;
5290
5291         ENTER;
5292         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5293         while (ioa_cfg->in_reset_reload) {
5294                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5295                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5296                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5297         }
5298
5299         res = sata_port->res;
5300         if (res) {
5301                 rc = ipr_device_reset(ioa_cfg, res);
5302                 *classes = res->ata_class;
5303                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5304
5305                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5306                 if (ret != SUCCESS) {
5307                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5308                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5309                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5310
5311                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5312                 }
5313         } else
5314                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5315
5316         LEAVE;
5317         return rc;
5318 }
5319
5320 /**
 * __ipr_eh_dev_reset - Reset the device
5322  * @scsi_cmd:   scsi command struct
5323  *
5324  * This function issues a device reset to the affected device.
5325  * A LUN reset will be sent to the device first. If that does
5326  * not work, a target reset will be sent.
5327  *
5328  * Return value:
5329  *      SUCCESS / FAILED
5330  **/
5331 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5332 {
5333         struct ipr_cmnd *ipr_cmd;
5334         struct ipr_ioa_cfg *ioa_cfg;
5335         struct ipr_resource_entry *res;
5336         struct ata_port *ap;
5337         int rc = 0, i;
5338         struct ipr_hrr_queue *hrrq;
5339
5340         ENTER;
5341         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5342         res = scsi_cmd->device->hostdata;
5343
5344         /*
5345          * If we are currently going through reset/reload, return failed. This will force the
5346          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5347          * reset to complete
5348          */
5349         if (ioa_cfg->in_reset_reload)
5350                 return FAILED;
5351         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5352                 return FAILED;
5353
5354         for_each_hrrq(hrrq, ioa_cfg) {
5355                 spin_lock(&hrrq->_lock);
5356                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5357                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5358
5359                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5360                                 if (!ipr_cmd->qc)
5361                                         continue;
5362                                 if (ipr_cmnd_is_free(ipr_cmd))
5363                                         continue;
5364
5365                                 ipr_cmd->done = ipr_sata_eh_done;
5366                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5367                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5368                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5369                                 }
5370                         }
5371                 }
5372                 spin_unlock(&hrrq->_lock);
5373         }
5374         res->resetting_device = 1;
5375         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5376
5377         if (ipr_is_gata(res) && res->sata_port) {
5378                 ap = res->sata_port->ap;
5379                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5380                 ata_std_error_handler(ap);
5381                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5382         } else
5383                 rc = ipr_device_reset(ioa_cfg, res);
5384         res->resetting_device = 0;
5385         res->reset_occurred = 1;
5386
5387         LEAVE;
5388         return rc ? FAILED : SUCCESS;
5389 }
5390
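/**
 * ipr_eh_dev_reset - Reset the device and wait for any outstanding ops
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/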
5391 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5392 {
5393         int rc;
5394         struct ipr_ioa_cfg *ioa_cfg;
5395         struct ipr_resource_entry *res;
5396
5397         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5398         res = cmd->device->hostdata;
5399
5400         if (!res)
5401                 return FAILED;
5402
5403         spin_lock_irq(cmd->device->host->host_lock);
5404         rc = __ipr_eh_dev_reset(cmd);
5405         spin_unlock_irq(cmd->device->host->host_lock);
5406
5407         if (rc == SUCCESS) {
5408                 if (ipr_is_gata(res) && res->sata_port)
5409                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5410                 else
5411                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5412         }
5413
5414         return rc;
5415 }
5416
5417 /**
5418  * ipr_bus_reset_done - Op done function for bus reset.
5419  * @ipr_cmd:    ipr command struct
5420  *
5421  * This function is the op done function for a bus reset
5422  *
5423  * Return value:
5424  *      none
5425  **/
5426 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5427 {
5428         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5429         struct ipr_resource_entry *res;
5430
5431         ENTER;
5432         if (!ioa_cfg->sis64)
5433                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5434                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5435                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5436                                 break;
5437                         }
5438                 }
5439
5440         /*
5441          * If abort has not completed, indicate the reset has, else call the
5442          * abort's done function to wake the sleeping eh thread
5443          */
5444         if (ipr_cmd->sibling->sibling)
5445                 ipr_cmd->sibling->sibling = NULL;
5446         else
5447                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5448
5449         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5450         LEAVE;
5451 }
5452
5453 /**
5454  * ipr_abort_timeout - An abort task has timed out
 * @t:          timer context used to fetch the ipr command struct
5456  *
5457  * This function handles when an abort task times out. If this
5458  * happens we issue a bus reset since we have resources tied
5459  * up that must be freed before returning to the midlayer.
5460  *
5461  * Return value:
5462  *      none
5463  **/
5464 static void ipr_abort_timeout(struct timer_list *t)
5465 {
5466         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5467         struct ipr_cmnd *reset_cmd;
5468         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5469         struct ipr_cmd_pkt *cmd_pkt;
5470         unsigned long lock_flags = 0;
5471
5472         ENTER;
5473         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5474         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5475                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5476                 return;
5477         }
5478
5479         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5480         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5481         ipr_cmd->sibling = reset_cmd;
5482         reset_cmd->sibling = ipr_cmd;
5483         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5484         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5485         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5486         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5487         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5488
5489         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5490         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5491         LEAVE;
5492 }
5493
5494 /**
5495  * ipr_cancel_op - Cancel specified op
5496  * @scsi_cmd:   scsi command struct
5497  *
5498  * This function cancels specified op.
5499  *
5500  * Return value:
5501  *      SUCCESS / FAILED
5502  **/
5503 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5504 {
5505         struct ipr_cmnd *ipr_cmd;
5506         struct ipr_ioa_cfg *ioa_cfg;
5507         struct ipr_resource_entry *res;
5508         struct ipr_cmd_pkt *cmd_pkt;
5509         u32 ioasc, int_reg;
5510         int i, op_found = 0;
5511         struct ipr_hrr_queue *hrrq;
5512
5513         ENTER;
5514         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5515         res = scsi_cmd->device->hostdata;
5516
5517         /* If we are currently going through reset/reload, return failed.
5518          * This will force the mid-layer to call ipr_eh_host_reset,
5519          * which will then go to sleep and wait for the reset to complete
5520          */
5521         if (ioa_cfg->in_reset_reload ||
5522             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5523                 return FAILED;
5524         if (!res)
5525                 return FAILED;
5526
5527         /*
5528          * If we are aborting a timed out op, chances are that the timeout was caused
5529          * by a still not detected EEH error. In such cases, reading a register will
5530          * trigger the EEH recovery infrastructure.
5531          */
5532         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5533
5534         if (!ipr_is_gscsi(res))
5535                 return FAILED;
5536
5537         for_each_hrrq(hrrq, ioa_cfg) {
5538                 spin_lock(&hrrq->_lock);
5539                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5540                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5541                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5542                                         op_found = 1;
5543                                         break;
5544                                 }
5545                         }
5546                 }
5547                 spin_unlock(&hrrq->_lock);
5548         }
5549
5550         if (!op_found)
5551                 return SUCCESS;
5552
5553         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5554         ipr_cmd->ioarcb.res_handle = res->res_handle;
5555         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5556         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5557         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5558         ipr_cmd->u.sdev = scsi_cmd->device;
5559
5560         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5561                     scsi_cmd->cmnd[0]);
5562         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5563         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5564
5565         /*
5566          * If the abort task timed out and we sent a bus reset, we will get
5567          * one of the following responses to the abort.
5568          */
5569         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5570                 ioasc = 0;
5571                 ipr_trace;
5572         }
5573
5574         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5575         if (!ipr_is_naca_model(res))
5576                 res->needs_sync_complete = 1;
5577
5578         LEAVE;
5579         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5580 }
5581
5582 /**
5583  * ipr_scan_finished - Report whether the device scan has finished
5584  * @shost:      scsi host struct
5585  * @elapsed_time:       elapsed time of the scan, in jiffies
5586  * Return value:
5587  *      0 if scan in progress / 1 if scan is complete
5588  **/
5589 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5590 {
5591         unsigned long lock_flags;
5592         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5593         int rc = 0;
5594
5595         spin_lock_irqsave(shost->host_lock, lock_flags);
5596         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5597                 rc = 1;
5598         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5599                 rc = 1;
5600         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5601         return rc;
5602 }
5603
5604 /**
5605  * ipr_eh_abort - Abort a single op
5606  * @scsi_cmd:   scsi command struct
5607  *
5608  * Return value:
5609  *      SUCCESS / FAILED
5610  **/
5611 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5612 {
5613         unsigned long flags;
5614         int rc;
5615         struct ipr_ioa_cfg *ioa_cfg;
5616
5617         ENTER;
5618
5619         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5620
5621         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5622         rc = ipr_cancel_op(scsi_cmd);
5623         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5624
5625         if (rc == SUCCESS)
5626                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5627         LEAVE;
5628         return rc;
5629 }
5630
5631 /**
5632  * ipr_handle_other_interrupt - Handle "other" interrupts
5633  * @ioa_cfg:    ioa config struct
5634  * @int_reg:    interrupt register
5635  *
5636  * Return value:
5637  *      IRQ_NONE / IRQ_HANDLED
5638  **/
5639 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5640                                               u32 int_reg)
5641 {
5642         irqreturn_t rc = IRQ_HANDLED;
5643         u32 int_mask_reg;
5644
5645         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5646         int_reg &= ~int_mask_reg;
5647
5648         /* If no interrupt occurred on the adapter, ignore it, except in
5649          * the case of SIS 64, where we also check for a stage change interrupt.
5650          */
5651         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5652                 if (ioa_cfg->sis64) {
5653                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5654                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5655                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5656
5657                                 /* clear stage change */
5658                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5659                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5660                                 list_del(&ioa_cfg->reset_cmd->queue);
5661                                 del_timer(&ioa_cfg->reset_cmd->timer);
5662                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5663                                 return IRQ_HANDLED;
5664                         }
5665                 }
5666
5667                 return IRQ_NONE;
5668         }
5669
5670         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5671                 /* Mask the interrupt */
5672                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5673                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5674
5675                 list_del(&ioa_cfg->reset_cmd->queue);
5676                 del_timer(&ioa_cfg->reset_cmd->timer);
5677                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5678         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5679                 if (ioa_cfg->clear_isr) {
5680                         if (ipr_debug && printk_ratelimit())
5681                                 dev_err(&ioa_cfg->pdev->dev,
5682                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5683                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5684                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5685                         return IRQ_NONE;
5686                 }
5687         } else {
5688                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5689                         ioa_cfg->ioa_unit_checked = 1;
5690                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5691                         dev_err(&ioa_cfg->pdev->dev,
5692                                 "No Host RRQ. 0x%08X\n", int_reg);
5693                 else
5694                         dev_err(&ioa_cfg->pdev->dev,
5695                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5696
5697                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5698                         ioa_cfg->sdt_state = GET_DUMP;
5699
5700                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5701                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5702         }
5703
5704         return rc;
5705 }
5706
5707 /**
5708  * ipr_isr_eh - Interrupt service routine error handler
5709  * @ioa_cfg:    ioa config struct
5710  * @msg:        message to log
5711  * @number:     number to log with the message
5712  * Return value:
5713  *      none
5714  **/
5715 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5716 {
5717         ioa_cfg->errors_logged++;
5718         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5719
5720         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5721                 ioa_cfg->sdt_state = GET_DUMP;
5722
5723         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5724 }
5725
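/**
 * ipr_process_hrrq - Process responses from a host RRQ
 * @hrr_queue:  hrr queue to process
 * @budget:     max number of responses to process, or -1 for no limit
 * @doneq:      list on which completed commands are collected
 *
 * Walks the HRRQ while the current entry's toggle bit matches the
 * expected value, moving each finished command onto @doneq so the
 * caller can complete them after dropping the queue lock.
 *
 * Return value:
 *      number of responses processed
 **/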
5726 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5727                                                 struct list_head *doneq)
5728 {
5729         u32 ioasc;
5730         u16 cmd_index;
5731         struct ipr_cmnd *ipr_cmd;
5732         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5733         int num_hrrq = 0;
5734
5735         /* If interrupts are disabled, ignore the interrupt */
5736         if (!hrr_queue->allow_interrupts)
5737                 return 0;
5738
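        /* Each HRRQ entry carries a toggle bit that the adapter flips on
         * every pass through the circular queue. An entry is a new
         * response only while its toggle bit matches the value we expect;
         * we flip our expected value whenever we wrap back to the start.
         */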
5739         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5740                hrr_queue->toggle_bit) {
5741
5742                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5743                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5744                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5745
5746                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5747                              cmd_index < hrr_queue->min_cmd_id)) {
5748                         ipr_isr_eh(ioa_cfg,
5749                                 "Invalid response handle from IOA: ",
5750                                 cmd_index);
5751                         break;
5752                 }
5753
5754                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5755                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5756
5757                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5758
5759                 list_move_tail(&ipr_cmd->queue, doneq);
5760
5761                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5762                         hrr_queue->hrrq_curr++;
5763                 } else {
5764                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5765                         hrr_queue->toggle_bit ^= 1u;
5766                 }
5767                 num_hrrq++;
5768                 if (budget > 0 && num_hrrq >= budget)
5769                         break;
5770         }
5771
5772         return num_hrrq;
5773 }
5774
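/**
 * ipr_iopoll - irq_poll callback for HRRQ processing
 * @iop:        irq_poll struct embedded in the hrr queue
 * @budget:     max number of responses to process in this poll
 *
 * Return value:
 *      number of operations completed
 **/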
5775 static int ipr_iopoll(struct irq_poll *iop, int budget)
5776 {
5777         struct ipr_ioa_cfg *ioa_cfg;
5778         struct ipr_hrr_queue *hrrq;
5779         struct ipr_cmnd *ipr_cmd, *temp;
5780         unsigned long hrrq_flags;
5781         int completed_ops;
5782         LIST_HEAD(doneq);
5783
5784         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5785         ioa_cfg = hrrq->ioa_cfg;
5786
5787         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5788         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5789
5790         if (completed_ops < budget)
5791                 irq_poll_complete(iop);
5792         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5793
5794         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5795                 list_del(&ipr_cmd->queue);
5796                 del_timer(&ipr_cmd->timer);
5797                 ipr_cmd->fast_done(ipr_cmd);
5798         }
5799
5800         return completed_ops;
5801 }
5802
5803 /**
5804  * ipr_isr - Interrupt service routine
5805  * @irq:        irq number
5806  * @devp:       pointer to the hrr queue struct
5807  *
5808  * Return value:
5809  *      IRQ_NONE / IRQ_HANDLED
5810  **/
5811 static irqreturn_t ipr_isr(int irq, void *devp)
5812 {
5813         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5814         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5815         unsigned long hrrq_flags = 0;
5816         u32 int_reg = 0;
5817         int num_hrrq = 0;
5818         int irq_none = 0;
5819         struct ipr_cmnd *ipr_cmd, *temp;
5820         irqreturn_t rc = IRQ_NONE;
5821         LIST_HEAD(doneq);
5822
5823         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5824         /* If interrupts are disabled, ignore the interrupt */
5825         if (!hrrq->allow_interrupts) {
5826                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5827                 return IRQ_NONE;
5828         }
5829
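        /* Drain the HRRQ until it is empty. If clear_isr is set, also
         * acknowledge the HRRQ-updated interrupt, re-reading the register
         * a bounded number of times since the clear may not take effect
         * immediately.
         */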
5830         while (1) {
5831                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5832                         rc =  IRQ_HANDLED;
5833
5834                         if (!ioa_cfg->clear_isr)
5835                                 break;
5836
5837                         /* Clear the PCI interrupt */
5838                         num_hrrq = 0;
5839                         do {
5840                                 writel(IPR_PCII_HRRQ_UPDATED,
5841                                      ioa_cfg->regs.clr_interrupt_reg32);
5842                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5843                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5844                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5845
5846                 } else if (rc == IRQ_NONE && irq_none == 0) {
5847                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5848                         irq_none++;
5849                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5850                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5851                         ipr_isr_eh(ioa_cfg,
5852                                 "Error clearing HRRQ: ", num_hrrq);
5853                         rc = IRQ_HANDLED;
5854                         break;
5855                 } else
5856                         break;
5857         }
5858
5859         if (unlikely(rc == IRQ_NONE))
5860                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5861
5862         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5863         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5864                 list_del(&ipr_cmd->queue);
5865                 del_timer(&ipr_cmd->timer);
5866                 ipr_cmd->fast_done(ipr_cmd);
5867         }
5868         return rc;
5869 }
5870
5871 /**
5872  * ipr_isr_mhrrq - Interrupt service routine for secondary HRRQ vectors
5873  * @irq:        irq number
5874  * @devp:       pointer to the hrr queue struct
5875  *
5876  * Return value:
5877  *      IRQ_NONE / IRQ_HANDLED
5878  **/
5879 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5880 {
5881         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5882         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5883         unsigned long hrrq_flags = 0;
5884         struct ipr_cmnd *ipr_cmd, *temp;
5885         irqreturn_t rc = IRQ_NONE;
5886         LIST_HEAD(doneq);
5887
5888         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5889
5890         /* If interrupts are disabled, ignore the interrupt */
5891         if (!hrrq->allow_interrupts) {
5892                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5893                 return IRQ_NONE;
5894         }
5895
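        /* On SIS-64 with multiple MSI-X vectors and iopoll enabled, defer
         * HRRQ processing to irq_poll context; otherwise process the
         * queue inline here.
         */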
5896         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5897                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5898                        hrrq->toggle_bit) {
5899                         irq_poll_sched(&hrrq->iopoll);
5900                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5901                         return IRQ_HANDLED;
5902                 }
5903         } else {
5904                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5905                         hrrq->toggle_bit)
5906
5907                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5908                                 rc =  IRQ_HANDLED;
5909         }
5910
5911         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5912
5913         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5914                 list_del(&ipr_cmd->queue);
5915                 del_timer(&ipr_cmd->timer);
5916                 ipr_cmd->fast_done(ipr_cmd);
5917         }
5918         return rc;
5919 }
5920
5921 /**
5922  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5923  * @ioa_cfg:    ioa config struct
5924  * @ipr_cmd:    ipr command struct
5925  *
5926  * Return value:
5927  *      0 on success / -1 on failure
5928  **/
5929 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5930                              struct ipr_cmnd *ipr_cmd)
5931 {
5932         int i, nseg;
5933         struct scatterlist *sg;
5934         u32 length;
5935         u32 ioadl_flags = 0;
5936         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5937         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5938         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5939
5940         length = scsi_bufflen(scsi_cmd);
5941         if (!length)
5942                 return 0;
5943
5944         nseg = scsi_dma_map(scsi_cmd);
5945         if (nseg < 0) {
5946                 if (printk_ratelimit())
5947                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5948                 return -1;
5949         }
5950
5951         ipr_cmd->dma_use_sg = nseg;
5952
5953         ioarcb->data_transfer_length = cpu_to_be32(length);
5954         ioarcb->ioadl_len =
5955                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5956
5957         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5958                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5959                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5960         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5961                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5962
5963         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5964                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5965                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5966                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5967         }
5968
5969         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5970         return 0;
5971 }
5972
5973 /**
5974  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5975  * @ioa_cfg:    ioa config struct
5976  * @ipr_cmd:    ipr command struct
5977  *
5978  * Return value:
5979  *      0 on success / -1 on failure
5980  **/
5981 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5982                            struct ipr_cmnd *ipr_cmd)
5983 {
5984         int i, nseg;
5985         struct scatterlist *sg;
5986         u32 length;
5987         u32 ioadl_flags = 0;
5988         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5989         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5990         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5991
5992         length = scsi_bufflen(scsi_cmd);
5993         if (!length)
5994                 return 0;
5995
5996         nseg = scsi_dma_map(scsi_cmd);
5997         if (nseg < 0) {
5998                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5999                 return -1;
6000         }
6001
6002         ipr_cmd->dma_use_sg = nseg;
6003
6004         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6005                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6006                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6007                 ioarcb->data_transfer_length = cpu_to_be32(length);
6008                 ioarcb->ioadl_len =
6009                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6010         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6011                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6012                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6013                 ioarcb->read_ioadl_len =
6014                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6015         }
6016
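        /* If the scatter/gather list is small enough, place the IOADL in
         * the spare space inside the IOARCB itself, which should save the
         * adapter a separate DMA fetch of the list.
         */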
6017         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6018                 ioadl = ioarcb->u.add_data.u.ioadl;
6019                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6020                                     offsetof(struct ipr_ioarcb, u.add_data));
6021                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6022         }
6023
6024         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6025                 ioadl[i].flags_and_data_len =
6026                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6027                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6028         }
6029
6030         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6031         return 0;
6032 }
6033
6034 /**
6035  * __ipr_erp_done - Process completion of ERP for a device
6036  * @ipr_cmd:            ipr command struct
6037  *
6038  * This function copies the sense buffer into the scsi_cmd
6039  * struct and calls the scsi_done function.
6040  *
6041  * Return value:
6042  *      nothing
6043  **/
6044 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6045 {
6046         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6047         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6048         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6049
6050         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6051                 scsi_cmd->result |= (DID_ERROR << 16);
6052                 scmd_printk(KERN_ERR, scsi_cmd,
6053                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6054         } else {
6055                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6056                        SCSI_SENSE_BUFFERSIZE);
6057         }
6058
6059         if (res) {
6060                 if (!ipr_is_naca_model(res))
6061                         res->needs_sync_complete = 1;
6062                 res->in_erp = 0;
6063         }
6064         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6065         scsi_cmd->scsi_done(scsi_cmd);
6066         if (ipr_cmd->eh_comp)
6067                 complete(ipr_cmd->eh_comp);
6068         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6069 }
6070
6071 /**
6072  * ipr_erp_done - Process completion of ERP for a device
6073  * @ipr_cmd:            ipr command struct
6074  *
6075  * This function copies the sense buffer into the scsi_cmd
6076  * struct and calls the scsi_done function.
6077  *
6078  * Return value:
6079  *      nothing
6080  **/
6081 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6082 {
6083         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6084         unsigned long hrrq_flags;
6085
6086         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6087         __ipr_erp_done(ipr_cmd);
6088         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6089 }
6090
6091 /**
6092  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6093  * @ipr_cmd:    ipr command struct
6094  *
6095  * Return value:
6096  *      none
6097  **/
6098 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6099 {
6100         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6101         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6102         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6103
6104         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6105         ioarcb->data_transfer_length = 0;
6106         ioarcb->read_data_transfer_length = 0;
6107         ioarcb->ioadl_len = 0;
6108         ioarcb->read_ioadl_len = 0;
6109         ioasa->hdr.ioasc = 0;
6110         ioasa->hdr.residual_data_len = 0;
6111
6112         if (ipr_cmd->ioa_cfg->sis64)
6113                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6114                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6115         else {
6116                 ioarcb->write_ioadl_addr =
6117                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6118                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6119         }
6120 }
6121
6122 /**
6123  * __ipr_erp_request_sense - Send request sense to a device
6124  * @ipr_cmd:    ipr command struct
6125  *
6126  * This function sends a request sense to a device as a result
6127  * of a check condition.
6128  *
6129  * Return value:
6130  *      nothing
6131  **/
6132 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6133 {
6134         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6135         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6136
6137         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6138                 __ipr_erp_done(ipr_cmd);
6139                 return;
6140         }
6141
6142         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6143
6144         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6145         cmd_pkt->cdb[0] = REQUEST_SENSE;
6146         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6147         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6148         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6149         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6150
6151         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6152                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6153
6154         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6155                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6156 }
6157
6158 /**
6159  * ipr_erp_request_sense - Send request sense to a device
6160  * @ipr_cmd:    ipr command struct
6161  *
6162  * This function sends a request sense to a device as a result
6163  * of a check condition.
6164  *
6165  * Return value:
6166  *      nothing
6167  **/
6168 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6169 {
6170         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6171         unsigned long hrrq_flags;
6172
6173         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6174         __ipr_erp_request_sense(ipr_cmd);
6175         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6176 }
6177
6178 /**
6179  * ipr_erp_cancel_all - Send cancel all to a device
6180  * @ipr_cmd:    ipr command struct
6181  *
6182  * This function sends a cancel all to a device to clear the
6183  * queue. If we are running TCQ on the device, QERR is set to 1,
6184  * which means all outstanding ops have been dropped on the floor.
6185  * Cancel all will return them to us.
6186  *
6187  * Return value:
6188  *      nothing
6189  **/
6190 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6191 {
6192         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6193         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6194         struct ipr_cmd_pkt *cmd_pkt;
6195
6196         res->in_erp = 1;
6197
6198         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6199
6200         if (!scsi_cmd->device->simple_tags) {
6201                 __ipr_erp_request_sense(ipr_cmd);
6202                 return;
6203         }
6204
6205         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6206         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6207         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6208
6209         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6210                    IPR_CANCEL_ALL_TIMEOUT);
6211 }
6212
6213 /**
6214  * ipr_dump_ioasa - Dump contents of IOASA
6215  * @ioa_cfg:    ioa config struct
6216  * @ipr_cmd:    ipr command struct
6217  * @res:                resource entry struct
6218  *
6219  * This function is invoked by the interrupt handler when ops
6220  * fail. It will log the IOASA if appropriate. Only called
6221  * for GPDD ops.
6222  *
6223  * Return value:
6224  *      none
6225  **/
6226 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6227                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6228 {
6229         int i;
6230         u16 data_len;
6231         u32 ioasc, fd_ioasc;
6232         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6233         __be32 *ioasa_data = (__be32 *)ioasa;
6234         int error_index;
6235
6236         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6237         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6238
6239         if (0 == ioasc)
6240                 return;
6241
6242         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6243                 return;
6244
6245         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6246                 error_index = ipr_get_error(fd_ioasc);
6247         else
6248                 error_index = ipr_get_error(ioasc);
6249
6250         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6251                 /* Don't log an error if the IOA already logged one */
6252                 if (ioasa->hdr.ilid != 0)
6253                         return;
6254
6255                 if (!ipr_is_gscsi(res))
6256                         return;
6257
6258                 if (ipr_error_table[error_index].log_ioasa == 0)
6259                         return;
6260         }
6261
6262         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6263
6264         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6265         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6266                 data_len = sizeof(struct ipr_ioasa64);
6267         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6268                 data_len = sizeof(struct ipr_ioasa);
6269
6270         ipr_err("IOASA Dump:\n");
6271
6272         for (i = 0; i < data_len / 4; i += 4) {
6273                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6274                         be32_to_cpu(ioasa_data[i]),
6275                         be32_to_cpu(ioasa_data[i+1]),
6276                         be32_to_cpu(ioasa_data[i+2]),
6277                         be32_to_cpu(ioasa_data[i+3]));
6278         }
6279 }
6280
6281 /**
6282  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6283  * @ipr_cmd:    ipr command struct
6285  *
6286  * Return value:
6287  *      none
6288  **/
6289 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6290 {
6291         u32 failing_lba;
6292         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6293         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6294         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6295         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6296
6297         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6298
6299         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6300                 return;
6301
6302         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6303
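        /* A VSET failing LBA with a nonzero high 32 bits needs
         * descriptor-format (0x72) sense data to carry the 64-bit LBA;
         * all other cases use fixed-format (0x70) sense data.
         */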
6304         if (ipr_is_vset_device(res) &&
6305             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6306             ioasa->u.vset.failing_lba_hi != 0) {
6307                 sense_buf[0] = 0x72;
6308                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6309                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6310                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6311
6312                 sense_buf[7] = 12;
6313                 sense_buf[8] = 0;
6314                 sense_buf[9] = 0x0A;
6315                 sense_buf[10] = 0x80;
6316
6317                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6318
6319                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6320                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6321                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6322                 sense_buf[15] = failing_lba & 0x000000ff;
6323
6324                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6325
6326                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6327                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6328                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6329                 sense_buf[19] = failing_lba & 0x000000ff;
6330         } else {
6331                 sense_buf[0] = 0x70;
6332                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6333                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6334                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6335
6336                 /* Illegal request */
6337                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6338                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6339                         sense_buf[7] = 10;      /* additional length */
6340
6341                         /* IOARCB was in error */
6342                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6343                                 sense_buf[15] = 0xC0;
6344                         else    /* Parameter data was invalid */
6345                                 sense_buf[15] = 0x80;
6346
6347                         sense_buf[16] =
6348                             ((IPR_FIELD_POINTER_MASK &
6349                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6350                         sense_buf[17] =
6351                             (IPR_FIELD_POINTER_MASK &
6352                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6353                 } else {
6354                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6355                                 if (ipr_is_vset_device(res))
6356                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6357                                 else
6358                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6359
6360                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6361                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6362                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6363                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6364                                 sense_buf[6] = failing_lba & 0x000000ff;
6365                         }
6366
6367                         sense_buf[7] = 6;       /* additional length */
6368                 }
6369         }
6370 }
6371
6372 /**
6373  * ipr_get_autosense - Copy autosense data to sense buffer
6374  * @ipr_cmd:    ipr command struct
6375  *
6376  * This function copies the autosense buffer to the buffer
6377  * in the scsi_cmd, if there is autosense available.
6378  *
6379  * Return value:
6380  *      1 if autosense was available / 0 if not
6381  **/
6382 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6383 {
6384         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6385         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6386
6387         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6388                 return 0;
6389
6390         if (ipr_cmd->ioa_cfg->sis64)
6391                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6392                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6393                            SCSI_SENSE_BUFFERSIZE));
6394         else
6395                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6396                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6397                            SCSI_SENSE_BUFFERSIZE));
6398         return 1;
6399 }
6400
6401 /**
6402  * ipr_erp_start - Process an error response for a SCSI op
6403  * @ioa_cfg:    ioa config struct
6404  * @ipr_cmd:    ipr command struct
6405  *
6406  * This function determines whether or not to initiate ERP
6407  * on the affected device.
6408  *
6409  * Return value:
6410  *      nothing
6411  **/
6412 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6413                               struct ipr_cmnd *ipr_cmd)
6414 {
6415         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6416         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6417         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6418         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6419
6420         if (!res) {
6421                 __ipr_scsi_eh_done(ipr_cmd);
6422                 return;
6423         }
6424
6425         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6426                 ipr_gen_sense(ipr_cmd);
6427
6428         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6429
6430         switch (masked_ioasc) {
6431         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6432                 if (ipr_is_naca_model(res))
6433                         scsi_cmd->result |= (DID_ABORT << 16);
6434                 else
6435                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6436                 break;
6437         case IPR_IOASC_IR_RESOURCE_HANDLE:
6438         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6439                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6440                 break;
6441         case IPR_IOASC_HW_SEL_TIMEOUT:
6442                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6443                 if (!ipr_is_naca_model(res))
6444                         res->needs_sync_complete = 1;
6445                 break;
6446         case IPR_IOASC_SYNC_REQUIRED:
6447                 if (!res->in_erp)
6448                         res->needs_sync_complete = 1;
6449                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6450                 break;
6451         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6452         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6453                 /*
6454                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6455                  * so SCSI mid-layer and upper layers handle it accordingly.
6456                  */
6457                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6458                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6459                 break;
6460         case IPR_IOASC_BUS_WAS_RESET:
6461         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6462                 /*
6463                  * Report the bus reset and ask for a retry. The device
6464                  * will give CC/UA the next command.
6465                  * will return a CC/UA on the next command.
6466                 if (!res->resetting_device)
6467                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6468                 scsi_cmd->result |= (DID_ERROR << 16);
6469                 if (!ipr_is_naca_model(res))
6470                         res->needs_sync_complete = 1;
6471                 break;
6472         case IPR_IOASC_HW_DEV_BUS_STATUS:
6473                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6474                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6475                         if (!ipr_get_autosense(ipr_cmd)) {
6476                                 if (!ipr_is_naca_model(res)) {
6477                                         ipr_erp_cancel_all(ipr_cmd);
6478                                         return;
6479                                 }
6480                         }
6481                 }
6482                 if (!ipr_is_naca_model(res))
6483                         res->needs_sync_complete = 1;
6484                 break;
6485         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6486                 break;
6487         case IPR_IOASC_IR_NON_OPTIMIZED:
6488                 if (res->raw_mode) {
6489                         res->raw_mode = 0;
6490                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6491                 } else
6492                         scsi_cmd->result |= (DID_ERROR << 16);
6493                 break;
6494         default:
6495                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6496                         scsi_cmd->result |= (DID_ERROR << 16);
6497                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6498                         res->needs_sync_complete = 1;
6499                 break;
6500         }
6501
6502         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6503         scsi_cmd->scsi_done(scsi_cmd);
6504         if (ipr_cmd->eh_comp)
6505                 complete(ipr_cmd->eh_comp);
6506         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6507 }
6508
6509 /**
6510  * ipr_scsi_done - mid-layer done function
6511  * @ipr_cmd:    ipr command struct
6512  *
6513  * This function is invoked by the interrupt handler for
6514  * ops generated by the SCSI mid-layer
6515  *
6516  * Return value:
6517  *      none
6518  **/
6519 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6520 {
6521         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6522         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6523         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6524         unsigned long lock_flags;
6525
6526         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6527
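        /* Successful commands are completed directly under the HRRQ lock;
         * any failure is handed to error recovery processing under the
         * host lock.
         */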
6528         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6529                 scsi_dma_unmap(scsi_cmd);
6530
6531                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6532                 scsi_cmd->scsi_done(scsi_cmd);
6533                 if (ipr_cmd->eh_comp)
6534                         complete(ipr_cmd->eh_comp);
6535                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6536                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6537         } else {
6538                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6539                 spin_lock(&ipr_cmd->hrrq->_lock);
6540                 ipr_erp_start(ioa_cfg, ipr_cmd);
6541                 spin_unlock(&ipr_cmd->hrrq->_lock);
6542                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6543         }
6544 }
6545
6546 /**
6547  * ipr_queuecommand - Queue a mid-layer request
6548  * @shost:              scsi host struct
6549  * @scsi_cmd:   scsi command struct
6550  *
6551  * This function queues a request generated by the mid-layer.
6552  *
6553  * Return value:
6554  *      0 on success
6555  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6556  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6557  **/
6558 static int ipr_queuecommand(struct Scsi_Host *shost,
6559                             struct scsi_cmnd *scsi_cmd)
6560 {
6561         struct ipr_ioa_cfg *ioa_cfg;
6562         struct ipr_resource_entry *res;
6563         struct ipr_ioarcb *ioarcb;
6564         struct ipr_cmnd *ipr_cmd;
6565         unsigned long hrrq_flags, lock_flags;
6566         int rc;
6567         struct ipr_hrr_queue *hrrq;
6568         int hrrq_id;
6569
6570         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6571
6572         scsi_cmd->result = (DID_OK << 16);
6573         res = scsi_cmd->device->hostdata;
6574
6575         if (ipr_is_gata(res) && res->sata_port) {
6576                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6577                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6578                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6579                 return rc;
6580         }
6581
6582         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6583         hrrq = &ioa_cfg->hrrq[hrrq_id];
6584
6585         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6586         /*
6587          * We are currently blocking all devices due to a host reset.
6588          * We have told the host to stop giving us new requests, but
6589          * ERP ops don't count. FIXME
6590          */
6591         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6592                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6593                 return SCSI_MLQUEUE_HOST_BUSY;
6594         }
6595
6596         /*
6597          * FIXME - Create scsi_set_host_offline interface
6598          *  so that the ioa_is_dead check can be removed
6599          */
6600         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6601                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6602                 goto err_nodev;
6603         }
6604
6605         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6606         if (ipr_cmd == NULL) {
6607                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6608                 return SCSI_MLQUEUE_HOST_BUSY;
6609         }
6610         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6611
6612         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6613         ioarcb = &ipr_cmd->ioarcb;
6614
6615         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6616         ipr_cmd->scsi_cmd = scsi_cmd;
6617         ipr_cmd->done = ipr_scsi_eh_done;
6618
6619         if (ipr_is_gscsi(res)) {
6620                 if (scsi_cmd->underflow == 0)
6621                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6622
6623                 if (res->reset_occurred) {
6624                         res->reset_occurred = 0;
6625                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6626                 }
6627         }
6628
6629         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6630                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6631
6632                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6633                 if (scsi_cmd->flags & SCMD_TAGGED)
6634                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6635                 else
6636                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6637         }
6638
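        /* CDB opcodes 0xC0 and above are vendor specific. Route them to
         * the adapter as IOA commands unless the device is generic SCSI,
         * with the query resource state opcode as the exception.
         */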
6639         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6640             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6641                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6642         }
6643         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6644                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6645
6646                 if (scsi_cmd->underflow == 0)
6647                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6648         }
6649
6650         if (ioa_cfg->sis64)
6651                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6652         else
6653                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6654
6655         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6656         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6657                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6658                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6659                 if (!rc)
6660                         scsi_dma_unmap(scsi_cmd);
6661                 return SCSI_MLQUEUE_HOST_BUSY;
6662         }
6663
6664         if (unlikely(hrrq->ioa_is_dead)) {
6665                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6666                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6667                 scsi_dma_unmap(scsi_cmd);
6668                 goto err_nodev;
6669         }
6670
6671         ioarcb->res_handle = res->res_handle;
6672         if (res->needs_sync_complete) {
6673                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6674                 res->needs_sync_complete = 0;
6675         }
6676         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6677         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6678         ipr_send_command(ipr_cmd);
6679         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6680         return 0;
6681
6682 err_nodev:
6683         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6684         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6685         scsi_cmd->result = (DID_NO_CONNECT << 16);
6686         scsi_cmd->scsi_done(scsi_cmd);
6687         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6688         return 0;
6689 }
6690
6691 /**
6692  * ipr_ioctl - IOCTL handler
6693  * @sdev:       scsi device struct
6694  * @cmd:        IOCTL cmd
6695  * @arg:        IOCTL arg
6696  *
6697  * Return value:
6698  *      0 on success / other on failure
6699  **/
6700 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6701 {
6702         struct ipr_resource_entry *res;
6703
6704         res = (struct ipr_resource_entry *)sdev->hostdata;
6705         if (res && ipr_is_gata(res)) {
6706                 if (cmd == HDIO_GET_IDENTITY)
6707                         return -ENOTTY;
6708                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6709         }
6710
6711         return -EINVAL;
6712 }
6713
6714 /**
6715  * ipr_ioa_info - Get information about the card/driver
6716  * @host:       scsi host struct
6717  *
6718  * Return value:
6719  *      pointer to buffer with description string
6720  **/
6721 static const char *ipr_ioa_info(struct Scsi_Host *host)
6722 {
6723         static char buffer[512];
6724         struct ipr_ioa_cfg *ioa_cfg;
6725         unsigned long lock_flags = 0;
6726
6727         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6728
6729         spin_lock_irqsave(host->host_lock, lock_flags);
6730         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6731         spin_unlock_irqrestore(host->host_lock, lock_flags);
6732
6733         return buffer;
6734 }
6735
6736 static struct scsi_host_template driver_template = {
6737         .module = THIS_MODULE,
6738         .name = "IPR",
6739         .info = ipr_ioa_info,
6740         .ioctl = ipr_ioctl,
6741         .queuecommand = ipr_queuecommand,
6742         .eh_abort_handler = ipr_eh_abort,
6743         .eh_device_reset_handler = ipr_eh_dev_reset,
6744         .eh_host_reset_handler = ipr_eh_host_reset,
6745         .slave_alloc = ipr_slave_alloc,
6746         .slave_configure = ipr_slave_configure,
6747         .slave_destroy = ipr_slave_destroy,
6748         .scan_finished = ipr_scan_finished,
6749         .target_alloc = ipr_target_alloc,
6750         .target_destroy = ipr_target_destroy,
6751         .change_queue_depth = ipr_change_queue_depth,
6752         .bios_param = ipr_biosparam,
6753         .can_queue = IPR_MAX_COMMANDS,
6754         .this_id = -1,
6755         .sg_tablesize = IPR_MAX_SGLIST,
6756         .max_sectors = IPR_IOA_MAX_SECTORS,
6757         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6758         .use_clustering = ENABLE_CLUSTERING,
6759         .shost_attrs = ipr_ioa_attrs,
6760         .sdev_attrs = ipr_dev_attrs,
6761         .proc_name = IPR_NAME,
6762 };
6763
6764 /**
6765  * ipr_ata_phy_reset - libata phy_reset handler
6766  * @ap:         ata port to reset
6767  *
6768  **/
6769 static void ipr_ata_phy_reset(struct ata_port *ap)
6770 {
6771         unsigned long flags;
6772         struct ipr_sata_port *sata_port = ap->private_data;
6773         struct ipr_resource_entry *res = sata_port->res;
6774         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6775         int rc;
6776
6777         ENTER;
6778         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6779         while (ioa_cfg->in_reset_reload) {
6780                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6781                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6782                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6783         }
6784
6785         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6786                 goto out_unlock;
6787
6788         rc = ipr_device_reset(ioa_cfg, res);
6789
6790         if (rc) {
6791                 ap->link.device[0].class = ATA_DEV_NONE;
6792                 goto out_unlock;
6793         }
6794
6795         ap->link.device[0].class = res->ata_class;
6796         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6797                 ap->link.device[0].class = ATA_DEV_NONE;
6798
6799 out_unlock:
6800         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6801         LEAVE;
6802 }
6803
6804 /**
6805  * ipr_ata_post_internal - Cleanup after an internal command
6806  * @qc: ATA queued command
6807  *
6808  * Return value:
6809  *      none
6810  **/
6811 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6812 {
6813         struct ipr_sata_port *sata_port = qc->ap->private_data;
6814         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6815         struct ipr_cmnd *ipr_cmd;
6816         struct ipr_hrr_queue *hrrq;
6817         unsigned long flags;
6818
6819         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6820         while (ioa_cfg->in_reset_reload) {
6821                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6822                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6823                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6824         }
6825
6826         for_each_hrrq(hrrq, ioa_cfg) {
6827                 spin_lock(&hrrq->_lock);
6828                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6829                         if (ipr_cmd->qc == qc) {
6830                                 ipr_device_reset(ioa_cfg, sata_port->res);
6831                                 break;
6832                         }
6833                 }
6834                 spin_unlock(&hrrq->_lock);
6835         }
6836         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6837 }
6838
6839 /**
6840  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6841  * @regs:       destination
6842  * @tf: source ATA taskfile
6843  *
6844  * Return value:
6845  *      none
6846  **/
6847 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6848                              struct ata_taskfile *tf)
6849 {
6850         regs->feature = tf->feature;
6851         regs->nsect = tf->nsect;
6852         regs->lbal = tf->lbal;
6853         regs->lbam = tf->lbam;
6854         regs->lbah = tf->lbah;
6855         regs->device = tf->device;
6856         regs->command = tf->command;
6857         regs->hob_feature = tf->hob_feature;
6858         regs->hob_nsect = tf->hob_nsect;
6859         regs->hob_lbal = tf->hob_lbal;
6860         regs->hob_lbam = tf->hob_lbam;
6861         regs->hob_lbah = tf->hob_lbah;
6862         regs->ctl = tf->ctl;
6863 }
6864
6865 /**
6866  * ipr_sata_done - done function for SATA commands
6867  * @ipr_cmd:    ipr command struct
6868  *
6869  * This function is invoked by the interrupt handler for
6870  * ops generated by the SCSI mid-layer to SATA devices
6871  *
6872  * Return value:
6873  *      none
6874  **/
6875 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6876 {
6877         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6878         struct ata_queued_cmd *qc = ipr_cmd->qc;
6879         struct ipr_sata_port *sata_port = qc->ap->private_data;
6880         struct ipr_resource_entry *res = sata_port->res;
6881         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6882
6883         spin_lock(&ipr_cmd->hrrq->_lock);
6884         if (ipr_cmd->ioa_cfg->sis64)
6885                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6886                        sizeof(struct ipr_ioasa_gata));
6887         else
6888                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6889                        sizeof(struct ipr_ioasa_gata));
6890         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6891
6892         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6893                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6894
6895         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6896                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6897         else
6898                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6899         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6900         spin_unlock(&ipr_cmd->hrrq->_lock);
6901         ata_qc_complete(qc);
6902 }
6903
6904 /**
6905  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6906  * @ipr_cmd:    ipr command struct
6907  * @qc:         ATA queued command
6908  *
6909  **/
6910 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6911                                   struct ata_queued_cmd *qc)
6912 {
6913         u32 ioadl_flags = 0;
6914         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6915         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6916         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6917         int len = qc->nbytes;
6918         struct scatterlist *sg;
6919         unsigned int si;
6920         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6921
6922         if (len == 0)
6923                 return;
6924
6925         if (qc->dma_dir == DMA_TO_DEVICE) {
6926                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6927                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6928         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6929                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6930
6931         ioarcb->data_transfer_length = cpu_to_be32(len);
6932         ioarcb->ioadl_len =
6933                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6934         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6935                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6936
6937         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6938                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6939                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6940                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6941
6942                 last_ioadl64 = ioadl64;
6943                 ioadl64++;
6944         }
6945
6946         if (likely(last_ioadl64))
6947                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6948 }
6949
6950 /**
6951  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6952  * @ipr_cmd:    ipr command struct
6953  * @qc:         ATA queued command
6954  *
6955  **/
6956 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6957                                 struct ata_queued_cmd *qc)
6958 {
6959         u32 ioadl_flags = 0;
6960         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6961         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6962         struct ipr_ioadl_desc *last_ioadl = NULL;
6963         int len = qc->nbytes;
6964         struct scatterlist *sg;
6965         unsigned int si;
6966
6967         if (len == 0)
6968                 return;
6969
6970         if (qc->dma_dir == DMA_TO_DEVICE) {
6971                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6972                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6973                 ioarcb->data_transfer_length = cpu_to_be32(len);
6974                 ioarcb->ioadl_len =
6975                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6976         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6977                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6978                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6979                 ioarcb->read_ioadl_len =
6980                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6981         }
6982
6983         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6984                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6985                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6986
6987                 last_ioadl = ioadl;
6988                 ioadl++;
6989         }
6990
6991         if (likely(last_ioadl))
6992                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6993 }
6994
6995 /**
6996  * ipr_qc_defer - Get a free ipr_cmd
6997  * @qc: queued command
6998  *
6999  * Return value:
7000  *      0 if success / ATA_DEFER_LINK if the command must be deferred
7001  **/
7002 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7003 {
7004         struct ata_port *ap = qc->ap;
7005         struct ipr_sata_port *sata_port = ap->private_data;
7006         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7007         struct ipr_cmnd *ipr_cmd;
7008         struct ipr_hrr_queue *hrrq;
7009         int hrrq_id;
7010
7011         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7012         hrrq = &ioa_cfg->hrrq[hrrq_id];
7013
7014         qc->lldd_task = NULL;
7015         spin_lock(&hrrq->_lock);
7016         if (unlikely(hrrq->ioa_is_dead)) {
7017                 spin_unlock(&hrrq->_lock);
7018                 return 0;
7019         }
7020
7021         if (unlikely(!hrrq->allow_cmds)) {
7022                 spin_unlock(&hrrq->_lock);
7023                 return ATA_DEFER_LINK;
7024         }
7025
7026         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7027         if (ipr_cmd == NULL) {
7028                 spin_unlock(&hrrq->_lock);
7029                 return ATA_DEFER_LINK;
7030         }
7031
7032         qc->lldd_task = ipr_cmd;
7033         spin_unlock(&hrrq->_lock);
7034         return 0;
7035 }
7036
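     /*
      * ipr_qc_defer() reserves an ipr_cmnd and stashes it in
      * qc->lldd_task for ipr_qc_issue() below to consume.  Note that a
      * dead IOA returns 0 rather than ATA_DEFER_LINK, so the command
      * proceeds to ipr_qc_issue(), which fails it with AC_ERR_SYSTEM
      * instead of deferring it forever against a dead adapter.
      */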
7037 /**
7038  * ipr_qc_issue - Issue a SATA qc to a device
7039  * @qc: queued command
7040  *
7041  * Return value:
7042  *      0 if success / AC_ERR_* failure code on error
7043  **/
7044 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7045 {
7046         struct ata_port *ap = qc->ap;
7047         struct ipr_sata_port *sata_port = ap->private_data;
7048         struct ipr_resource_entry *res = sata_port->res;
7049         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7050         struct ipr_cmnd *ipr_cmd;
7051         struct ipr_ioarcb *ioarcb;
7052         struct ipr_ioarcb_ata_regs *regs;
7053
7054         if (qc->lldd_task == NULL)
7055                 ipr_qc_defer(qc);
7056
7057         ipr_cmd = qc->lldd_task;
7058         if (ipr_cmd == NULL)
7059                 return AC_ERR_SYSTEM;
7060
7061         qc->lldd_task = NULL;
7062         spin_lock(&ipr_cmd->hrrq->_lock);
7063         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7064                         ipr_cmd->hrrq->ioa_is_dead)) {
7065                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7066                 spin_unlock(&ipr_cmd->hrrq->_lock);
7067                 return AC_ERR_SYSTEM;
7068         }
7069
7070         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7071         ioarcb = &ipr_cmd->ioarcb;
7072
7073         if (ioa_cfg->sis64) {
7074                 regs = &ipr_cmd->i.ata_ioadl.regs;
7075                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7076         } else
7077                 regs = &ioarcb->u.add_data.u.regs;
7078
7079         memset(regs, 0, sizeof(*regs));
7080         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7081
7082         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7083         ipr_cmd->qc = qc;
7084         ipr_cmd->done = ipr_sata_done;
7085         ipr_cmd->ioarcb.res_handle = res->res_handle;
7086         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7087         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7088         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7089         ipr_cmd->dma_use_sg = qc->n_elem;
7090
7091         if (ioa_cfg->sis64)
7092                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7093         else
7094                 ipr_build_ata_ioadl(ipr_cmd, qc);
7095
7096         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7097         ipr_copy_sata_tf(regs, &qc->tf);
7098         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7099         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7100
7101         switch (qc->tf.protocol) {
7102         case ATA_PROT_NODATA:
7103         case ATA_PROT_PIO:
7104                 break;
7105
7106         case ATA_PROT_DMA:
7107                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7108                 break;
7109
7110         case ATAPI_PROT_PIO:
7111         case ATAPI_PROT_NODATA:
7112                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7113                 break;
7114
7115         case ATAPI_PROT_DMA:
7116                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7117                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7118                 break;
7119
7120         default:
7121                 WARN_ON(1);
7122                 spin_unlock(&ipr_cmd->hrrq->_lock);
7123                 return AC_ERR_INVALID;
7124         }
7125
7126         ipr_send_command(ipr_cmd);
7127         spin_unlock(&ipr_cmd->hrrq->_lock);
7128
7129         return 0;
7130 }
7131
7132 /**
7133  * ipr_qc_fill_rtf - Read result TF
7134  * @qc: ATA queued command
7135  *
7136  * Return value:
7137  *      true
7138  **/
7139 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7140 {
7141         struct ipr_sata_port *sata_port = qc->ap->private_data;
7142         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7143         struct ata_taskfile *tf = &qc->result_tf;
7144
7145         tf->feature = g->error;
7146         tf->nsect = g->nsect;
7147         tf->lbal = g->lbal;
7148         tf->lbam = g->lbam;
7149         tf->lbah = g->lbah;
7150         tf->device = g->device;
7151         tf->command = g->status;
7152         tf->hob_nsect = g->hob_nsect;
7153         tf->hob_lbal = g->hob_lbal;
7154         tf->hob_lbam = g->hob_lbam;
7155         tf->hob_lbah = g->hob_lbah;
7156
7157         return true;
7158 }
7159
7160 static struct ata_port_operations ipr_sata_ops = {
7161         .phy_reset = ipr_ata_phy_reset,
7162         .hardreset = ipr_sata_reset,
7163         .post_internal_cmd = ipr_ata_post_internal,
7164         .qc_prep = ata_noop_qc_prep,
7165         .qc_defer = ipr_qc_defer,
7166         .qc_issue = ipr_qc_issue,
7167         .qc_fill_rtf = ipr_qc_fill_rtf,
7168         .port_start = ata_sas_port_start,
7169         .port_stop = ata_sas_port_stop
7170 };
7171
7172 static struct ata_port_info sata_port_info = {
7173         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7174                           ATA_FLAG_SAS_HOST,
7175         .pio_mask       = ATA_PIO4_ONLY,
7176         .mwdma_mask     = ATA_MWDMA2,
7177         .udma_mask      = ATA_UDMA6,
7178         .port_ops       = &ipr_sata_ops
7179 };
7180
7181 #ifdef CONFIG_PPC_PSERIES
7182 static const u16 ipr_blocked_processors[] = {
7183         PVR_NORTHSTAR,
7184         PVR_PULSAR,
7185         PVR_POWER4,
7186         PVR_ICESTAR,
7187         PVR_SSTAR,
7188         PVR_POWER4p,
7189         PVR_630,
7190         PVR_630p
7191 };
7192
7193 /**
7194  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7195  * @ioa_cfg:    ioa cfg struct
7196  *
7197  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7198  * certain pSeries hardware. This function determines if the given
7199  * adapter is in one of these configurations or not.
7200  *
7201  * Return value:
7202  *      1 if adapter is not supported / 0 if adapter is supported
7203  **/
7204 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7205 {
7206         int i;
7207
7208         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7209                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7210                         if (pvr_version_is(ipr_blocked_processors[i]))
7211                                 return 1;
7212                 }
7213         }
7214         return 0;
7215 }
7216 #else
7217 #define ipr_invalid_adapter(ioa_cfg) 0
7218 #endif
7219
7220 /**
7221  * ipr_ioa_bringdown_done - IOA bring down completion.
7222  * @ipr_cmd:    ipr command struct
7223  *
7224  * This function processes the completion of an adapter bring down.
7225  * It wakes any reset sleepers.
7226  *
7227  * Return value:
7228  *      IPR_RC_JOB_RETURN
7229  **/
7230 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7231 {
7232         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7233         int i;
7234
7235         ENTER;
7236         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7237                 ipr_trace;
7238                 ioa_cfg->scsi_unblock = 1;
7239                 schedule_work(&ioa_cfg->work_q);
7240         }
7241
7242         ioa_cfg->in_reset_reload = 0;
7243         ioa_cfg->reset_retries = 0;
7244         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7245                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7246                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7247                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7248         }
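             /*
              * Make the ioa_is_dead updates visible before the command
              * is returned to the free queue and reset waiters are
              * woken below.
              */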
7249         wmb();
7250
7251         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7252         wake_up_all(&ioa_cfg->reset_wait_q);
7253         LEAVE;
7254
7255         return IPR_RC_JOB_RETURN;
7256 }
7257
7258 /**
7259  * ipr_ioa_reset_done - IOA reset completion.
7260  * @ipr_cmd:    ipr command struct
7261  *
7262  * This function processes the completion of an adapter reset.
7263  * It schedules any necessary mid-layer add/removes and
7264  * wakes any reset sleepers.
7265  *
7266  * Return value:
7267  *      IPR_RC_JOB_RETURN
7268  **/
7269 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7270 {
7271         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7272         struct ipr_resource_entry *res;
7273         int j;
7274
7275         ENTER;
7276         ioa_cfg->in_reset_reload = 0;
7277         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7278                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7279                 ioa_cfg->hrrq[j].allow_cmds = 1;
7280                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7281         }
7282         wmb();
7283         ioa_cfg->reset_cmd = NULL;
7284         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7285
7286         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7287                 if (res->add_to_ml || res->del_from_ml) {
7288                         ipr_trace;
7289                         break;
7290                 }
7291         }
7292         schedule_work(&ioa_cfg->work_q);
7293
7294         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7295                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7296                 if (j < IPR_NUM_LOG_HCAMS)
7297                         ipr_send_hcam(ioa_cfg,
7298                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7299                                 ioa_cfg->hostrcb[j]);
7300                 else
7301                         ipr_send_hcam(ioa_cfg,
7302                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7303                                 ioa_cfg->hostrcb[j]);
7304         }
7305
7306         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7307         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7308
7309         ioa_cfg->reset_retries = 0;
7310         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7311         wake_up_all(&ioa_cfg->reset_wait_q);
7312
7313         ioa_cfg->scsi_unblock = 1;
7314         schedule_work(&ioa_cfg->work_q);
7315         LEAVE;
7316         return IPR_RC_JOB_RETURN;
7317 }
7318
7319 /**
7320  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7321  * @supported_dev:      supported device struct
7322  * @vpids:                      vendor product id struct
7323  *
7324  * Return value:
7325  *      none
7326  **/
7327 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7328                                  struct ipr_std_inq_vpids *vpids)
7329 {
7330         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7331         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7332         supported_dev->num_records = 1;
7333         supported_dev->data_length =
7334                 cpu_to_be16(sizeof(struct ipr_supported_device));
7335         supported_dev->reserved = 0;
7336 }
7337
7338 /**
7339  * ipr_set_supported_devs - Send Set Supported Devices for a device
7340  * @ipr_cmd:    ipr command struct
7341  *
7342  * This function sends a Set Supported Devices to the adapter
7343  *
7344  * Return value:
7345  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7346  **/
7347 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7348 {
7349         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7350         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7351         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7352         struct ipr_resource_entry *res = ipr_cmd->u.res;
7353
7354         ipr_cmd->job_step = ipr_ioa_reset_done;
7355
7356         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7357                 if (!ipr_is_scsi_disk(res))
7358                         continue;
7359
7360                 ipr_cmd->u.res = res;
7361                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7362
7363                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7364                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7365                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7366
7367                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7368                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7369                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7370                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7371
7372                 ipr_init_ioadl(ipr_cmd,
7373                                ioa_cfg->vpd_cbs_dma +
7374                                  offsetof(struct ipr_misc_cbs, supp_dev),
7375                                sizeof(struct ipr_supported_device),
7376                                IPR_IOADL_FLAGS_WRITE_LAST);
7377
7378                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7379                            IPR_SET_SUP_DEVICE_TIMEOUT);
7380
7381                 if (!ioa_cfg->sis64)
7382                         ipr_cmd->job_step = ipr_set_supported_devs;
7383                 LEAVE;
7384                 return IPR_RC_JOB_RETURN;
7385         }
7386
7387         LEAVE;
7388         return IPR_RC_JOB_CONTINUE;
7389 }
7390
7391 /**
7392  * ipr_get_mode_page - Locate specified mode page
7393  * @mode_pages: mode page buffer
7394  * @page_code:  page code to find
7395  * @len:                minimum required length for mode page
7396  *
7397  * Return value:
7398  *      pointer to mode page / NULL on failure
7399  **/
7400 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7401                                u32 page_code, u32 len)
7402 {
7403         struct ipr_mode_page_hdr *mode_hdr;
7404         u32 page_length;
7405         u32 length;
7406
7407         if (!mode_pages || (mode_pages->hdr.length == 0))
7408                 return NULL;
7409
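             /*
              * The MODE SENSE data length byte excludes itself, so the
              * returned data totals hdr.length + 1 bytes; subtracting
              * the 4-byte mode parameter header and any block
              * descriptors leaves the number of mode page bytes to
              * walk below.
              */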
7410         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7411         mode_hdr = (struct ipr_mode_page_hdr *)
7412                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7413
7414         while (length) {
7415                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7416                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7417                                 return mode_hdr;
7418                         break;
7419                 } else {
7420                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7421                                        mode_hdr->page_length);
7422                         length -= page_length;
7423                         mode_hdr = (struct ipr_mode_page_hdr *)
7424                                 ((unsigned long)mode_hdr + page_length);
7425                 }
7426         }
7427         return NULL;
7428 }
7429
7430 /**
7431  * ipr_check_term_power - Check for term power errors
7432  * @ioa_cfg:    ioa config struct
7433  * @mode_pages: IOAFP mode pages buffer
7434  *
7435  * Check the IOAFP's mode page 28 for term power errors
7436  *
7437  * Return value:
7438  *      nothing
7439  **/
7440 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7441                                  struct ipr_mode_pages *mode_pages)
7442 {
7443         int i;
7444         int entry_length;
7445         struct ipr_dev_bus_entry *bus;
7446         struct ipr_mode_page28 *mode_page;
7447
7448         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7449                                       sizeof(struct ipr_mode_page28));
7450
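             /*
              * Page 28 is expected to always be present in the IOAFP
              * mode sense data; a NULL return from ipr_get_mode_page()
              * here would indicate a malformed response from the
              * adapter.
              */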
7451         entry_length = mode_page->entry_length;
7452
7453         bus = mode_page->bus;
7454
7455         for (i = 0; i < mode_page->num_entries; i++) {
7456                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7457                         dev_err(&ioa_cfg->pdev->dev,
7458                                 "Term power is absent on scsi bus %d\n",
7459                                 bus->res_addr.bus);
7460                 }
7461
7462                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7463         }
7464 }
7465
7466 /**
7467  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7468  * @ioa_cfg:    ioa config struct
7469  *
7470  * Looks through the config table checking for SES devices. If
7471  * the SES device is in the SES table indicating a maximum SCSI
7472  * bus speed, the speed is limited for the bus.
7473  *
7474  * Return value:
7475  *      none
7476  **/
7477 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7478 {
7479         u32 max_xfer_rate;
7480         int i;
7481
7482         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7483                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7484                                                        ioa_cfg->bus_attr[i].bus_width);
7485
7486                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7487                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7488         }
7489 }
7490
7491 /**
7492  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7493  * @ioa_cfg:    ioa config struct
7494  * @mode_pages: mode page 28 buffer
7495  *
7496  * Updates mode page 28 based on driver configuration
7497  *
7498  * Return value:
7499  *      none
7500  **/
7501 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7502                                           struct ipr_mode_pages *mode_pages)
7503 {
7504         int i, entry_length;
7505         struct ipr_dev_bus_entry *bus;
7506         struct ipr_bus_attributes *bus_attr;
7507         struct ipr_mode_page28 *mode_page;
7508
7509         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7510                                       sizeof(struct ipr_mode_page28));
7511
7512         entry_length = mode_page->entry_length;
7513
7514         /* Loop for each device bus entry */
7515         for (i = 0, bus = mode_page->bus;
7516              i < mode_page->num_entries;
7517              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7518                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7519                         dev_err(&ioa_cfg->pdev->dev,
7520                                 "Invalid resource address reported: 0x%08X\n",
7521                                 IPR_GET_PHYS_LOC(bus->res_addr));
7522                         continue;
7523                 }
7524
7525                 bus_attr = &ioa_cfg->bus_attr[i];
7526                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7527                 bus->bus_width = bus_attr->bus_width;
7528                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7529                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7530                 if (bus_attr->qas_enabled)
7531                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7532                 else
7533                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7534         }
7535 }
7536
7537 /**
7538  * ipr_build_mode_select - Build a mode select command
7539  * @ipr_cmd:    ipr command struct
7540  * @res_handle: resource handle to send command to
7541  * @parm:               Byte 2 of the Mode Select command
7542  * @dma_addr:   DMA buffer address
7543  * @xfer_len:   data transfer length
7544  *
7545  * Return value:
7546  *      none
7547  **/
7548 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7549                                   __be32 res_handle, u8 parm,
7550                                   dma_addr_t dma_addr, u8 xfer_len)
7551 {
7552         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7553
7554         ioarcb->res_handle = res_handle;
7555         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7556         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7557         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7558         ioarcb->cmd_pkt.cdb[1] = parm;
7559         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7560
7561         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7562 }
7563
7564 /**
7565  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7566  * @ipr_cmd:    ipr command struct
7567  *
7568  * This function sets up the SCSI bus attributes and sends
7569  * a Mode Select for Page 28 to activate them.
7570  *
7571  * Return value:
7572  *      IPR_RC_JOB_RETURN
7573  **/
7574 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7575 {
7576         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7577         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7578         int length;
7579
7580         ENTER;
7581         ipr_scsi_bus_speed_limit(ioa_cfg);
7582         ipr_check_term_power(ioa_cfg, mode_pages);
7583         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
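             /*
              * Compute the full transfer length before clearing the
              * header: the mode data length field is reserved for MODE
              * SELECT, so it is zeroed before the buffer is sent back
              * to the IOA.
              */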
7584         length = mode_pages->hdr.length + 1;
7585         mode_pages->hdr.length = 0;
7586
7587         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7588                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7589                               length);
7590
7591         ipr_cmd->job_step = ipr_set_supported_devs;
7592         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7593                                     struct ipr_resource_entry, queue);
7594         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7595
7596         LEAVE;
7597         return IPR_RC_JOB_RETURN;
7598 }
7599
7600 /**
7601  * ipr_build_mode_sense - Builds a mode sense command
7602  * @ipr_cmd:    ipr command struct
7603  * @res:                resource entry struct
7604  * @res_handle: resource handle to send command to
7605  * @dma_addr:   DMA address of mode sense buffer
7606  * @xfer_len:   Size of DMA buffer
7607  *
7608  * Return value:
7609  *      none
7610  **/
7611 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7612                                  __be32 res_handle,
7613                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7614 {
7615         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7616
7617         ioarcb->res_handle = res_handle;
7618         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7619         ioarcb->cmd_pkt.cdb[2] = parm;
7620         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7621         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7622
7623         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7624 }
7625
7626 /**
7627  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7628  * @ipr_cmd:    ipr command struct
7629  *
7630  * This function handles the failure of an IOA bringup command.
7631  *
7632  * Return value:
7633  *      IPR_RC_JOB_RETURN
7634  **/
7635 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7636 {
7637         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7638         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7639
7640         dev_err(&ioa_cfg->pdev->dev,
7641                 "0x%02X failed with IOASC: 0x%08X\n",
7642                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7643
7644         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7645         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7646         return IPR_RC_JOB_RETURN;
7647 }
7648
7649 /**
7650  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7651  * @ipr_cmd:    ipr command struct
7652  *
7653  * This function handles the failure of a Mode Sense to the IOAFP.
7654  * Some adapters do not handle all mode pages.
7655  *
7656  * Return value:
7657  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7658  **/
7659 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7660 {
7661         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7662         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7663
7664         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7665                 ipr_cmd->job_step = ipr_set_supported_devs;
7666                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7667                                             struct ipr_resource_entry, queue);
7668                 return IPR_RC_JOB_CONTINUE;
7669         }
7670
7671         return ipr_reset_cmd_failed(ipr_cmd);
7672 }
7673
7674 /**
7675  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7676  * @ipr_cmd:    ipr command struct
7677  *
7678  * This function sends a Page 28 mode sense to the IOA to
7679  * retrieve SCSI bus attributes.
7680  *
7681  * Return value:
7682  *      IPR_RC_JOB_RETURN
7683  **/
7684 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7685 {
7686         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7687
7688         ENTER;
7689         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7690                              0x28, ioa_cfg->vpd_cbs_dma +
7691                              offsetof(struct ipr_misc_cbs, mode_pages),
7692                              sizeof(struct ipr_mode_pages));
7693
7694         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7695         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7696
7697         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7698
7699         LEAVE;
7700         return IPR_RC_JOB_RETURN;
7701 }
7702
7703 /**
7704  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7705  * @ipr_cmd:    ipr command struct
7706  *
7707  * This function enables dual IOA RAID support if possible.
7708  *
7709  * Return value:
7710  *      IPR_RC_JOB_RETURN
7711  **/
7712 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7713 {
7714         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7715         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7716         struct ipr_mode_page24 *mode_page;
7717         int length;
7718
7719         ENTER;
7720         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7721                                       sizeof(struct ipr_mode_page24));
7722
7723         if (mode_page)
7724                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7725
7726         length = mode_pages->hdr.length + 1;
7727         mode_pages->hdr.length = 0;
7728
7729         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7730                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7731                               length);
7732
7733         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7734         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7735
7736         LEAVE;
7737         return IPR_RC_JOB_RETURN;
7738 }
7739
7740 /**
7741  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7742  * @ipr_cmd:    ipr command struct
7743  *
7744  * This function handles the failure of a Mode Sense to the IOAFP.
7745  * Some adapters do not handle all mode pages.
7746  *
7747  * Return value:
7748  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7749  **/
7750 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7751 {
7752         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7753
7754         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7755                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7756                 return IPR_RC_JOB_CONTINUE;
7757         }
7758
7759         return ipr_reset_cmd_failed(ipr_cmd);
7760 }
7761
7762 /**
7763  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7764  * @ipr_cmd:    ipr command struct
7765  *
7766  * This function sends a mode sense to the IOA to retrieve
7767  * the IOA Advanced Function Control mode page.
7768  *
7769  * Return value:
7770  *      IPR_RC_JOB_RETURN
7771  **/
7772 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7773 {
7774         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7775
7776         ENTER;
7777         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7778                              0x24, ioa_cfg->vpd_cbs_dma +
7779                              offsetof(struct ipr_misc_cbs, mode_pages),
7780                              sizeof(struct ipr_mode_pages));
7781
7782         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7783         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7784
7785         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7786
7787         LEAVE;
7788         return IPR_RC_JOB_RETURN;
7789 }
7790
7791 /**
7792  * ipr_init_res_table - Initialize the resource table
7793  * @ipr_cmd:    ipr command struct
7794  *
7795  * This function looks through the existing resource table, comparing
7796  * it with the config table. This function will take care of old/new
7797  * devices and schedule adding/removing them from the mid-layer
7798  * as appropriate.
7799  *
7800  * Return value:
7801  *      IPR_RC_JOB_CONTINUE
7802  **/
7803 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7804 {
7805         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7806         struct ipr_resource_entry *res, *temp;
7807         struct ipr_config_table_entry_wrapper cfgtew;
7808         int entries, found, flag, i;
7809         LIST_HEAD(old_res);
7810
7811         ENTER;
7812         if (ioa_cfg->sis64)
7813                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7814         else
7815                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7816
7817         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7818                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7819
7820         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7821                 list_move_tail(&res->queue, &old_res);
7822
7823         if (ioa_cfg->sis64)
7824                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7825         else
7826                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7827
7828         for (i = 0; i < entries; i++) {
7829                 if (ioa_cfg->sis64)
7830                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7831                 else
7832                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7833                 found = 0;
7834
7835                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7836                         if (ipr_is_same_device(res, &cfgtew)) {
7837                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7838                                 found = 1;
7839                                 break;
7840                         }
7841                 }
7842
7843                 if (!found) {
7844                         if (list_empty(&ioa_cfg->free_res_q)) {
7845                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7846                                 break;
7847                         }
7848
7849                         found = 1;
7850                         res = list_entry(ioa_cfg->free_res_q.next,
7851                                          struct ipr_resource_entry, queue);
7852                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7853                         ipr_init_res_entry(res, &cfgtew);
7854                         res->add_to_ml = 1;
7855                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7856                         res->sdev->allow_restart = 1;
7857
7858                 if (found)
7859                         ipr_update_res_entry(res, &cfgtew);
7860         }
7861
7862         list_for_each_entry_safe(res, temp, &old_res, queue) {
7863                 if (res->sdev) {
7864                         res->del_from_ml = 1;
7865                         res->res_handle = IPR_INVALID_RES_HANDLE;
7866                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7867                 }
7868         }
7869
7870         list_for_each_entry_safe(res, temp, &old_res, queue) {
7871                 ipr_clear_res_target(res);
7872                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7873         }
7874
7875         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7876                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7877         else
7878                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7879
7880         LEAVE;
7881         return IPR_RC_JOB_CONTINUE;
7882 }
7883
7884 /**
7885  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7886  * @ipr_cmd:    ipr command struct
7887  *
7888  * This function sends a Query IOA Configuration command
7889  * to the adapter to retrieve the IOA configuration table.
7890  *
7891  * Return value:
7892  *      IPR_RC_JOB_RETURN
7893  **/
7894 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7895 {
7896         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7897         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7898         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7899         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7900
7901         ENTER;
7902         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7903                 ioa_cfg->dual_raid = 1;
7904         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7905                  ucode_vpd->major_release, ucode_vpd->card_type,
7906                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7907         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7908         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7909
7910         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7911         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7912         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7913         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7914
7915         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7916                        IPR_IOADL_FLAGS_READ_LAST);
7917
7918         ipr_cmd->job_step = ipr_init_res_table;
7919
7920         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7921
7922         LEAVE;
7923         return IPR_RC_JOB_RETURN;
7924 }
7925
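     /**
      * ipr_ioa_service_action_failed - Handle failure of an IOA Service Action
      * @ipr_cmd:    ipr command struct
      *
      * Adapters that do not implement the requested service action reject
      * it as an invalid request type; treat that as success and continue.
      *
      * Return value:
      *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
      **/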
7926 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7927 {
7928         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7929
7930         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7931                 return IPR_RC_JOB_CONTINUE;
7932
7933         return ipr_reset_cmd_failed(ipr_cmd);
7934 }
7935
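
     /**
      * ipr_build_ioa_service_action - Build an IOA Service Action command
      * @ipr_cmd:    ipr command struct
      * @res_handle: resource handle to send command to
      * @sa_code:    service action code
      *
      * Return value:
      *      none
      **/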
7936 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7937                                          __be32 res_handle, u8 sa_code)
7938 {
7939         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7940
7941         ioarcb->res_handle = res_handle;
7942         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7943         ioarcb->cmd_pkt.cdb[1] = sa_code;
7944         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7945 }
7946
7947 /**
7948  * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service action
7949  * @ipr_cmd:    ipr command struct
7950  *
7951  * Return value:
7952  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7953  **/
7954 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7955 {
7956         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7957         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7958         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7959
7960         ENTER;
7961
7962         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7963
7964         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7965                 ipr_build_ioa_service_action(ipr_cmd,
7966                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7967                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7968
7969                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7970
7971                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7972                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7973                            IPR_SET_SUP_DEVICE_TIMEOUT);
7974
7975                 LEAVE;
7976                 return IPR_RC_JOB_RETURN;
7977         }
7978
7979         LEAVE;
7980         return IPR_RC_JOB_CONTINUE;
7981 }
7982
7983 /**
7984  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7985  * @ipr_cmd:    ipr command struct
      * @flags:      inquiry command flags (CDB byte 1)
      * @page:       inquiry page code
      * @dma_addr:   DMA address of the inquiry buffer
      * @xfer_len:   size of the inquiry buffer
7986  *
7987  * This utility function sends an inquiry to the adapter.
7988  *
7989  * Return value:
7990  *      none
7991  **/
7992 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7993                               dma_addr_t dma_addr, u8 xfer_len)
7994 {
7995         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7996
7997         ENTER;
7998         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7999         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8000
8001         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8002         ioarcb->cmd_pkt.cdb[1] = flags;
8003         ioarcb->cmd_pkt.cdb[2] = page;
8004         ioarcb->cmd_pkt.cdb[4] = xfer_len;
8005
8006         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8007
8008         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8009         LEAVE;
8010 }
8011
8012 /**
8013  * ipr_inquiry_page_supported - Is the given inquiry page supported
8014  * @page0:              inquiry page 0 buffer
8015  * @page:               page code.
8016  *
8017  * This function determines if the specified inquiry page is supported.
8018  *
8019  * Return value:
8020  *      1 if page is supported / 0 if not
8021  **/
8022 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8023 {
8024         int i;
8025
8026         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8027                 if (page0->page[i] == page)
8028                         return 1;
8029
8030         return 0;
8031 }
8032
8033 /**
8034  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8035  * @ipr_cmd:    ipr command struct
8036  *
8037  * This function sends a Page 0xC4 inquiry to the adapter
8038  * to retrieve software VPD information.
8039  *
8040  * Return value:
8041  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8042  **/
8043 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8044 {
8045         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8046         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8047         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8048
8049         ENTER;
8050         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8051         memset(pageC4, 0, sizeof(*pageC4));
8052
8053         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8054                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8055                                   (ioa_cfg->vpd_cbs_dma
8056                                    + offsetof(struct ipr_misc_cbs,
8057                                               pageC4_data)),
8058                                   sizeof(struct ipr_inquiry_pageC4));
8059                 return IPR_RC_JOB_RETURN;
8060         }
8061
8062         LEAVE;
8063         return IPR_RC_JOB_CONTINUE;
8064 }
8065
8066 /**
8067  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8068  * @ipr_cmd:    ipr command struct
8069  *
8070  * This function sends a Page 0xD0 inquiry to the adapter
8071  * to retrieve adapter capabilities.
8072  *
8073  * Return value:
8074  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8075  **/
8076 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8077 {
8078         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8079         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8080         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8081
8082         ENTER;
8083         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8084         memset(cap, 0, sizeof(*cap));
8085
8086         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8087                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8088                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8089                                   sizeof(struct ipr_inquiry_cap));
8090                 return IPR_RC_JOB_RETURN;
8091         }
8092
8093         LEAVE;
8094         return IPR_RC_JOB_CONTINUE;
8095 }
8096
8097 /**
8098  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8099  * @ipr_cmd:    ipr command struct
8100  *
8101  * This function sends a Page 3 inquiry to the adapter
8102  * to retrieve software VPD information.
8103  *
8104  * Return value:
8105  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8106  **/
8107 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8108 {
8109         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8110
8111         ENTER;
8112
8113         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8114
8115         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8116                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8117                           sizeof(struct ipr_inquiry_page3));
8118
8119         LEAVE;
8120         return IPR_RC_JOB_RETURN;
8121 }
8122
8123 /**
8124  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8125  * @ipr_cmd:    ipr command struct
8126  *
8127  * This function sends a Page 0 inquiry to the adapter
8128  * to retrieve supported inquiry pages.
8129  *
8130  * Return value:
8131  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8132  **/
8133 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8134 {
8135         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8136         char type[5];
8137
8138         ENTER;
8139
8140         /* Grab the type out of the VPD and store it away */
8141         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8142         type[4] = '\0';
8143         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8144
8145         if (ipr_invalid_adapter(ioa_cfg)) {
8146                 dev_err(&ioa_cfg->pdev->dev,
8147                         "Adapter not supported in this hardware configuration.\n");
8148
8149                 if (!ipr_testmode) {
8150                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8151                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8152                         list_add_tail(&ipr_cmd->queue,
8153                                         &ioa_cfg->hrrq->hrrq_free_q);
8154                         return IPR_RC_JOB_RETURN;
8155                 }
8156         }
8157
8158         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8159
8160         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8161                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8162                           sizeof(struct ipr_inquiry_page0));
8163
8164         LEAVE;
8165         return IPR_RC_JOB_RETURN;
8166 }
8167
8168 /**
8169  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8170  * @ipr_cmd:    ipr command struct
8171  *
8172  * This function sends a standard inquiry to the adapter.
8173  *
8174  * Return value:
8175  *      IPR_RC_JOB_RETURN
8176  **/
8177 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8178 {
8179         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8180
8181         ENTER;
8182         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8183
8184         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8185                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8186                           sizeof(struct ipr_ioa_vpd));
8187
8188         LEAVE;
8189         return IPR_RC_JOB_RETURN;
8190 }
8191
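     /*
      * Adapter bring-up runs as a chain of job steps, each function
      * queuing its successor via ipr_cmd->job_step.  Starting from
      * ipr_ioafp_identify_hrrq() below, the sequence is: identify the
      * host RRQs, standard inquiry, inquiry pages 0, 3, 0xD0 and 0xC4,
      * set caching parameters, query the IOA configuration table,
      * rebuild the resource table, mode sense/select page 24 (dual-IOA
      * RAID only) and page 28, and finally set supported devices
      * before ipr_ioa_reset_done() completes the reset.
      */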
8192 /**
8193  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8194  * @ipr_cmd:    ipr command struct
8195  *
8196  * This function sends an Identify Host Request Response Queue
8197  * command to establish the HRRQ with the adapter.
8198  *
8199  * Return value:
8200  *      IPR_RC_JOB_RETURN
8201  **/
8202 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8203 {
8204         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8205         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8206         struct ipr_hrr_queue *hrrq;
8207
8208         ENTER;
8209         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8210         if (ioa_cfg->identify_hrrq_index == 0)
8211                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8212
8213         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8214                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8215
8216                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8217                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8218
8219                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8220                 if (ioa_cfg->sis64)
8221                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8222
8223                 if (ioa_cfg->nvectors == 1)
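                     /*
                      * With more than one interrupt vector the HRRQs
                      * are identified individually:
                      * IPR_ID_HRRQ_SELE_ENABLE tells the adapter which
                      * queue number (cdb[9] and cdb[14] below) this
                      * Identify applies to.
                      */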
8224                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8225                 else
8226                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8227
8228                 ioarcb->cmd_pkt.cdb[2] =
8229                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8230                 ioarcb->cmd_pkt.cdb[3] =
8231                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8232                 ioarcb->cmd_pkt.cdb[4] =
8233                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8234                 ioarcb->cmd_pkt.cdb[5] =
8235                         ((u64) hrrq->host_rrq_dma) & 0xff;
8236                 ioarcb->cmd_pkt.cdb[7] =
8237                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8238                 ioarcb->cmd_pkt.cdb[8] =
8239                         (sizeof(u32) * hrrq->size) & 0xff;
8240
8241                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8242                         ioarcb->cmd_pkt.cdb[9] =
8243                                         ioa_cfg->identify_hrrq_index;
8244
8245                 if (ioa_cfg->sis64) {
8246                         ioarcb->cmd_pkt.cdb[10] =
8247                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8248                         ioarcb->cmd_pkt.cdb[11] =
8249                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8250                         ioarcb->cmd_pkt.cdb[12] =
8251                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8252                         ioarcb->cmd_pkt.cdb[13] =
8253                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8254                 }
8255
8256                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8257                         ioarcb->cmd_pkt.cdb[14] =
8258                                         ioa_cfg->identify_hrrq_index;
8259
8260                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8261                            IPR_INTERNAL_TIMEOUT);
8262
8263                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8264                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8265
8266                 LEAVE;
8267                 return IPR_RC_JOB_RETURN;
8268         }
8269
8270         LEAVE;
8271         return IPR_RC_JOB_CONTINUE;
8272 }
8273
8274 /**
8275  * ipr_reset_timer_done - Adapter reset timer function
8276  * @t:          Timer context used to fetch ipr command struct
8277  *
8278  * Description: This function is used in adapter reset processing
8279  * for timing events. If the reset_cmd pointer in the IOA
8280  * config struct is not this command, we are doing nested
8281  * resets and fail_all_ops will take care of freeing the
8282  * command block.
8283  *
8284  * Return value:
8285  *      none
8286  **/
8287 static void ipr_reset_timer_done(struct timer_list *t)
8288 {
8289         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8290         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8291         unsigned long lock_flags = 0;
8292
8293         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8294
8295         if (ioa_cfg->reset_cmd == ipr_cmd) {
8296                 list_del(&ipr_cmd->queue);
8297                 ipr_cmd->done(ipr_cmd);
8298         }
8299
8300         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8301 }
8302
8303 /**
8304  * ipr_reset_start_timer - Start a timer for adapter reset job
8305  * @ipr_cmd:    ipr command struct
8306  * @timeout:    timeout value
8307  *
8308  * Description: This function is used in adapter reset processing
8309  * for timing events. If the reset_cmd pointer in the IOA
8310  * config struct is not this command, we are doing nested
8311  * resets and fail_all_ops will take care of freeing the
8312  * command block.
8313  *
8314  * Return value:
8315  *      none
8316  **/
8317 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8318                                   unsigned long timeout)
8319 {
8320
8321         ENTER;
8322         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8323         ipr_cmd->done = ipr_reset_ioa_job;
8324
8325         ipr_cmd->timer.expires = jiffies + timeout;
8326         ipr_cmd->timer.function = ipr_reset_timer_done;
8327         add_timer(&ipr_cmd->timer);
8328 }
8329
8330 /**
8331  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8332  * @ioa_cfg:    ioa cfg struct
8333  *
8334  * Return value:
8335  *      nothing
8336  **/
8337 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8338 {
8339         struct ipr_hrr_queue *hrrq;
8340
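             /*
              * Reset each host RRQ: clear the queue memory, point the
              * current-entry pointer back at the start, and reset the
              * toggle bit used to distinguish fresh adapter entries
              * from stale ones after each wrap of the circular queue.
              */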
8341         for_each_hrrq(hrrq, ioa_cfg) {
8342                 spin_lock(&hrrq->_lock);
8343                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8344
8345                 /* Initialize Host RRQ pointers */
8346                 hrrq->hrrq_start = hrrq->host_rrq;
8347                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8348                 hrrq->hrrq_curr = hrrq->hrrq_start;
8349                 hrrq->toggle_bit = 1;
8350                 spin_unlock(&hrrq->_lock);
8351         }
8352         wmb();
8353
8354         ioa_cfg->identify_hrrq_index = 0;
8355         if (ioa_cfg->hrrq_num == 1)
8356                 atomic_set(&ioa_cfg->hrrq_index, 0);
8357         else
8358                 atomic_set(&ioa_cfg->hrrq_index, 1);
8359
8360         /* Zero out config table */
8361         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8362 }
8363
8364 /**
8365  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8366  * @ipr_cmd:    ipr command struct
8367  *
8368  * Return value:
8369  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8370  **/
8371 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8372 {
8373         unsigned long stage, stage_time;
8374         u32 feedback;
8375         volatile u32 int_reg;
8376         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8377         u64 maskval = 0;
8378
8379         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8380         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8381         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8382
8383         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8384
8385         /* sanity check the stage_time value */
8386         if (stage_time == 0)
8387                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8388         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8389                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8390         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8391                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8392
8393         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8394                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8395                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8396                 stage_time = ioa_cfg->transop_timeout;
8397                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8398         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8399                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8400                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8401                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8402                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8403                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8404                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8405                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8406                         return IPR_RC_JOB_CONTINUE;
8407                 }
8408         }
8409
8410         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8411         ipr_cmd->timer.function = ipr_oper_timeout;
8412         ipr_cmd->done = ipr_reset_ioa_job;
8413         add_timer(&ipr_cmd->timer);
8414
8415         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8416
8417         return IPR_RC_JOB_RETURN;
8418 }
8419
8420 /**
8421  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8422  * @ipr_cmd:    ipr command struct
8423  *
8424  * This function reinitializes some control blocks and
8425  * enables destructive diagnostics on the adapter.
8426  *
8427  * Return value:
8428  *      IPR_RC_JOB_RETURN
8429  **/
8430 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8431 {
8432         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8433         volatile u32 int_reg;
8434         volatile u64 maskval;
8435         int i;
8436
8437         ENTER;
8438         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8439         ipr_init_ioa_mem(ioa_cfg);
8440
8441         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8442                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8443                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8444                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8445         }
8446         if (ioa_cfg->sis64) {
8447                 /* Set the adapter to the correct endian mode. */
8448                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8449                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8450         }
8451
8452         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8453
8454         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8455                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8456                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8457                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8458                 return IPR_RC_JOB_CONTINUE;
8459         }
8460
8461         /* Enable destructive diagnostics on IOA */
8462         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8463
8464         if (ioa_cfg->sis64) {
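                /*
                 * Clear both halves of the 64-bit interrupt mask in one
                 * write: IPL stage change in the upper 32 bits and the
                 * normal operational interrupts in the lower 32 bits.
                 */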
8465                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8466                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8467                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8468         } else
8469                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8470
8471         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8472
8473         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8474
8475         if (ioa_cfg->sis64) {
8476                 ipr_cmd->job_step = ipr_reset_next_stage;
8477                 return IPR_RC_JOB_CONTINUE;
8478         }
8479
8480         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8481         ipr_cmd->timer.function = ipr_oper_timeout;
8482         ipr_cmd->done = ipr_reset_ioa_job;
8483         add_timer(&ipr_cmd->timer);
8484         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8485
8486         LEAVE;
8487         return IPR_RC_JOB_RETURN;
8488 }
8489
8490 /**
8491  * ipr_reset_wait_for_dump - Wait for a dump to time out.
8492  * @ipr_cmd:    ipr command struct
8493  *
8494  * This function is invoked when an adapter dump has run out
8495  * of processing time.
8496  *
8497  * Return value:
8498  *      IPR_RC_JOB_CONTINUE
8499  **/
8500 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8501 {
8502         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8503
8504         if (ioa_cfg->sdt_state == GET_DUMP)
8505                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8506         else if (ioa_cfg->sdt_state == READ_DUMP)
8507                 ioa_cfg->sdt_state = ABORT_DUMP;
8508
8509         ioa_cfg->dump_timeout = 1;
8510         ipr_cmd->job_step = ipr_reset_alert;
8511
8512         return IPR_RC_JOB_CONTINUE;
8513 }
8514
8515 /**
8516  * ipr_unit_check_no_data - Log a unit check/no data error log
8517  * @ioa_cfg:            ioa config struct
8518  *
8519  * Logs an error indicating the adapter unit checked, but for some
8520  * reason, we were unable to fetch the unit check buffer.
8521  *
8522  * Return value:
8523  *      nothing
8524  **/
8525 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8526 {
8527         ioa_cfg->errors_logged++;
8528         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8529 }
8530
8531 /**
8532  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8533  * @ioa_cfg:            ioa config struct
8534  *
8535  * Fetches the unit check buffer from the adapter by clocking the data
8536  * through the mailbox register.
8537  *
8538  * Return value:
8539  *      nothing
8540  **/
8541 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8542 {
8543         unsigned long mailbox;
8544         struct ipr_hostrcb *hostrcb;
8545         struct ipr_uc_sdt sdt;
8546         int rc, length;
8547         u32 ioasc;
8548
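        /*
         * The mailbox register supplies the location of the smart dump
         * table (SDT) describing the unit check buffer.
         */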
8549         mailbox = readl(ioa_cfg->ioa_mailbox);
8550
8551         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8552                 ipr_unit_check_no_data(ioa_cfg);
8553                 return;
8554         }
8555
8556         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8557         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8558                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8559
8560         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8561             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8562             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8563                 ipr_unit_check_no_data(ioa_cfg);
8564                 return;
8565         }
8566
8567         /* Find length of the first sdt entry (UC buffer) */
8568         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8569                 length = be32_to_cpu(sdt.entry[0].end_token);
8570         else
8571                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8572                           be32_to_cpu(sdt.entry[0].start_token)) &
8573                           IPR_FMT2_MBX_ADDR_MASK;
8574
8575         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8576                              struct ipr_hostrcb, queue);
8577         list_del_init(&hostrcb->queue);
8578         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8579
8580         rc = ipr_get_ldump_data_section(ioa_cfg,
8581                                         be32_to_cpu(sdt.entry[0].start_token),
8582                                         (__be32 *)&hostrcb->hcam,
8583                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8584
8585         if (!rc) {
8586                 ipr_handle_log_data(ioa_cfg, hostrcb);
8587                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8588                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8589                     ioa_cfg->sdt_state == GET_DUMP)
8590                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8591         } else
8592                 ipr_unit_check_no_data(ioa_cfg);
8593
8594         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8595 }
8596
8597 /**
8598  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8599  * @ipr_cmd:    ipr command struct
8600  *
8601  * Description: This function fetches the unit check buffer from the adapter.
8602  *
8603  * Return value:
8604  *      IPR_RC_JOB_RETURN
8605  **/
8606 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8607 {
8608         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8609
8610         ENTER;
8611         ioa_cfg->ioa_unit_checked = 0;
8612         ipr_get_unit_check_buffer(ioa_cfg);
8613         ipr_cmd->job_step = ipr_reset_alert;
8614         ipr_reset_start_timer(ipr_cmd, 0);
8615
8616         LEAVE;
8617         return IPR_RC_JOB_RETURN;
8618 }
8619
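/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:    ipr command struct
 *
 * Description: On SIS64 adapters this polls until the IOA reports a
 * stable mailbox register, or until the wait times out, before moving
 * the dump state to READ_DUMP and scheduling the dump worker.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/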
8620 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8621 {
8622         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8623
8624         ENTER;
8625
8626         if (ioa_cfg->sdt_state != GET_DUMP)
8627                 return IPR_RC_JOB_RETURN;
8628
8629         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8630             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8631              IPR_PCII_MAILBOX_STABLE)) {
8632
8633                 if (!ipr_cmd->u.time_left)
8634                         dev_err(&ioa_cfg->pdev->dev,
8635                                 "Timed out waiting for Mailbox register.\n");
8636
8637                 ioa_cfg->sdt_state = READ_DUMP;
8638                 ioa_cfg->dump_timeout = 0;
8639                 if (ioa_cfg->sis64)
8640                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8641                 else
8642                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8643                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8644                 schedule_work(&ioa_cfg->work_q);
8645
8646         } else {
8647                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8648                 ipr_reset_start_timer(ipr_cmd,
8649                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8650         }
8651
8652         LEAVE;
8653         return IPR_RC_JOB_RETURN;
8654 }
8655
8656 /**
8657  * ipr_reset_restore_cfg_space - Restore PCI config space.
8658  * @ipr_cmd:    ipr command struct
8659  *
8660  * Description: This function restores the saved PCI config space of
8661  * the adapter, fails all outstanding ops back to the callers, and
8662  * fetches the dump/unit check if applicable to this reset.
8663  *
8664  * Return value:
8665  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8666  **/
8667 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8668 {
8669         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8670         u32 int_reg;
8671
8672         ENTER;
8673         ioa_cfg->pdev->state_saved = true;
8674         pci_restore_state(ioa_cfg->pdev);
8675
8676         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8677                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8678                 return IPR_RC_JOB_CONTINUE;
8679         }
8680
8681         ipr_fail_all_ops(ioa_cfg);
8682
8683         if (ioa_cfg->sis64) {
8684                 /* Set the adapter to the correct endian mode. */
8685                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8686                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8687         }
8688
8689         if (ioa_cfg->ioa_unit_checked) {
8690                 if (ioa_cfg->sis64) {
8691                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8692                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8693                         return IPR_RC_JOB_RETURN;
8694                 } else {
8695                         ioa_cfg->ioa_unit_checked = 0;
8696                         ipr_get_unit_check_buffer(ioa_cfg);
8697                         ipr_cmd->job_step = ipr_reset_alert;
8698                         ipr_reset_start_timer(ipr_cmd, 0);
8699                         return IPR_RC_JOB_RETURN;
8700                 }
8701         }
8702
8703         if (ioa_cfg->in_ioa_bringdown) {
8704                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8705         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8706                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8707                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8708         } else {
8709                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8710         }
8711
8712         LEAVE;
8713         return IPR_RC_JOB_CONTINUE;
8714 }
8715
8716 /**
8717  * ipr_reset_bist_done - BIST has completed on the adapter.
8718  * @ipr_cmd:    ipr command struct
8719  *
8720  * Description: Unblock config space and resume the reset process.
8721  *
8722  * Return value:
8723  *      IPR_RC_JOB_CONTINUE
8724  **/
8725 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8726 {
8727         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8728
8729         ENTER;
8730         if (ioa_cfg->cfg_locked)
8731                 pci_cfg_access_unlock(ioa_cfg->pdev);
8732         ioa_cfg->cfg_locked = 0;
8733         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8734         LEAVE;
8735         return IPR_RC_JOB_CONTINUE;
8736 }
8737
8738 /**
8739  * ipr_reset_start_bist - Run BIST on the adapter.
8740  * @ipr_cmd:    ipr command struct
8741  *
8742  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8743  *
8744  * Return value:
8745  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8746  **/
8747 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8748 {
8749         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8750         int rc = PCIBIOS_SUCCESSFUL;
8751
8752         ENTER;
8753         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8754                 writel(IPR_UPROCI_SIS64_START_BIST,
8755                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8756         else
8757                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8758
8759         if (rc == PCIBIOS_SUCCESSFUL) {
8760                 ipr_cmd->job_step = ipr_reset_bist_done;
8761                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8762                 rc = IPR_RC_JOB_RETURN;
8763         } else {
8764                 if (ioa_cfg->cfg_locked)
8765                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8766                 ioa_cfg->cfg_locked = 0;
8767                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8768                 rc = IPR_RC_JOB_CONTINUE;
8769         }
8770
8771         LEAVE;
8772         return rc;
8773 }
8774
8775 /**
8776  * ipr_reset_slot_reset_done - Delay after PCI reset has been cleared
8777  * @ipr_cmd:    ipr command struct
8778  *
8779  * Description: This delays two seconds after PCI reset to the adapter has been cleared.
8780  *
8781  * Return value:
8782  *      IPR_RC_JOB_RETURN
8783  **/
8784 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8785 {
8786         ENTER;
8787         ipr_cmd->job_step = ipr_reset_bist_done;
8788         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8789         LEAVE;
8790         return IPR_RC_JOB_RETURN;
8791 }
8792
8793 /**
8794  * ipr_reset_reset_work - Pulse a PCIe warm reset
8795  * @work:       work struct
8796  *
8797  * Description: This pulses a warm reset to a slot.
8798  *
8799  **/
8800 static void ipr_reset_reset_work(struct work_struct *work)
8801 {
8802         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8803         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8804         struct pci_dev *pdev = ioa_cfg->pdev;
8805         unsigned long lock_flags = 0;
8806
8807         ENTER;
8808         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8809         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8810         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8811
8812         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8813         if (ioa_cfg->reset_cmd == ipr_cmd)
8814                 ipr_reset_ioa_job(ipr_cmd);
8815         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8816         LEAVE;
8817 }
8818
8819 /**
8820  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8821  * @ipr_cmd:    ipr command struct
8822  *
8823  * Description: This queues work to pulse a PCI warm reset to the adapter.
8824  *
8825  * Return value:
8826  *      IPR_RC_JOB_RETURN
8827  **/
8828 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8829 {
8830         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8831
8832         ENTER;
8833         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8834         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8835         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8836         LEAVE;
8837         return IPR_RC_JOB_RETURN;
8838 }
8839
8840 /**
8841  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8842  * @ipr_cmd:    ipr command struct
8843  *
8844  * Description: This attempts to block config access to the IOA.
8845  *
8846  * Return value:
8847  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8848  **/
8849 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8850 {
8851         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8852         int rc = IPR_RC_JOB_CONTINUE;
8853
8854         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8855                 ioa_cfg->cfg_locked = 1;
8856                 ipr_cmd->job_step = ioa_cfg->reset;
8857         } else {
8858                 if (ipr_cmd->u.time_left) {
8859                         rc = IPR_RC_JOB_RETURN;
8860                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8861                         ipr_reset_start_timer(ipr_cmd,
8862                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8863                 } else {
8864                         ipr_cmd->job_step = ioa_cfg->reset;
8865                         dev_err(&ioa_cfg->pdev->dev,
8866                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8867                 }
8868         }
8869
8870         return rc;
8871 }
8872
8873 /**
8874  * ipr_reset_block_config_access - Block config access to the IOA
8875  * @ipr_cmd:    ipr command struct
8876  *
8877  * Description: This attempts to block config access to the IOA.
8878  *
8879  * Return value:
8880  *      IPR_RC_JOB_CONTINUE
8881  **/
8882 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8883 {
8884         ipr_cmd->ioa_cfg->cfg_locked = 0;
8885         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8886         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8887         return IPR_RC_JOB_CONTINUE;
8888 }
8889
8890 /**
8891  * ipr_reset_allowed - Query whether or not IOA can be reset
8892  * @ioa_cfg:    ioa config struct
8893  *
8894  * Return value:
8895  *      0 if reset not allowed / non-zero if reset is allowed
8896  **/
8897 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8898 {
8899         volatile u32 temp_reg;
8900
8901         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8902         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8903 }
8904
8905 /**
8906  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8907  * @ipr_cmd:    ipr command struct
8908  *
8909  * Description: This function waits for adapter permission to run BIST,
8910  * then runs BIST. If the adapter does not give permission after a
8911  * reasonable time, we will reset the adapter anyway. The impact of
8912  * resetting the adapter without warning the adapter is the risk of
8913  * losing the persistent error log on the adapter. If the adapter is
8914  * reset while it is writing to the flash on the adapter, the flash
8915  * segment will have bad ECC and be zeroed.
8916  *
8917  * Return value:
8918  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8919  **/
8920 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8921 {
8922         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8923         int rc = IPR_RC_JOB_RETURN;
8924
8925         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8926                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8927                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8928         } else {
8929                 ipr_cmd->job_step = ipr_reset_block_config_access;
8930                 rc = IPR_RC_JOB_CONTINUE;
8931         }
8932
8933         return rc;
8934 }
8935
8936 /**
8937  * ipr_reset_alert - Alert the adapter of a pending reset
8938  * @ipr_cmd:    ipr command struct
8939  *
8940  * Description: This function alerts the adapter that it will be reset.
8941  * If memory space is not currently enabled, proceed directly
8942  * to running BIST on the adapter. The timer must always be started
8943  * so we guarantee we do not run BIST from ipr_isr.
8944  *
8945  * Return value:
8946  *      IPR_RC_JOB_RETURN
8947  **/
8948 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8949 {
8950         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8951         u16 cmd_reg;
8952         int rc;
8953
8954         ENTER;
8955         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8956
8957         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8958                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8959                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8960                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8961         } else {
8962                 ipr_cmd->job_step = ipr_reset_block_config_access;
8963         }
8964
8965         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8966         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8967
8968         LEAVE;
8969         return IPR_RC_JOB_RETURN;
8970 }
8971
8972 /**
8973  * ipr_reset_quiesce_done - Complete IOA disconnect
8974  * @ipr_cmd:    ipr command struct
8975  *
8976  * Description: Freeze the adapter to complete quiesce processing
8977  *
8978  * Return value:
8979  *      IPR_RC_JOB_CONTINUE
8980  **/
8981 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8982 {
8983         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8984
8985         ENTER;
8986         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8987         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8988         LEAVE;
8989         return IPR_RC_JOB_CONTINUE;
8990 }
8991
8992 /**
8993  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8994  * @ipr_cmd:    ipr command struct
8995  *
8996  * Description: If nothing is outstanding to the IOA, proceed with the
8997  * IOA disconnect. Otherwise, reset the IOA.
8998  *
8999  * Return value:
9000  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9001  **/
9002 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9003 {
9004         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9005         struct ipr_cmnd *loop_cmd;
9006         struct ipr_hrr_queue *hrrq;
9007         int rc = IPR_RC_JOB_CONTINUE;
9008         int count = 0;
9009
9010         ENTER;
9011         ipr_cmd->job_step = ipr_reset_quiesce_done;
9012
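        /*
         * If any command is still outstanding to the adapter, the HCAM
         * cancels did not complete; initiate a full IOA reset instead
         * of proceeding with the quiesce.
         */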
9013         for_each_hrrq(hrrq, ioa_cfg) {
9014                 spin_lock(&hrrq->_lock);
9015                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9016                         count++;
9017                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9018                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9019                         rc = IPR_RC_JOB_RETURN;
9020                         break;
9021                 }
9022                 spin_unlock(&hrrq->_lock);
9023
9024                 if (count)
9025                         break;
9026         }
9027
9028         LEAVE;
9029         return rc;
9030 }
9031
9032 /**
9033  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9034  * @ipr_cmd:    ipr command struct
9035  *
9036  * Description: Cancel any outstanding HCAMs to the IOA.
9037  *
9038  * Return value:
9039  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9040  **/
9041 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9042 {
9043         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9044         int rc = IPR_RC_JOB_CONTINUE;
9045         struct ipr_cmd_pkt *cmd_pkt;
9046         struct ipr_cmnd *hcam_cmd;
9047         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9048
9049         ENTER;
9050         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9051
9052         if (!hrrq->ioa_is_dead) {
9053                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9054                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9055                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9056                                         continue;
9057
9058                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9059                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9060                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9062                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9063                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
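                                /*
                                 * The 64-bit DMA address of the HCAM's
                                 * IOARCB is split across the CDB: upper
                                 * 32 bits in bytes 10-13, lower 32 bits
                                 * in bytes 2-5, most significant byte
                                 * first.
                                 */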
9064                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9065                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9066                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9067                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9068                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9069                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9070                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9071                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9072
9073                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9074                                            IPR_CANCEL_TIMEOUT);
9075
9076                                 rc = IPR_RC_JOB_RETURN;
9077                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9078                                 break;
9079                         }
9080                 }
9081         } else
9082                 ipr_cmd->job_step = ipr_reset_alert;
9083
9084         LEAVE;
9085         return rc;
9086 }
9087
9088 /**
9089  * ipr_reset_ucode_download_done - Microcode download completion
9090  * @ipr_cmd:    ipr command struct
9091  *
9092  * Description: This function unmaps the microcode download buffer.
9093  *
9094  * Return value:
9095  *      IPR_RC_JOB_CONTINUE
9096  **/
9097 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9098 {
9099         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9100         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9101
9102         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9103                      sglist->num_sg, DMA_TO_DEVICE);
9104
9105         ipr_cmd->job_step = ipr_reset_alert;
9106         return IPR_RC_JOB_CONTINUE;
9107 }
9108
9109 /**
9110  * ipr_reset_ucode_download - Download microcode to the adapter
9111  * @ipr_cmd:    ipr command struct
9112  *
9113  * Description: This function checks to see if there is microcode
9114  * to download to the adapter. If there is, a download is performed.
9115  *
9116  * Return value:
9117  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9118  **/
9119 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9120 {
9121         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9122         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9123
9124         ENTER;
9125         ipr_cmd->job_step = ipr_reset_alert;
9126
9127         if (!sglist)
9128                 return IPR_RC_JOB_CONTINUE;
9129
9130         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9131         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9132         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9133         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
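        /* WRITE BUFFER carries a 24-bit parameter list length in CDB
         * bytes 6-8, most significant byte first. */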
9134         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9135         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9136         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9137
9138         if (ioa_cfg->sis64)
9139                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9140         else
9141                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9142         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9143
9144         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9145                    IPR_WRITE_BUFFER_TIMEOUT);
9146
9147         LEAVE;
9148         return IPR_RC_JOB_RETURN;
9149 }
9150
9151 /**
9152  * ipr_reset_shutdown_ioa - Shutdown the adapter
9153  * @ipr_cmd:    ipr command struct
9154  *
9155  * Description: This function issues an adapter shutdown of the
9156  * specified type to the specified adapter as part of the
9157  * adapter reset job.
9158  *
9159  * Return value:
9160  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9161  **/
9162 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9163 {
9164         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9165         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9166         unsigned long timeout;
9167         int rc = IPR_RC_JOB_CONTINUE;
9168
9169         ENTER;
9170         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9171                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9172         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9173                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9174                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9175                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9176                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9177                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9178
9179                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9180                         timeout = IPR_SHUTDOWN_TIMEOUT;
9181                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9182                         timeout = IPR_INTERNAL_TIMEOUT;
9183                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9184                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9185                 else
9186                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9187
9188                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9189
9190                 rc = IPR_RC_JOB_RETURN;
9191                 ipr_cmd->job_step = ipr_reset_ucode_download;
9192         } else
9193                 ipr_cmd->job_step = ipr_reset_alert;
9194
9195         LEAVE;
9196         return rc;
9197 }
9198
9199 /**
9200  * ipr_reset_ioa_job - Adapter reset job
9201  * @ipr_cmd:    ipr command struct
9202  *
9203  * Description: This function is the job router for the adapter reset job.
9204  *
9205  * Return value:
9206  *      none
9207  **/
9208 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9209 {
9210         u32 rc, ioasc;
9211         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9212
9213         do {
9214                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9215
9216                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9217                         /*
9218                          * We are doing nested adapter resets and this is
9219                          * not the current reset job.
9220                          */
9221                         list_add_tail(&ipr_cmd->queue,
9222                                         &ipr_cmd->hrrq->hrrq_free_q);
9223                         return;
9224                 }
9225
9226                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9227                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9228                         if (rc == IPR_RC_JOB_RETURN)
9229                                 return;
9230                 }
9231
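                /*
                 * Run the next job step. IPR_RC_JOB_CONTINUE means the
                 * step completed synchronously, so loop and run the step
                 * it selected. IPR_RC_JOB_RETURN means the step re-arms
                 * itself (timer, interrupt, or command completion) and
                 * this routine runs again as the command's done handler.
                 */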
9232                 ipr_reinit_ipr_cmnd(ipr_cmd);
9233                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9234                 rc = ipr_cmd->job_step(ipr_cmd);
9235         } while (rc == IPR_RC_JOB_CONTINUE);
9236 }
9237
9238 /**
9239  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9240  * @ioa_cfg:            ioa config struct
9241  * @job_step:           first job step of reset job
9242  * @shutdown_type:      shutdown type
9243  *
9244  * Description: This function will initiate the reset of the given adapter
9245  * starting at the selected job step.
9246  * If the caller needs to wait on the completion of the reset,
9247  * the caller must sleep on the reset_wait_q.
9248  *
9249  * Return value:
9250  *      none
9251  **/
9252 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9253                                     int (*job_step) (struct ipr_cmnd *),
9254                                     enum ipr_shutdown_type shutdown_type)
9255 {
9256         struct ipr_cmnd *ipr_cmd;
9257         int i;
9258
9259         ioa_cfg->in_reset_reload = 1;
9260         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9261                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9262                 ioa_cfg->hrrq[i].allow_cmds = 0;
9263                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9264         }
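        /* Ensure the cleared allow_cmds flags are visible before the
         * reset proceeds. */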
9265         wmb();
9266         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9267                 ioa_cfg->scsi_unblock = 0;
9268                 ioa_cfg->scsi_blocked = 1;
9269                 scsi_block_requests(ioa_cfg->host);
9270         }
9271
9272         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9273         ioa_cfg->reset_cmd = ipr_cmd;
9274         ipr_cmd->job_step = job_step;
9275         ipr_cmd->u.shutdown_type = shutdown_type;
9276
9277         ipr_reset_ioa_job(ipr_cmd);
9278 }
9279
9280 /**
9281  * ipr_initiate_ioa_reset - Initiate an adapter reset
9282  * @ioa_cfg:            ioa config struct
9283  * @shutdown_type:      shutdown type
9284  *
9285  * Description: This function will initiate the reset of the given adapter.
9286  * If the caller needs to wait on the completion of the reset,
9287  * the caller must sleep on the reset_wait_q.
9288  *
9289  * Return value:
9290  *      none
9291  **/
9292 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9293                                    enum ipr_shutdown_type shutdown_type)
9294 {
9295         int i;
9296
9297         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9298                 return;
9299
9300         if (ioa_cfg->in_reset_reload) {
9301                 if (ioa_cfg->sdt_state == GET_DUMP)
9302                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9303                 else if (ioa_cfg->sdt_state == READ_DUMP)
9304                         ioa_cfg->sdt_state = ABORT_DUMP;
9305         }
9306
9307         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9308                 dev_err(&ioa_cfg->pdev->dev,
9309                         "IOA taken offline - error recovery failed\n");
9310
9311                 ioa_cfg->reset_retries = 0;
9312                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9313                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9314                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9315                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9316                 }
9317                 wmb();
9318
9319                 if (ioa_cfg->in_ioa_bringdown) {
9320                         ioa_cfg->reset_cmd = NULL;
9321                         ioa_cfg->in_reset_reload = 0;
9322                         ipr_fail_all_ops(ioa_cfg);
9323                         wake_up_all(&ioa_cfg->reset_wait_q);
9324
9325                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9326                                 ioa_cfg->scsi_unblock = 1;
9327                                 schedule_work(&ioa_cfg->work_q);
9328                         }
9329                         return;
9330                 } else {
9331                         ioa_cfg->in_ioa_bringdown = 1;
9332                         shutdown_type = IPR_SHUTDOWN_NONE;
9333                 }
9334         }
9335
9336         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9337                                 shutdown_type);
9338 }
9339
9340 /**
9341  * ipr_reset_freeze - Hold off all I/O activity
9342  * @ipr_cmd:    ipr command struct
9343  *
9344  * Description: If the PCI slot is frozen, hold off all I/O
9345  * activity; then, as soon as the slot is available again,
9346  * initiate an adapter reset.
9347  *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/
9348 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9349 {
9350         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9351         int i;
9352
9353         /* Disallow new interrupts, avoid loop */
9354         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9355                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9356                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9357                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9358         }
9359         wmb();
9360         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9361         ipr_cmd->done = ipr_reset_ioa_job;
9362         return IPR_RC_JOB_RETURN;
9363 }
9364
9365 /**
9366  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9367  * @pdev:       PCI device struct
9368  *
9369  * Description: This routine is called to tell us that the MMIO
9370  * access to the IOA has been restored
9371  */
9372 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9373 {
9374         unsigned long flags = 0;
9375         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9376
9377         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9378         if (!ioa_cfg->probe_done)
9379                 pci_save_state(pdev);
9380         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9381         return PCI_ERS_RESULT_NEED_RESET;
9382 }
9383
9384 /**
9385  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9386  * @pdev:       PCI device struct
9387  *
9388  * Description: This routine is called to tell us that the PCI bus
9389  * is down. Can't do anything here, except put the device driver
9390  * into a holding pattern, waiting for the PCI bus to come back.
9391  */
9392 static void ipr_pci_frozen(struct pci_dev *pdev)
9393 {
9394         unsigned long flags = 0;
9395         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9396
9397         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9398         if (ioa_cfg->probe_done)
9399                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9400         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9401 }
9402
9403 /**
9404  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9405  * @pdev:       PCI device struct
9406  *
9407  * Description: This routine is called by the pci error recovery
9408  * code after the PCI slot has been reset, just before we
9409  * should resume normal operations.
9410  */
9411 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9412 {
9413         unsigned long flags = 0;
9414         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9415
9416         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9417         if (ioa_cfg->probe_done) {
9418                 if (ioa_cfg->needs_warm_reset)
9419                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9420                 else
9421                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9422                                                 IPR_SHUTDOWN_NONE);
9423         } else
9424                 wake_up_all(&ioa_cfg->eeh_wait_q);
9425         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9426         return PCI_ERS_RESULT_RECOVERED;
9427 }
9428
9429 /**
9430  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9431  * @pdev:       PCI device struct
9432  *
9433  * Description: This routine is called when the PCI bus has
9434  * permanently failed.
9435  */
9436 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9437 {
9438         unsigned long flags = 0;
9439         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9440         int i;
9441
9442         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9443         if (ioa_cfg->probe_done) {
9444                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9445                         ioa_cfg->sdt_state = ABORT_DUMP;
9446                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9447                 ioa_cfg->in_ioa_bringdown = 1;
9448                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9449                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9450                         ioa_cfg->hrrq[i].allow_cmds = 0;
9451                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9452                 }
9453                 wmb();
9454                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9455         } else
9456                 wake_up_all(&ioa_cfg->eeh_wait_q);
9457         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9458 }
9459
9460 /**
9461  * ipr_pci_error_detected - Called when a PCI error is detected.
9462  * @pdev:       PCI device struct
9463  * @state:      PCI channel state
9464  *
9465  * Description: Called when a PCI error is detected.
9466  *
9467  * Return value:
9468  *      PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_NEED_RESET / PCI_ERS_RESULT_DISCONNECT
9469  */
9470 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9471                                                pci_channel_state_t state)
9472 {
9473         switch (state) {
9474         case pci_channel_io_frozen:
9475                 ipr_pci_frozen(pdev);
9476                 return PCI_ERS_RESULT_CAN_RECOVER;
9477         case pci_channel_io_perm_failure:
9478                 ipr_pci_perm_failure(pdev);
9479                 return PCI_ERS_RESULT_DISCONNECT;
9481         default:
9482                 break;
9483         }
9484         return PCI_ERS_RESULT_NEED_RESET;
9485 }
9486
9487 /**
9488  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9489  * @ioa_cfg:    ioa cfg struct
9490  *
9491  * Description: This is the second phase of adapter initialization.
9492  * This function takes care of initializing the adapter to the point
9493  * where it can accept new commands.
9494  *
9495  * Return value:
9496  *      0 on success / -EIO on failure
9497  **/
9498 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9499 {
9500         int rc = 0;
9501         unsigned long host_lock_flags = 0;
9502
9503         ENTER;
9504         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9505         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9506         ioa_cfg->probe_done = 1;
9507         if (ioa_cfg->needs_hard_reset) {
9508                 ioa_cfg->needs_hard_reset = 0;
9509                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9510         } else
9511                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9512                                         IPR_SHUTDOWN_NONE);
9513         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9514
9515         LEAVE;
9516         return rc;
9517 }
9518
9519 /**
9520  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9521  * @ioa_cfg:    ioa config struct
9522  *
9523  * Return value:
9524  *      none
9525  **/
9526 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9527 {
9528         int i;
9529
9530         if (ioa_cfg->ipr_cmnd_list) {
9531                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9532                         if (ioa_cfg->ipr_cmnd_list[i])
9533                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9534                                               ioa_cfg->ipr_cmnd_list[i],
9535                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9536
9537                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9538                 }
9539         }
9540
9541         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9543
9544         kfree(ioa_cfg->ipr_cmnd_list);
9545         kfree(ioa_cfg->ipr_cmnd_list_dma);
9546         ioa_cfg->ipr_cmnd_list = NULL;
9547         ioa_cfg->ipr_cmnd_list_dma = NULL;
9548         ioa_cfg->ipr_cmd_pool = NULL;
9549 }
9550
9551 /**
9552  * ipr_free_mem - Frees memory allocated for an adapter
9553  * @ioa_cfg:    ioa cfg struct
9554  *
9555  * Return value:
9556  *      nothing
9557  **/
9558 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9559 {
9560         int i;
9561
9562         kfree(ioa_cfg->res_entries);
9563         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9564                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9565         ipr_free_cmd_blks(ioa_cfg);
9566
9567         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9568                 dma_free_coherent(&ioa_cfg->pdev->dev,
9569                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9570                                   ioa_cfg->hrrq[i].host_rrq,
9571                                   ioa_cfg->hrrq[i].host_rrq_dma);
9572
9573         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9574                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9575
9576         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9577                 dma_free_coherent(&ioa_cfg->pdev->dev,
9578                                   sizeof(struct ipr_hostrcb),
9579                                   ioa_cfg->hostrcb[i],
9580                                   ioa_cfg->hostrcb_dma[i]);
9581         }
9582
9583         ipr_free_dump(ioa_cfg);
9584         kfree(ioa_cfg->trace);
9585 }
9586
9587 /**
9588  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9589  * @ioa_cfg:    ipr cfg struct
9590  *
9591  * This function frees all allocated IRQs for the
9592  * specified adapter.
9593  *
9594  * Return value:
9595  *      none
9596  **/
9597 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9598 {
9599         struct pci_dev *pdev = ioa_cfg->pdev;
9600         int i;
9601
9602         for (i = 0; i < ioa_cfg->nvectors; i++)
9603                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9604         pci_free_irq_vectors(pdev);
9605 }
9606
9607 /**
9608  * ipr_free_all_resources - Free all allocated resources for an adapter.
9609  * @ioa_cfg:    ioa config struct
9610  *
9611  * This function frees all allocated resources for the
9612  * specified adapter.
9613  *
9614  * Return value:
9615  *      none
9616  **/
9617 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9618 {
9619         struct pci_dev *pdev = ioa_cfg->pdev;
9620
9621         ENTER;
9622         ipr_free_irqs(ioa_cfg);
9623         if (ioa_cfg->reset_work_q)
9624                 destroy_workqueue(ioa_cfg->reset_work_q);
9625         iounmap(ioa_cfg->hdw_dma_regs);
9626         pci_release_regions(pdev);
9627         ipr_free_mem(ioa_cfg);
9628         scsi_host_put(ioa_cfg->host);
9629         pci_disable_device(pdev);
9630         LEAVE;
9631 }
9632
9633 /**
9634  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9635  * @ioa_cfg:    ioa config struct
9636  *
9637  * Return value:
9638  *      0 on success / -ENOMEM on allocation failure
9639  **/
9640 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9641 {
9642         struct ipr_cmnd *ipr_cmd;
9643         struct ipr_ioarcb *ioarcb;
9644         dma_addr_t dma_addr;
9645         int i, entries_each_hrrq, hrrq_id = 0;
9646
9647         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9648                                                 sizeof(struct ipr_cmnd), 512, 0);
9649
9650         if (!ioa_cfg->ipr_cmd_pool)
9651                 return -ENOMEM;
9652
9653         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9654         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9655
9656         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9657                 ipr_free_cmd_blks(ioa_cfg);
9658                 return -ENOMEM;
9659         }
9660
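        /*
         * Partition the command blocks among the HRRQs. When multiple
         * queues exist, HRRQ 0 is reserved for internal commands and
         * the remaining blocks are split evenly among the rest; each
         * queue owns a contiguous [min_cmd_id, max_cmd_id] range.
         */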
9661         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9662                 if (ioa_cfg->hrrq_num > 1) {
9663                         if (i == 0) {
9664                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9665                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9666                                 ioa_cfg->hrrq[i].max_cmd_id =
9667                                         (entries_each_hrrq - 1);
9668                         } else {
9669                                 entries_each_hrrq =
9670                                         IPR_NUM_BASE_CMD_BLKS/
9671                                         (ioa_cfg->hrrq_num - 1);
9672                                 ioa_cfg->hrrq[i].min_cmd_id =
9673                                         IPR_NUM_INTERNAL_CMD_BLKS +
9674                                         (i - 1) * entries_each_hrrq;
9675                                 ioa_cfg->hrrq[i].max_cmd_id =
9676                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9677                                         i * entries_each_hrrq - 1);
9678                         }
9679                 } else {
9680                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9681                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9682                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9683                 }
9684                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9685         }
9686
9687         BUG_ON(ioa_cfg->hrrq_num == 0);
9688
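        /* The even split above can leave a remainder; fold any leftover
         * command blocks into the last HRRQ so all of them are usable. */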
9689         i = IPR_NUM_CMD_BLKS -
9690                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9691         if (i > 0) {
9692                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9693                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9694         }
9695
9696         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9697                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9698                                 GFP_KERNEL, &dma_addr);
9699
9700                 if (!ipr_cmd) {
9701                         ipr_free_cmd_blks(ioa_cfg);
9702                         return -ENOMEM;
9703                 }
9704
9705                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9706                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9707
9708                 ioarcb = &ipr_cmd->ioarcb;
9709                 ipr_cmd->dma_addr = dma_addr;
9710                 if (ioa_cfg->sis64)
9711                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9712                 else
9713                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9714
9715                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9716                 if (ioa_cfg->sis64) {
9717                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9718                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9719                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9720                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9721                 } else {
9722                         ioarcb->write_ioadl_addr =
9723                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9724                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9725                         ioarcb->ioasa_host_pci_addr =
9726                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9727                 }
9728                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9729                 ipr_cmd->cmd_index = i;
9730                 ipr_cmd->ioa_cfg = ioa_cfg;
9731                 ipr_cmd->sense_buffer_dma = dma_addr +
9732                         offsetof(struct ipr_cmnd, sense_buffer);
9733
9734                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9735                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9736                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
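                /* Advance to the next HRRQ once this command index
                 * reaches the end of the current queue's range. */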
9737                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9738                         hrrq_id++;
9739         }
9740
9741         return 0;
9742 }
9743
9744 /**
9745  * ipr_alloc_mem - Allocate memory for an adapter
9746  * @ioa_cfg:    ioa config struct
9747  *
9748  * Return value:
9749  *      0 on success / non-zero for error
9750  **/
9751 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9752 {
9753         struct pci_dev *pdev = ioa_cfg->pdev;
9754         int i, rc = -ENOMEM;
9755
9756         ENTER;
9757         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9758                                        sizeof(struct ipr_resource_entry),
9759                                        GFP_KERNEL);
9760
9761         if (!ioa_cfg->res_entries)
9762                 goto out;
9763
9764         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9765                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9766                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9767         }
9768
9769         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9770                                               sizeof(struct ipr_misc_cbs),
9771                                               &ioa_cfg->vpd_cbs_dma,
9772                                               GFP_KERNEL);
9773
9774         if (!ioa_cfg->vpd_cbs)
9775                 goto out_free_res_entries;
9776
9777         if (ipr_alloc_cmd_blks(ioa_cfg))
9778                 goto out_free_vpd_cbs;
9779
9780         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9781                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9782                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9783                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9784                                         GFP_KERNEL);
9785
9786                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9787                         while (--i >= 0)
9788                                 dma_free_coherent(&pdev->dev,
9789                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9790                                         ioa_cfg->hrrq[i].host_rrq,
9791                                         ioa_cfg->hrrq[i].host_rrq_dma);
9792                         goto out_ipr_free_cmd_blocks;
9793                 }
9794                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9795         }
9796
9797         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9798                                                   ioa_cfg->cfg_table_size,
9799                                                   &ioa_cfg->cfg_table_dma,
9800                                                   GFP_KERNEL);
9801
9802         if (!ioa_cfg->u.cfg_table)
9803                 goto out_free_host_rrq;
9804
9805         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9806                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9807                                                          sizeof(struct ipr_hostrcb),
9808                                                          &ioa_cfg->hostrcb_dma[i],
9809                                                          GFP_KERNEL);
9810
9811                 if (!ioa_cfg->hostrcb[i])
9812                         goto out_free_hostrcb_dma;
9813
9814                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9815                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9816                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9817                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9818         }
9819
9820         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9821                                  sizeof(struct ipr_trace_entry),
9822                                  GFP_KERNEL);
9823
9824         if (!ioa_cfg->trace)
9825                 goto out_free_hostrcb_dma;
9826
9827         rc = 0;
9828 out:
9829         LEAVE;
9830         return rc;
9831
9832 out_free_hostrcb_dma:
9833         while (i-- > 0) {
9834                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9835                                   ioa_cfg->hostrcb[i],
9836                                   ioa_cfg->hostrcb_dma[i]);
9837         }
9838         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9839                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9840 out_free_host_rrq:
9841         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9842                 dma_free_coherent(&pdev->dev,
9843                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9844                                   ioa_cfg->hrrq[i].host_rrq,
9845                                   ioa_cfg->hrrq[i].host_rrq_dma);
9846         }
9847 out_ipr_free_cmd_blocks:
9848         ipr_free_cmd_blks(ioa_cfg);
9849 out_free_vpd_cbs:
9850         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9851                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9852 out_free_res_entries:
9853         kfree(ioa_cfg->res_entries);
9854         goto out;
9855 }
9856
9857 /**
9858  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9859  * @ioa_cfg:    ioa config struct
9860  *
9861  * Return value:
9862  *      none
9863  **/
9864 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9865 {
9866         int i;
9867
9868         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9869                 ioa_cfg->bus_attr[i].bus = i;
9870                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9871                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9872                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9873                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9874                 else
9875                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9876         }
9877 }
9878
9879 /**
9880  * ipr_init_regs - Initialize IOA registers
9881  * @ioa_cfg:    ioa config struct
9882  *
9883  * Return value:
9884  *      none
9885  **/
9886 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9887 {
9888         const struct ipr_interrupt_offsets *p;
9889         struct ipr_interrupts *t;
9890         void __iomem *base;
9891
9892         p = &ioa_cfg->chip_cfg->regs;
9893         t = &ioa_cfg->regs;
9894         base = ioa_cfg->hdw_dma_regs;
9895
9896         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9897         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9898         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9899         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9900         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9901         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9902         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9903         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9904         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9905         t->ioarrin_reg = base + p->ioarrin_reg;
9906         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9907         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9908         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9909         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9910         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9911         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9912
9913         if (ioa_cfg->sis64) {
9914                 t->init_feedback_reg = base + p->init_feedback_reg;
9915                 t->dump_addr_reg = base + p->dump_addr_reg;
9916                 t->dump_data_reg = base + p->dump_data_reg;
9917                 t->endian_swap_reg = base + p->endian_swap_reg;
9918         }
9919 }
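/*
 * Illustrative fragment, assuming the register names used above: each
 * mapped register is simply the ioremapped BAR base plus a
 * chip-specific byte offset, and the resulting __iomem cookie is
 * accessed with readl()/writel().
 */
#if 0
	const struct ipr_interrupt_offsets *p = &ioa_cfg->chip_cfg->regs;
	void __iomem *base = ioa_cfg->hdw_dma_regs;
	u32 pending;

	pending = readl(base + p->sense_interrupt_reg32);	/* MMIO read */
	writel(~0, base + p->clr_interrupt_reg32);		/* MMIO write */
#endif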
9920
9921 /**
9922  * ipr_init_ioa_cfg - Initialize IOA config struct
9923  * @ioa_cfg:    ioa config struct
9924  * @host:               scsi host struct
9925  * @pdev:               PCI dev struct
9926  *
9927  * Return value:
9928  *      none
9929  **/
9930 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9931                              struct Scsi_Host *host, struct pci_dev *pdev)
9932 {
9933         int i;
9934
9935         ioa_cfg->host = host;
9936         ioa_cfg->pdev = pdev;
9937         ioa_cfg->log_level = ipr_log_level;
9938         ioa_cfg->doorbell = IPR_DOORBELL;
9939         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9940         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9941         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9942         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9943         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9944         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9945
9946         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9947         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9948         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9949         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9950         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9951         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9952         INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9953         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9954         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9955         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9956         ioa_cfg->sdt_state = INACTIVE;
9957
9958         ipr_initialize_bus_attr(ioa_cfg);
9959         ioa_cfg->max_devs_supported = ipr_max_devs;
9960
9961         if (ioa_cfg->sis64) {
9962                 host->max_channel = IPR_MAX_SIS64_BUSES;
9963                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9964                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9965                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9966                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9967                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9968                                            + ((sizeof(struct ipr_config_table_entry64)
9969                                                * ioa_cfg->max_devs_supported)));
9970         } else {
9971                 host->max_channel = IPR_VSET_BUS;
9972                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9973                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9974                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9975                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9976                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9977                                            + ((sizeof(struct ipr_config_table_entry)
9978                                                * ioa_cfg->max_devs_supported)));
9979         }
9980
9981         host->unique_id = host->host_no;
9982         host->max_cmd_len = IPR_MAX_CDB_LEN;
9983         host->can_queue = ioa_cfg->max_cmds;
9984         pci_set_drvdata(pdev, ioa_cfg);
9985
9986         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9987                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9988                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9989                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9990                 if (i == 0)
9991                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9992                 else
9993                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9994         }
9995 }
9996
9997 /**
9998  * ipr_get_chip_info - Find adapter chip information
9999  * @dev_id:             PCI device id struct
10000  *
10001  * Return value:
10002  *      ptr to chip information on success / NULL on failure
10003  **/
10004 static const struct ipr_chip_t *
10005 ipr_get_chip_info(const struct pci_device_id *dev_id)
10006 {
10007         int i;
10008
10009         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10010                 if (ipr_chip[i].vendor == dev_id->vendor &&
10011                     ipr_chip[i].device == dev_id->device)
10012                         return &ipr_chip[i];
10013         return NULL;
10014 }
10015
10016 /**
10017  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10018  *                                              during probe time
10019  * @ioa_cfg:    ioa config struct
10020  *
10021  * Return value:
10022  *      None
10023  **/
10024 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10025 {
10026         struct pci_dev *pdev = ioa_cfg->pdev;
10027
10028         if (pci_channel_offline(pdev)) {
10029                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10030                                    !pci_channel_offline(pdev),
10031                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10032                 pci_restore_state(pdev);
10033         }
10034 }
10035
10036 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10037 {
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	/* snprintf() always NUL-terminates; no manual termination needed */
	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++)
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10046 }
10047
10048 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10049                 struct pci_dev *pdev)
10050 {
10051         int i, rc;
10052
10053         for (i = 1; i < ioa_cfg->nvectors; i++) {
10054                 rc = request_irq(pci_irq_vector(pdev, i),
10055                         ipr_isr_mhrrq,
10056                         0,
10057                         ioa_cfg->vectors_info[i].desc,
10058                         &ioa_cfg->hrrq[i]);
10059                 if (rc) {
10060                         while (--i > 0)
10061                                 free_irq(pci_irq_vector(pdev, i),
10062                                         &ioa_cfg->hrrq[i]);
10063                         return rc;
10064                 }
10065         }
10066         return 0;
10067 }
10068
10069 /**
10070  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:		interrupt number
 * @devp:		pointer to the ioa config struct
10072  *
10073  * Description: Simply set the msi_received flag to 1 indicating that
10074  * Message Signaled Interrupts are supported.
10075  *
10076  * Return value:
 *	IRQ_HANDLED
10078  **/
10079 static irqreturn_t ipr_test_intr(int irq, void *devp)
10080 {
10081         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10082         unsigned long lock_flags = 0;
10083         irqreturn_t rc = IRQ_HANDLED;
10084
	dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10086         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10087
10088         ioa_cfg->msi_received = 1;
10089         wake_up(&ioa_cfg->msi_wait_q);
10090
10091         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10092         return rc;
10093 }
10094
10095 /**
10096  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:		ioa config struct
 * @pdev:		PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
10102  *
10103  * Return value:
10104  *      0 on success / non-zero on failure
10105  **/
10106 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10107 {
10108         int rc;
10109         volatile u32 int_reg;
10110         unsigned long lock_flags = 0;
10111         int irq = pci_irq_vector(pdev, 0);
10112
10113         ENTER;
10114
10115         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10116         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10117         ioa_cfg->msi_received = 0;
10118         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10119         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10120         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10121         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10122
10123         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10124         if (rc) {
		dev_err(&pdev->dev, "Cannot assign irq %d\n", irq);
10126                 return rc;
10127         } else if (ipr_debug)
10128                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10129
10130         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10131         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10132         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10133         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10134         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10135
10136         if (!ioa_cfg->msi_received) {
10137                 /* MSI test failed */
10138                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10139                 rc = -EOPNOTSUPP;
10140         } else if (ipr_debug)
10141                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10142
10143         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10144
10145         free_irq(irq, ioa_cfg);
10146
10147         LEAVE;
10148
10149         return rc;
10150 }
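/*
 * Illustrative sketch, not driver code: the heart of the MSI
 * self-test above. example_msi_selftest is a hypothetical helper;
 * the calls it makes mirror ipr_test_msi() — request the first
 * vector, poke the adapter's debug-acknowledge bit, and wait up to
 * one second for ipr_test_intr() to flag receipt.
 */
#if 0
static int example_msi_selftest(struct ipr_ioa_cfg *ioa_cfg,
				struct pci_dev *pdev)
{
	int irq = pci_irq_vector(pdev, 0);
	int rc;

	ioa_cfg->msi_received = 0;
	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc)
		return rc;

	/* ask the IOA to raise a test interrupt, then wait up to 1s */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.sense_interrupt_reg32);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);

	free_irq(irq, ioa_cfg);
	return ioa_cfg->msi_received ? 0 : -EOPNOTSUPP;
}
#endif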
10151
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10153  * @pdev:               PCI device struct
10154  * @dev_id:             PCI device id struct
10155  *
10156  * Return value:
10157  *      0 on success / non-zero on failure
10158  **/
10159 static int ipr_probe_ioa(struct pci_dev *pdev,
10160                          const struct pci_device_id *dev_id)
10161 {
10162         struct ipr_ioa_cfg *ioa_cfg;
10163         struct Scsi_Host *host;
10164         unsigned long ipr_regs_pci;
10165         void __iomem *ipr_regs;
10166         int rc = PCIBIOS_SUCCESSFUL;
10167         volatile u32 mask, uproc, interrupts;
10168         unsigned long lock_flags, driver_lock_flags;
10169         unsigned int irq_flag;
10170
10171         ENTER;
10172
10173         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10174         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10175
10176         if (!host) {
10177                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10178                 rc = -ENOMEM;
10179                 goto out;
10180         }
10181
10182         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10183         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10184         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10185
10186         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10187
10188         if (!ioa_cfg->ipr_chip) {
10189                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10190                         dev_id->vendor, dev_id->device);
10191                 goto out_scsi_host_put;
10192         }
10193
10194         /* set SIS 32 or SIS 64 */
10195         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10196         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10197         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10198         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10199
10200         if (ipr_transop_timeout)
10201                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10202         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10203                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10204         else
10205                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10206
10207         ioa_cfg->revid = pdev->revision;
10208
10209         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10210
10211         ipr_regs_pci = pci_resource_start(pdev, 0);
10212
10213         rc = pci_request_regions(pdev, IPR_NAME);
10214         if (rc < 0) {
10215                 dev_err(&pdev->dev,
10216                         "Couldn't register memory range of registers\n");
10217                 goto out_scsi_host_put;
10218         }
10219
10220         rc = pci_enable_device(pdev);
10221
10222         if (rc || pci_channel_offline(pdev)) {
10223                 if (pci_channel_offline(pdev)) {
10224                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10225                         rc = pci_enable_device(pdev);
10226                 }
10227
10228                 if (rc) {
10229                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10230                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10231                         goto out_release_regions;
10232                 }
10233         }
10234
10235         ipr_regs = pci_ioremap_bar(pdev, 0);
10236
10237         if (!ipr_regs) {
10238                 dev_err(&pdev->dev,
10239                         "Couldn't map memory range of registers\n");
10240                 rc = -ENOMEM;
10241                 goto out_disable;
10242         }
10243
10244         ioa_cfg->hdw_dma_regs = ipr_regs;
10245         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10246         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10247
10248         ipr_init_regs(ioa_cfg);
10249
10250         if (ioa_cfg->sis64) {
10251                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10252                 if (rc < 0) {
10253                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10254                         rc = dma_set_mask_and_coherent(&pdev->dev,
10255                                                        DMA_BIT_MASK(32));
10256                 }
10257         } else
10258                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10259
10260         if (rc < 0) {
10261                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10262                 goto cleanup_nomem;
10263         }
10264
10265         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10266                                    ioa_cfg->chip_cfg->cache_line_size);
10267
10268         if (rc != PCIBIOS_SUCCESSFUL) {
10269                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10270                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10271                 rc = -EIO;
10272                 goto cleanup_nomem;
10273         }
10274
10275         /* Issue MMIO read to ensure card is not in EEH */
10276         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10277         ipr_wait_for_pci_err_recovery(ioa_cfg);
10278
10279         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10280                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10281                         IPR_MAX_MSIX_VECTORS);
10282                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10283         }
10284
10285         irq_flag = PCI_IRQ_LEGACY;
10286         if (ioa_cfg->ipr_chip->has_msi)
10287                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10288         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10289         if (rc < 0) {
10290                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10291                 goto cleanup_nomem;
10292         }
10293         ioa_cfg->nvectors = rc;
10294
10295         if (!pdev->msi_enabled && !pdev->msix_enabled)
10296                 ioa_cfg->clear_isr = 1;
10297
10298         pci_set_master(pdev);
10299
10300         if (pci_channel_offline(pdev)) {
10301                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10302                 pci_set_master(pdev);
10303                 if (pci_channel_offline(pdev)) {
10304                         rc = -EIO;
10305                         goto out_msi_disable;
10306                 }
10307         }
10308
10309         if (pdev->msi_enabled || pdev->msix_enabled) {
10310                 rc = ipr_test_msi(ioa_cfg, pdev);
10311                 switch (rc) {
10312                 case 0:
10313                         dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10315                                 pdev->msix_enabled ? "-X" : "");
10316                         break;
10317                 case -EOPNOTSUPP:
10318                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10319                         pci_free_irq_vectors(pdev);
10320
10321                         ioa_cfg->nvectors = 1;
10322                         ioa_cfg->clear_isr = 1;
10323                         break;
10324                 default:
10325                         goto out_msi_disable;
10326                 }
10327         }
10328
10329         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10330                                 (unsigned int)num_online_cpus(),
10331                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10332
10333         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10334                 goto out_msi_disable;
10335
10336         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10337                 goto out_msi_disable;
10338
10339         rc = ipr_alloc_mem(ioa_cfg);
10340         if (rc < 0) {
10341                 dev_err(&pdev->dev,
10342                         "Couldn't allocate enough memory for device driver!\n");
10343                 goto out_msi_disable;
10344         }
10345
10346         /* Save away PCI config space for use following IOA reset */
10347         rc = pci_save_state(pdev);
10348
10349         if (rc != PCIBIOS_SUCCESSFUL) {
10350                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10351                 rc = -EIO;
10352                 goto cleanup_nolog;
10353         }
10354
10355         /*
10356          * If HRRQ updated interrupt is not masked, or reset alert is set,
10357          * the card is in an unknown state and needs a hard reset
10358          */
10359         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10360         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10361         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10362         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10363                 ioa_cfg->needs_hard_reset = 1;
10364         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10365                 ioa_cfg->needs_hard_reset = 1;
10366         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10367                 ioa_cfg->ioa_unit_checked = 1;
10368
10369         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10370         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10371         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10372
10373         if (pdev->msi_enabled || pdev->msix_enabled) {
10374                 name_msi_vectors(ioa_cfg);
10375                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10376                         ioa_cfg->vectors_info[0].desc,
10377                         &ioa_cfg->hrrq[0]);
10378                 if (!rc)
10379                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10380         } else {
10381                 rc = request_irq(pdev->irq, ipr_isr,
10382                          IRQF_SHARED,
10383                          IPR_NAME, &ioa_cfg->hrrq[0]);
10384         }
10385         if (rc) {
10386                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10387                         pdev->irq, rc);
10388                 goto cleanup_nolog;
10389         }
10390
10391         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10392             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10393                 ioa_cfg->needs_warm_reset = 1;
10394                 ioa_cfg->reset = ipr_reset_slot_reset;
10395
10396                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10397                                                                 WQ_MEM_RECLAIM, host->host_no);
10398
10399                 if (!ioa_cfg->reset_work_q) {
10400                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10401                         rc = -ENOMEM;
10402                         goto out_free_irq;
10403                 }
10404         } else
10405                 ioa_cfg->reset = ipr_reset_start_bist;
10406
10407         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10408         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10409         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10410
10411         LEAVE;
10412 out:
10413         return rc;
10414
10415 out_free_irq:
10416         ipr_free_irqs(ioa_cfg);
10417 cleanup_nolog:
10418         ipr_free_mem(ioa_cfg);
10419 out_msi_disable:
10420         ipr_wait_for_pci_err_recovery(ioa_cfg);
10421         pci_free_irq_vectors(pdev);
10422 cleanup_nomem:
10423         iounmap(ipr_regs);
10424 out_disable:
10425         pci_disable_device(pdev);
10426 out_release_regions:
10427         pci_release_regions(pdev);
10428 out_scsi_host_put:
10429         scsi_host_put(host);
10430         goto out;
10431 }
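/*
 * Illustrative sketch, not driver code: the interrupt-mode fallback
 * ipr_probe_ioa() relies on. pci_alloc_irq_vectors() tries MSI-X,
 * then MSI, then a single legacy INTx vector, returning the number of
 * vectors actually obtained (or a negative errno).
 * example_alloc_vectors is a hypothetical helper.
 */
#if 0
static int example_alloc_vectors(struct pci_dev *pdev, bool has_msi,
				 unsigned int max_vecs)
{
	unsigned int flags = PCI_IRQ_LEGACY;

	if (has_msi)
		flags |= PCI_IRQ_MSI | PCI_IRQ_MSIX;

	return pci_alloc_irq_vectors(pdev, 1, max_vecs, flags);
}
#endif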
10432
10433 /**
10434  * ipr_initiate_ioa_bringdown - Bring down an adapter
10435  * @ioa_cfg:            ioa config struct
10436  * @shutdown_type:      shutdown type
10437  *
10438  * Description: This function will initiate bringing down the adapter.
10439  * This consists of issuing an IOA shutdown to the adapter
10440  * to flush the cache, and running BIST.
10441  * If the caller needs to wait on the completion of the reset,
10442  * the caller must sleep on the reset_wait_q.
10443  *
10444  * Return value:
10445  *      none
10446  **/
10447 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10448                                        enum ipr_shutdown_type shutdown_type)
10449 {
10450         ENTER;
10451         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10452                 ioa_cfg->sdt_state = ABORT_DUMP;
10453         ioa_cfg->reset_retries = 0;
10454         ioa_cfg->in_ioa_bringdown = 1;
10455         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10456         LEAVE;
10457 }
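/*
 * Illustrative fragment: how a caller waits for the bringdown to
 * finish, per the note above — the pattern __ipr_remove() and
 * ipr_shutdown() below both follow. The host lock must be dropped
 * before sleeping on reset_wait_q.
 */
#if 0
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
#endif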
10458
10459 /**
10460  * __ipr_remove - Remove a single adapter
10461  * @pdev:       pci device struct
10462  *
10463  * Adapter hot plug remove entry point.
10464  *
10465  * Return value:
10466  *      none
10467  **/
10468 static void __ipr_remove(struct pci_dev *pdev)
10469 {
10470         unsigned long host_lock_flags = 0;
10471         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10472         int i;
10473         unsigned long driver_lock_flags;
10474         ENTER;
10475
10476         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10477         while (ioa_cfg->in_reset_reload) {
10478                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10479                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10480                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10481         }
10482
10483         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10484                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10485                 ioa_cfg->hrrq[i].removing_ioa = 1;
10486                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10487         }
10488         wmb();
10489         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10490
10491         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10492         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10493         flush_work(&ioa_cfg->work_q);
10494         if (ioa_cfg->reset_work_q)
10495                 flush_workqueue(ioa_cfg->reset_work_q);
10496         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10497         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10498
10499         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10500         list_del(&ioa_cfg->queue);
10501         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10502
10503         if (ioa_cfg->sdt_state == ABORT_DUMP)
10504                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10505         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10506
10507         ipr_free_all_resources(ioa_cfg);
10508
10509         LEAVE;
10510 }
10511
10512 /**
10513  * ipr_remove - IOA hot plug remove entry point
10514  * @pdev:       pci device struct
10515  *
10516  * Adapter hot plug remove entry point.
10517  *
10518  * Return value:
10519  *      none
10520  **/
10521 static void ipr_remove(struct pci_dev *pdev)
10522 {
10523         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10524
10525         ENTER;
10526
10527         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10528                               &ipr_trace_attr);
10529         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10530                              &ipr_dump_attr);
10531         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10532                         &ipr_ioa_async_err_log);
10533         scsi_remove_host(ioa_cfg->host);
10534
10535         __ipr_remove(pdev);
10536
10537         LEAVE;
10538 }
10539
10540 /**
10541  * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
10543  * Return value:
10544  *      0 on success / non-zero on failure
10545  **/
10546 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10547 {
10548         struct ipr_ioa_cfg *ioa_cfg;
10549         unsigned long flags;
10550         int rc, i;
10551
10552         rc = ipr_probe_ioa(pdev, dev_id);
10553
10554         if (rc)
10555                 return rc;
10556
10557         ioa_cfg = pci_get_drvdata(pdev);
10558         rc = ipr_probe_ioa_part2(ioa_cfg);
10559
10560         if (rc) {
10561                 __ipr_remove(pdev);
10562                 return rc;
10563         }
10564
10565         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10566
10567         if (rc) {
10568                 __ipr_remove(pdev);
10569                 return rc;
10570         }
10571
10572         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10573                                    &ipr_trace_attr);
10574
10575         if (rc) {
10576                 scsi_remove_host(ioa_cfg->host);
10577                 __ipr_remove(pdev);
10578                 return rc;
10579         }
10580
10581         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10582                         &ipr_ioa_async_err_log);
10583
	if (rc) {
		/* only the trace file exists at this point; the dump
		 * file has not been created yet */
10587                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10588                                 &ipr_trace_attr);
10589                 scsi_remove_host(ioa_cfg->host);
10590                 __ipr_remove(pdev);
10591                 return rc;
10592         }
10593
10594         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10595                                    &ipr_dump_attr);
10596
10597         if (rc) {
10598                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10599                                       &ipr_ioa_async_err_log);
10600                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10601                                       &ipr_trace_attr);
10602                 scsi_remove_host(ioa_cfg->host);
10603                 __ipr_remove(pdev);
10604                 return rc;
10605         }
10606         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10607         ioa_cfg->scan_enabled = 1;
10608         schedule_work(&ioa_cfg->work_q);
10609         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10610
10611         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10612
10613         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10614                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10615                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10616                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10617                 }
10618         }
10619
10620         scsi_scan_host(ioa_cfg->host);
10621
10622         return 0;
10623 }
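/*
 * Illustrative sketch, not driver code: the teardown discipline
 * ipr_probe() follows — each failure undoes, in reverse order,
 * exactly what has succeeded so far. Reduced here to two of the
 * sysfs attributes; example_setup_sysfs is a hypothetical helper.
 */
#if 0
static int example_setup_sysfs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct kobject *kobj = &ioa_cfg->host->shost_dev.kobj;
	int rc;

	rc = ipr_create_trace_file(kobj, &ipr_trace_attr);
	if (rc)
		return rc;

	rc = sysfs_create_bin_file(kobj, &ipr_ioa_async_err_log);
	if (rc)
		ipr_remove_trace_file(kobj, &ipr_trace_attr);
	return rc;
}
#endif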
10624
10625 /**
10626  * ipr_shutdown - Shutdown handler.
10627  * @pdev:       pci device struct
10628  *
10629  * This function is invoked upon system shutdown/reboot. It will issue
10630  * an adapter shutdown to the adapter to flush the write cache.
10631  *
10632  * Return value:
10633  *      none
10634  **/
10635 static void ipr_shutdown(struct pci_dev *pdev)
10636 {
10637         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10638         unsigned long lock_flags = 0;
10639         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10640         int i;
10641
10642         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10643         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10644                 ioa_cfg->iopoll_weight = 0;
10645                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10646                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10647         }
10648
10649         while (ioa_cfg->in_reset_reload) {
10650                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10651                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10652                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10653         }
10654
10655         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10656                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10657
10658         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10659         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10660         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10661         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10662                 ipr_free_irqs(ioa_cfg);
10663                 pci_disable_device(ioa_cfg->pdev);
10664         }
10665 }
10666
10667 static struct pci_device_id ipr_pci_table[] = {
10668         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10669                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10670         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10671                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10672         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10673                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10674         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10675                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10676         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10677                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10678         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10679                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10680         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10681                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10682         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10683                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10684                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10685         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10686               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10687         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10688               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10689               IPR_USE_LONG_TRANSOP_TIMEOUT },
10690         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10691               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10692               IPR_USE_LONG_TRANSOP_TIMEOUT },
10693         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10694               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10695         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10696               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10697               IPR_USE_LONG_TRANSOP_TIMEOUT},
10698         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10699               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10700               IPR_USE_LONG_TRANSOP_TIMEOUT },
10701         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10702               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10703               IPR_USE_LONG_TRANSOP_TIMEOUT },
10704         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10705               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10706         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10707               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10708         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10709               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10710               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10711         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10712                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10713         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10714                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10715         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10716                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10717                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10718         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10719                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10720                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10721         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10722                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10723         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10724                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10725         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10726                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10727         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10728                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10729         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10730                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10731         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10732                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10733         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10734                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10735         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10736                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10737         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10739         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10741         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10743         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10745         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10747         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10749         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10751         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10753         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10755         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10757         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10759         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10760                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10761         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10762                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10763         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10764                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10765         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10766                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10767         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10768                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10769         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10770                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10771         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10772                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10773         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10774                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10775         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10776                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10777         { }
10778 };
10779 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
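/*
 * Illustrative note: each positional entry above maps onto the
 * struct pci_device_id fields (vendor, device, subvendor, subdevice,
 * class, class_mask, driver_data). The 57B7 entry, for example, in
 * designated-initializer form:
 */
#if 0
	{ .vendor      = PCI_VENDOR_ID_IBM,
	  .device      = PCI_DEVICE_ID_IBM_OBSIDIAN_E,
	  .subvendor   = PCI_VENDOR_ID_IBM,
	  .subdevice   = IPR_SUBS_DEV_ID_57B7,
	  .driver_data = IPR_USE_LONG_TRANSOP_TIMEOUT |
			 IPR_USE_PCI_WARM_RESET },
#endif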
10780
10781 static const struct pci_error_handlers ipr_err_handler = {
10782         .error_detected = ipr_pci_error_detected,
10783         .mmio_enabled = ipr_pci_mmio_enabled,
10784         .slot_reset = ipr_pci_slot_reset,
10785 };
10786
10787 static struct pci_driver ipr_driver = {
10788         .name = IPR_NAME,
10789         .id_table = ipr_pci_table,
10790         .probe = ipr_probe,
10791         .remove = ipr_remove,
10792         .shutdown = ipr_shutdown,
10793         .err_handler = &ipr_err_handler,
10794 };
10795
10796 /**
10797  * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
10799  * Return value:
10800  *      none
10801  **/
10802 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10803 {
10804         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10805 }
10806
10807 /**
10808  * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	unused
 *
 * Return value:
 *	NOTIFY_OK if shutdown prepare was issued / NOTIFY_DONE if the
 *	event was not of interest
10812  **/
10813 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10814 {
10815         struct ipr_cmnd *ipr_cmd;
10816         struct ipr_ioa_cfg *ioa_cfg;
10817         unsigned long flags = 0, driver_lock_flags;
10818
10819         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10820                 return NOTIFY_DONE;
10821
10822         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10823
10824         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10825                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10826                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10827                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10828                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10829                         continue;
10830                 }
10831
10832                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10833                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10834                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10835                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10836                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10837
10838                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10839                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10840         }
10841         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10842
10843         return NOTIFY_OK;
10844 }
10845
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
10849
10850 /**
10851  * ipr_init - Module entry point
10852  *
10853  * Return value:
10854  *      0 on success / negative value on failure
10855  **/
10856 static int __init ipr_init(void)
10857 {
10858         int rc;
10859
10860         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10861                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10862
10863         register_reboot_notifier(&ipr_notifier);
10864         rc = pci_register_driver(&ipr_driver);
10865         if (rc) {
10866                 unregister_reboot_notifier(&ipr_notifier);
10867                 return rc;
10868         }
10869
10870         return 0;
10871 }
10872
10873 /**
10874  * ipr_exit - Module unload
10875  *
10876  * Module unload entry point.
10877  *
10878  * Return value:
10879  *      none
10880  **/
10881 static void __exit ipr_exit(void)
10882 {
10883         unregister_reboot_notifier(&ipr_notifier);
10884         pci_unregister_driver(&ipr_driver);
10885 }
10886
10887 module_init(ipr_init);
10888 module_exit(ipr_exit);